diff --git a/charts/spiderpool/README.md b/charts/spiderpool/README.md index 780c3b59e4..3f65ce21c5 100644 --- a/charts/spiderpool/README.md +++ b/charts/spiderpool/README.md @@ -198,7 +198,7 @@ helm install spiderpool spiderpool/spiderpool --wait --namespace kube-system \ | `multus.multusCNI.image.repository` | the multus-CNI image repository | `k8snetworkplumbingwg/multus-cni` | | `multus.multusCNI.image.pullPolicy` | the multus-CNI image pullPolicy | `IfNotPresent` | | `multus.multusCNI.image.digest` | the multus-CNI image digest | `""` | -| `multus.multusCNI.image.tag` | the multus-CNI image tag | `v3.9.3` | +| `multus.multusCNI.image.tag` | the multus-CNI image tag | `v4.1.4` | | `multus.multusCNI.image.imagePullSecrets` | the multus-CNI image imagePullSecrets | `[]` | | `multus.multusCNI.defaultCniCRName` | if this value is empty, multus will automatically get default CNI according to the existed CNI conf file in /etc/cni/net.d/, if no cni files found in /etc/cni/net.d, A Spidermultusconfig CR named default will be created, please update the related SpiderMultusConfig for default CNI after installation. The namespace of defaultCniCRName follows with the release namespace of spdierpool | `""` | | `multus.multusCNI.securityContext.privileged` | the securityContext privileged of multus-CNI daemonset pod | `true` | diff --git a/charts/spiderpool/templates/configmap.yaml b/charts/spiderpool/templates/configmap.yaml index 2202605c5c..f51ae95a31 100644 --- a/charts/spiderpool/templates/configmap.yaml +++ b/charts/spiderpool/templates/configmap.yaml @@ -13,6 +13,7 @@ metadata: {{- include "tplvalues.render" ( dict "value" .Values.global.commonAnnotations "context" $ ) | nindent 4 }} {{- end }} data: + clusterNetwork: {{ .Values.multus.multusCNI.defaultCniCRName | quote }} conf.yml: | ipamUnixSocketPath: {{ .Values.global.ipamUNIXSocketHostPath }} enableIPv4: {{ .Values.ipam.enableIPv4 }} @@ -36,7 +37,7 @@ data: kind: ConfigMap apiVersion: v1 metadata: - name: {{ .Values.multus.multusCNI.name | trunc 63 | trimSuffix "-" }} + name: {{ .Values.multus.multusCNI.name | trunc 63 | trimSuffix "-" }}-entrypoint namespace: {{ .Release.Namespace | quote }} labels: {{- include "spiderpool.multus.labels" . | nindent 4 }} @@ -44,23 +45,160 @@ metadata: {{- include "tplvalues.render" ( dict "value" .Values.global.commonLabels "context" $ ) | nindent 4 }} {{- end }} data: - cni-conf.json: | + entrypoint.sh: | + #!/bin/bash + set -e + + function log(){ + echo "INFO: $(date --iso-8601=seconds) ${1}" + } + function error(){ + log "ERR: {$1}" + } + function warn(){ + log "WARN: {$1}" + } + + function generateKubeConfig { + # Check if we're running as a k8s pod. + if [ -f "$SERVICE_ACCOUNT_TOKEN_PATH" ]; then + # We're running as a k8d pod - expect some variables. + if [ -z ${KUBERNETES_SERVICE_HOST} ]; then + error "KUBERNETES_SERVICE_HOST not set"; exit 1; + fi + if [ -z ${KUBERNETES_SERVICE_PORT} ]; then + error "KUBERNETES_SERVICE_PORT not set"; exit 1; + fi + + if [ "$SKIP_TLS_VERIFY" == "true" ]; then + TLS_CFG="insecure-skip-tls-verify: true" + elif [ -f "$KUBE_CA_FILE" ]; then + TLS_CFG="certificate-authority-data: $(cat $KUBE_CA_FILE | base64 | tr -d '\n')" + fi + + # Get the contents of service account token. + SERVICEACCOUNT_TOKEN=$(cat $SERVICE_ACCOUNT_TOKEN_PATH) + + SKIP_TLS_VERIFY=${SKIP_TLS_VERIFY:-false} + + # Write a kubeconfig file for the CNI plugin. Do this + # to skip TLS verification for now. We should eventually support + # writing more complete kubeconfig files. 
This is only used + # if the provided CNI network config references it. + touch $MULTUS_TEMP_KUBECONFIG + chmod ${KUBECONFIG_MODE:-600} $MULTUS_TEMP_KUBECONFIG + # Write the kubeconfig to a temp file first. + timenow=$(date) + cat > $MULTUS_TEMP_KUBECONFIG < $MULTUS_TEMP_CONFIG << EOF { "cniVersion": "0.3.1", "name": "multus-cni-network", "type": "multus", "confDir": "/etc/cni/net.d/" , - "logLevel": "{{ .Values.multus.multusCNI.log.logLevel }}", - "logFile": "{{ .Values.multus.multusCNI.log.logFile }}", + "logLevel": "debug", + "logFile": "/var/log/multus.log", "capabilities": { "portMappings": true, "bandwidth": true }, "namespaceIsolation": false, - "clusterNetwork": "{{ .Values.multus.multusCNI.defaultCniCRName }}", + "clusterNetwork": "$MULTUS_CLUSTER_NETWORK", "defaultNetworks": [], - "multusNamespace": "{{ .Release.Namespace }}", + "multusNamespace": "$MULTUS_NAMESPACE", "systemNamespaces": [], "kubeconfig": "/etc/cni/net.d/multus.d/multus.kubeconfig" } -{{- end }} + EOF + + if [ -z "${MULTUS_CLUSTER_NETWORK}" ]; then + log "ENV MULTUS_CLUSTER_NETWORK is empty, Detecting default cni in the ${CNI_CONF_DIR}" + DEFAULT_CNI_FILEPATH=$(ls -l ${CNI_CONF_DIR} | grep ^- | grep -v -i multus | awk '{print $9}' | grep -E '(*\.conf|*\.conflist|*\.json)' | head -n 1) + if [ -z "$DEFAULT_CNI_FILEPATH" ] ; then + error "No default cni file found in ${CNI_CONF_DIR}, please install your default cni in the cluster first" && exit 1 + fi + + log "Found the default-cni file: ${DEFAULT_CNI_FILEPATH}" + log "cat /host/etc/cni/net.d/${DEFAULT_CNI_FILEPATH}:" + cat /host/etc/cni/net.d/${DEFAULT_CNI_FILEPATH} + + echo "" + DEFAULT_CNI_NAME=$(grep '"name":' ${CNI_CONF_DIR}/${DEFAULT_CNI_FILEPATH} | awk '{print $2}' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | tr -d ',' | tr -d '"') + if [ -z "$DEFAULT_CNI_NAME" ] ; then + error "The name fleid shouldn't be empty, please check the default cni: ${DEFAULT_CNI_FILEPATH}" && exit 1 + fi + + log "Updating the clusterNetwork of the multus-cni config to $DEFAULT_CNI_NAME" + sed -i "s?\"clusterNetwork\": \"\"?\"clusterNetwork\": \"${DEFAULT_CNI_NAME}\"?g" /tmp/00-multus.conf + else + log "User set multus ClusterNetwork: $MULTUS_CLUSTER_NETWORK" + fi + + generateKubeConfig + log "multus kubeconfig is generated." + + cp $MULTUS_TEMP_CONFIG /host/etc/cni/net.d + log "multus config file ${MULTUS_TEMP_CONFIG} is copied to ${CNI_CONF_DIR}." + log "cat ${CNI_CONF_DIR}/00-multus.conf" + cat ${CNI_CONF_DIR}/00-multus.conf + + log "Entering watch loop..." + while true; do + + # Check the md5sum of the service account token and ca. + svcaccountsum=$(md5sum $SERVICE_ACCOUNT_TOKEN_PATH | awk '{print $1}') + casum=$(md5sum $KUBE_CA_FILE | awk '{print $1}') + if [ "$svcaccountsum" != "$LAST_SERVICEACCOUNT_MD5SUM" ] || [ "$casum" != "$LAST_KUBE_CA_FILE_MD5SUM" ]; then + log "Detected service account or CA file change, regenerating kubeconfig..." + generateKubeConfig + fi + + # todo: watch the default cni file is changed. + sleep 10 + done +{{- end }} \ No newline at end of file diff --git a/charts/spiderpool/templates/daemonset.yaml b/charts/spiderpool/templates/daemonset.yaml index af672a82b1..2478b35628 100644 --- a/charts/spiderpool/templates/daemonset.yaml +++ b/charts/spiderpool/templates/daemonset.yaml @@ -106,6 +106,22 @@ spec: - name: cni-bin-path mountPath: /host/opt/cni/bin {{- end }} + {{- if .Values.multus.multusCNI.install }} + - name: install-multus-binary + image: {{ include "spiderpool.multus.image" . 
| quote }} + imagePullPolicy: IfNotPresent + command: + - /install_multus + args: + - --type + - thin + securityContext: + privileged: true + volumeMounts: + - mountPath: /host/opt/cni/bin + mountPropagation: Bidirectional + name: cni-bin-path + {{- end }} containers: - name: {{ .Values.spiderpoolAgent.name | trunc 63 | trimSuffix "-" }} image: {{ include "spiderpool.spiderpoolAgent.image" . | quote }} @@ -234,21 +250,30 @@ spec: {{- end }} {{- if .Values.multus.multusCNI.install }} - name: multus-cni - imagePullPolicy: {{ .Values.multus.multusCNI.image.pullPolicy }} - image: {{ include "spiderpool.multus.image" . | quote }} + image: {{ include "spiderpool.spiderpoolAgent.image" . | quote }} + imagePullPolicy: {{ .Values.spiderpoolAgent.image.pullPolicy }} command: - - "/bin/sh" - - "-c" - - | - ITEM="multus" - rm -f /host/opt/cni/bin/${ITEM}.old || true - ( [ -f "/host/opt/cni/bin/${ITEM}" ] && mv /host/opt/cni/bin/${ITEM} /host/opt/cni/bin/${ITEM}.old ) || true - cp /usr/src/multus-cni/bin/${ITEM} /host/opt/cni/bin/${ITEM} - rm -f /host/opt/cni/bin/${ITEM}.old &>/dev/null || true - ./entrypoint.sh --multus-conf-file=/tmp/multus-conf/00-multus.conf \ - --cni-version=0.3.1 + - "/home/entrypoint.sh" securityContext: privileged: true + env: + - name: MULTUS_CLUSTER_NETWORK + valueFrom: + configMapKeyRef: + key: clusterNetwork + name: spiderpool-conf + - name: MULTUS_NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + resources: + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi {{- if .Values.multus.multusCNI.uninstall }} lifecycle: preStop: @@ -264,11 +289,8 @@ spec: volumeMounts: - name: cni mountPath: /host/etc/cni/net.d - - name: cni-bin-path - mountPath: /host/opt/cni/bin - mountPropagation: Bidirectional - - name: multus-cfg - mountPath: /tmp/multus-conf + - mountPath: /home + name: multus-entrypoint {{- if .Values.multus.multusCNI.extraVolumes }} {{- include "tplvalues.render" ( dict "value" .Values.multus.multusCNI.extraVolumeMounts "context" $ ) | nindent 12 }} {{- end }} @@ -304,6 +326,13 @@ spec: items: - key: cni-conf.json path: 00-multus.conf + - name: multus-entrypoint + configMap: + name: {{ .Values.multus.multusCNI.name | trunc 63 | trimSuffix "-" }}-entrypoint + defaultMode: 511 + items: + - key: entrypoint.sh + path: entrypoint.sh {{- end }} {{- if .Values.spiderpoolAgent.extraVolumeMounts }} {{- include "tplvalues.render" ( dict "value" .Values.spiderpoolAgent.extraVolumeMounts "context" $ ) | nindent 6 }} diff --git a/charts/spiderpool/templates/pod.yaml b/charts/spiderpool/templates/pod.yaml index 5e178ea694..8ccf724b87 100644 --- a/charts/spiderpool/templates/pod.yaml +++ b/charts/spiderpool/templates/pod.yaml @@ -80,19 +80,13 @@ spec: {{- end }} - name: SPIDERPOOL_INIT_ENABLE_MULTUS_CONFIG value: {{ .Values.multus.enableMultusConfig | quote }} - - name: SPIDERPOOL_INIT_INSTALL_MULTUS - value: {{ .Values.multus.multusCNI.install | quote }} - name: SPIDERPOOL_INIT_DEFAULT_CNI_NAME value: {{ .Values.multus.multusCNI.defaultCniCRName | quote }} - name: SPIDERPOOL_INIT_DEFAULT_CNI_NAMESPACE value: {{ .Release.Namespace | quote }} - - name: SPIDERPOOL_INIT_MULTUS_CONFIGMAP - value: {{ .Values.multus.multusCNI.name | trunc 63 | trimSuffix "-" | quote }} {{- if eq .Values.multus.multusCNI.defaultCniCRName "" }} - name: SPIDERPOOL_INIT_DEFAULT_CNI_DIR value: {{ .Values.global.cniConfHostPath | quote }} - - name: SPIDERPOOL_INIT_READINESS_FILE - value: "/etc/spiderpool/ready" volumeMounts: - name: cni mountPath: {{ 
.Values.global.cniConfHostPath }} diff --git a/charts/spiderpool/values.yaml b/charts/spiderpool/values.yaml index c355f4486c..70ac21eac0 100644 --- a/charts/spiderpool/values.yaml +++ b/charts/spiderpool/values.yaml @@ -233,8 +233,7 @@ multus: digest: "" ## @param multus.multusCNI.image.tag the multus-CNI image tag - tag: v3.9.3 - # tag: v4.0.2-thick + tag: v4.1.4 ## @param multus.multusCNI.image.imagePullSecrets the multus-CNI image imagePullSecrets imagePullSecrets: [] diff --git a/cmd/spiderpool-controller/cmd/crd_manager.go b/cmd/spiderpool-controller/cmd/crd_manager.go index b50ed9ea6d..8b5b02563a 100644 --- a/cmd/spiderpool-controller/cmd/crd_manager.go +++ b/cmd/spiderpool-controller/cmd/crd_manager.go @@ -93,6 +93,6 @@ type _webhookHealthCheck struct{} func (*_webhookHealthCheck) ServeHTTP(writer http.ResponseWriter, request *http.Request) { if request.Method == http.MethodGet { writer.WriteHeader(http.StatusOK) - logger.Info("Webhook health check successful") + logger.Debug("Webhook health check successful") } } diff --git a/cmd/spiderpool-init/cmd/config.go b/cmd/spiderpool-init/cmd/config.go index 65fe70948c..c45c3587b1 100644 --- a/cmd/spiderpool-init/cmd/config.go +++ b/cmd/spiderpool-init/cmd/config.go @@ -90,7 +90,6 @@ type InitDefaultConfig struct { // multuscniconfig enableMultusConfig bool - installMultusCNI bool DefaultCNIDir string DefaultCNIName string DefaultCNINamespace string @@ -280,12 +279,6 @@ func parseENVAsDefault() InitDefaultConfig { logger.Sugar().Fatalf("ENV %s: %s invalid: %v", ENVEnableMultusConfig, enableMultusConfig, err) } - installMultusCNI := strings.ReplaceAll(os.Getenv(ENVInstallMultusCNI), "\"", "") - config.installMultusCNI, err = strconv.ParseBool(installMultusCNI) - if err != nil { - logger.Sugar().Fatalf("ENV %s: %s invalid: %v", ENVInstallMultusCNI, installMultusCNI, err) - } - config.DefaultCNIDir = strings.ReplaceAll(os.Getenv(ENVDefaultCNIDir), "\"", "") if config.DefaultCNIDir != "" { _, err = os.ReadDir(config.DefaultCNIDir) diff --git a/cmd/spiderpool-init/cmd/multus.go b/cmd/spiderpool-init/cmd/multus.go index e59712bc6e..800aaacea6 100644 --- a/cmd/spiderpool-init/cmd/multus.go +++ b/cmd/spiderpool-init/cmd/multus.go @@ -4,42 +4,12 @@ package cmd import ( "context" - "encoding/json" "fmt" - "os" - "path" - - v1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - apitypes "k8s.io/apimachinery/pkg/types" - controller_client "sigs.k8s.io/controller-runtime/pkg/client" "github.com/spidernet-io/spiderpool/pkg/constant" "github.com/spidernet-io/spiderpool/pkg/utils" ) -// MultusNetConf for cni config file written in json -// Note: please keep this fields be consistent with multus configMap -// in charts/spiderpool/templates/multus/multus-daemonset.yaml -type MultusNetConf struct { - CNIVersion string `json:"cniVersion,omitempty"` - Name string `json:"name,omitempty"` - Type string `json:"type,omitempty"` - ConfDir string `json:"confDir"` - LogLevel string `json:"logLevel"` - LogFile string `json:"logFile"` - Capabilities map[string]bool `json:"capabilities,omitempty"` - // Option to isolate the usage of CR's to the namespace in which a pod resides. 
- NamespaceIsolation bool `json:"namespaceIsolation"` - ClusterNetwork string `json:"clusterNetwork"` - DefaultNetworks []string `json:"defaultNetworks"` - // Option to set the namespace that multus-cni uses (clusterNetwork/defaultNetworks) - MultusNamespace string `json:"multusNamespace"` - // Option to set system namespaces (to avoid to add defaultNetworks) - SystemNamespaces []string `json:"systemNamespaces"` - Kubeconfig string `json:"kubeconfig"` -} - func InitMultusDefaultCR(ctx context.Context, config *InitDefaultConfig, client *CoreClient) error { defaultCNIName, defaultCNIType, err := fetchDefaultCNIName(config.DefaultCNIName, config.DefaultCNIDir) if err != nil { @@ -50,64 +20,6 @@ func InitMultusDefaultCR(ctx context.Context, config *InitDefaultConfig, client return err } - if !config.installMultusCNI { - logger.Sugar().Infof("No install MultusCNI, Ignore update clusterNetwork for multus configMap") - return nil - } - - // get multus configMap - cm, err := getConfigMap(ctx, client, config.DefaultCNINamespace, config.MultusConfigMap) - if err != nil { - logger.Sugar().Errorf("get configMap: %v", err) - return err - } - - var multusConfig MultusNetConf - cniConfig := cm.Data["cni-conf.json"] - if err := json.Unmarshal([]byte(cniConfig), &multusConfig); err != nil { - return fmt.Errorf("failed to unmarshal multus config: %v", err) - } - - if multusConfig.ClusterNetwork == defaultCNIName { - // if clusterNetwork is expected, just return - logger.Sugar().Infof("multus clusterNetwork is %s, don't need to update multus configMap", defaultCNIName) - return nil - } - - oldConfigMap := cm.DeepCopy() - multusConfig.ClusterNetwork = defaultCNIName - configDatas, err := json.Marshal(multusConfig) - if err != nil { - return fmt.Errorf("failed to marshal multus config: %v", err) - } - cm.Data["cni-conf.json"] = string(configDatas) - - logger.Sugar().Infof("Try to patch multus configMap %s: %s", config.MultusConfigMap, configDatas) - if err = client.Patch(ctx, cm, controller_client.MergeFrom(oldConfigMap)); err != nil { - return fmt.Errorf("failed to patch multus configMap: %v", err) - } - - // we need restart spideragent-pod after we patch the configmap, make sure these changes works immediately - if err = restartSpiderAgent(ctx, client, config.AgentName, config.DefaultCNINamespace); err != nil { - return err - } - - logger.Sugar().Infof("successfully restart spiderpool-agent") - return nil -} - -func makeReadinessReady(config *InitDefaultConfig) error { - // tell readness by writing to the file that the spiderpool is ready - readinessDir := path.Dir(config.ReadinessFile) - err := os.MkdirAll(readinessDir, 0644) - if err != nil { - return err - } - - if err = os.WriteFile(config.ReadinessFile, []byte("ready"), 0777); err != nil { - return err - } - logger.Sugar().Infof("success to make spiderpool-init pod's readiness to ready") return nil } @@ -123,31 +35,3 @@ func fetchDefaultCNIName(defaultCNIName, cniDir string) (cniName, cniType string } return parseCNIFromConfig(defaultCNIConfPath) } - -func getConfigMap(ctx context.Context, client *CoreClient, namespace, name string) (*corev1.ConfigMap, error) { - var cm corev1.ConfigMap - if err := client.Get(ctx, apitypes.NamespacedName{Name: name, Namespace: namespace}, &cm); err != nil { - return nil, err - } - - return &cm, nil -} - -func restartSpiderAgent(ctx context.Context, client *CoreClient, name, ns string) error { - logger.Sugar().Infof("Try to restart spiderpoo-agent daemonSet: %s/%s", ns, name) - - var spiderAgent v1.DaemonSet - var err 
error - if err = client.Get(ctx, apitypes.NamespacedName{Name: name, Namespace: ns}, &spiderAgent); err != nil { - return err - } - - if err = client.DeleteAllOf(ctx, &corev1.Pod{}, controller_client.InNamespace(ns), controller_client.MatchingLabels(spiderAgent.Spec.Template.Labels)); err != nil { - return err - } - - if err = client.WaitPodListReady(ctx, ns, spiderAgent.Spec.Template.Labels); err != nil { - return err - } - return nil -} diff --git a/cmd/spiderpool-init/cmd/root.go b/cmd/spiderpool-init/cmd/root.go index 151ac337b2..672faa4603 100644 --- a/cmd/spiderpool-init/cmd/root.go +++ b/cmd/spiderpool-init/cmd/root.go @@ -153,9 +153,5 @@ func Execute() { } } - if err = makeReadinessReady(&config); err != nil { - logger.Fatal(err.Error()) - } - logger.Info("Finish init") } diff --git a/pkg/coordinatormanager/coordinator_informer.go b/pkg/coordinatormanager/coordinator_informer.go index 0f6da33d98..0c636a1b99 100644 --- a/pkg/coordinatormanager/coordinator_informer.go +++ b/pkg/coordinatormanager/coordinator_informer.go @@ -704,18 +704,24 @@ func (cc *CoordinatorController) fetchCiliumIPPools(coordinator *spiderpoolv2bet podCIDR := make([]string, 0, len(ipPoolList)) for _, p := range ipPoolList { - if p.DeletionTimestamp == nil { + if p.DeletionTimestamp != nil { + continue + } + + if p.Spec.IPv4 != nil { for _, cidr := range p.Spec.IPv4.CIDRs { podCIDR = append(podCIDR, string(cidr)) } + } + if p.Spec.IPv6 != nil { for _, cidr := range p.Spec.IPv6.CIDRs { podCIDR = append(podCIDR, string(cidr)) } } } - InformerLogger.Sugar().Debugf("Cilium IPPools CIDR: %v", ipPoolList) + InformerLogger.Sugar().Debugf("Cilium IPPools CIDR: %v", podCIDR) if coordinator.Status.Phase == Synced && reflect.DeepEqual(coordinator.Status.OverlayPodCIDR, podCIDR) { return nil } diff --git a/test/Makefile b/test/Makefile index 361e086377..8b6a51a0d3 100644 --- a/test/Makefile +++ b/test/Makefile @@ -228,7 +228,10 @@ setup_kruise: HELM_OPTION=" --wait --timeout 20m --debug --set manager.image.repository=$(E2E_OPENKRUISE_IMAGE) " ; \ HELM_OPTION+=" --version $(E2E_OPENKRUISE_VERSION) " ; \ helm upgrade --install kruise openkruise/kruise $${HELM_OPTION} \ - --kubeconfig $(E2E_KUBECONFIG) || { KIND_CLUSTER_NAME=$(E2E_CLUSTER_NAME) ./scripts/debugEnv.sh $(E2E_KUBECONFIG) "detail" "$(E2E_LOG_FILE)" ; exit 1 ; } ; \ + --kubeconfig $(E2E_KUBECONFIG) || { \ + kubectl describe pod -n kruise-system --kubeconfig $(E2E_KUBECONFIG) ; \ + KIND_CLUSTER_NAME=$(E2E_CLUSTER_NAME) ./scripts/debugEnv.sh $(E2E_KUBECONFIG) "detail" "$(E2E_LOG_FILE)" ; exit 1 ; \ + } ; \ .PHONY: setup_spiderpool setup_spiderpool: @@ -438,7 +441,8 @@ helm_upgrade_spiderpool: --set spiderpoolInit.image.registry="" \ --set spiderpoolInit.image.repository=$(SPIDERPOOL_CONTROLLER_IMAGE_NAME) \ --set spiderpoolInit.image.tag=$(E2E_SPIDERPOOL_TAG) \ - --set multus.multusCNI.uninstall=false " ; \ + --set multus.multusCNI.uninstall=false \ + --set multus.multusCNI.image.tag=$(E2E_MULTUS_TAG) " ; \ if [ "$(E2E_SPIDERPOOL_ENABLE_SUBNET)" == "true" ] ; then \ HELM_OPTION+=" --set ipam.spiderSubnet.enable=true " ; \ HELM_OPTION+=" --set ipam.spiderSubnet.autoPool.enable=true " ; \ @@ -460,15 +464,15 @@ helm_upgrade_spiderpool: echo "upgrade spiderpool with image $(SPIDERPOOL_AGENT_IMAGE_NAME):$(E2E_SPIDERPOOL_TAG) and $(SPIDERPOOL_CONTROLLER_IMAGE_NAME):$(E2E_SPIDERPOOL_TAG) " ; \ set -x ; \ helm --kubeconfig $(E2E_KUBECONFIG) upgrade $(RELEASE_NAME) $(ROOT_DIR)/charts/spiderpool \ - $${HELM_OPTION} \ + $${HELM_OPTION} \ -n $(RELEASE_NAMESPACE) --debug 
--reuse-values ; \ - cd $(ROOT_DIR)/charts/spiderpool/crds ; \ + cd $(ROOT_DIR)/charts/spiderpool/crds ; \ ls | grep '\.yaml$$' | xargs -I {} kubectl apply -f {} --kubeconfig $(E2E_KUBECONFIG) ; \ kubectl wait --for=condition=ready -l app.kubernetes.io/instance=spiderpool --timeout=300s pod -n $(RELEASE_NAMESPACE) --kubeconfig $(E2E_KUBECONFIG) || true; \ kubectl scale deploy -n $(RELEASE_NAMESPACE) -l app.kubernetes.io/component=spiderpool-controller --replicas=2 --kubeconfig $(E2E_KUBECONFIG); \ kubectl wait --for=condition=ready -l app.kubernetes.io/component=spiderpool-controller --timeout=300s pod -n $(RELEASE_NAMESPACE) --kubeconfig $(E2E_KUBECONFIG) || true; \ - kubectl get mutatingwebhookconfigurations spiderpool-controller -o yaml --kubeconfig $(E2E_KUBECONFIG) ;\ - helm --kubeconfig $(E2E_KUBECONFIG) list -A ; \ + kubectl get mutatingwebhookconfigurations spiderpool-controller -o yaml --kubeconfig $(E2E_KUBECONFIG) ; \ + helm --kubeconfig $(E2E_KUBECONFIG) list -A .PHONY: clean clean: diff --git a/test/Makefile.defs b/test/Makefile.defs index edb436cf86..c9a834b3d5 100644 --- a/test/Makefile.defs +++ b/test/Makefile.defs @@ -14,7 +14,7 @@ K8S_IPV4_SERVICE_CIDR = 10.233.0.0/18 K8S_IPV6_SERVICE_CIDR = fd00:10:233::/116 CLUSTER_POD_SUBNET_V4 = 10.233.64.0/18 -CLUSTER_POD_SUBNET_V6 = fd00:10:233:64::/64 +CLUSTER_POD_SUBNET_V6 = fd00:10:233:64::/60 CALICO_CLUSTER_POD_SUBNET_V4 = 10.243.64.0/18 CALICO_CLUSTER_POD_SUBNET_V6 = fd00:10:243::/112 CILIUM_CLUSTER_POD_SUBNET_V4 = 10.244.64.0/18 @@ -131,6 +131,7 @@ MULTUS_DEFAULT_CNI_VLAN100 := macvlan-vlan100 MULTUS_DEFAULT_CNI_VLAN200 := macvlan-vlan200 MULTUS_OVS_CNI_VLAN30 := ovs-vlan30 MULTUS_OVS_CNI_VLAN40 := ovs-vlan40 +E2E_MULTUS_TAG ?= v4.1.4 ifeq ($(E2E_CHINA_IMAGE_REGISTRY),true) E2E_MULTUS_IMAGE_REGISTER ?= ghcr.m.daocloud.io diff --git a/test/e2e/common/constant.go b/test/e2e/common/constant.go index 7edf7fb617..f768b90f6e 100644 --- a/test/e2e/common/constant.go +++ b/test/e2e/common/constant.go @@ -57,7 +57,7 @@ var ( // multus CNI MultusDefaultNetwork = "v1.multus-cni.io/default-network" MultusNetworks = "k8s.v1.cni.cncf.io/networks" - PodMultusNetworksStatus = "k8s.v1.cni.cncf.io/networks-status" + PodMultusNetworksStatus = "k8s.v1.cni.cncf.io/network-status" CalicoCNIName string = "k8s-pod-network" CiliumCNIName string = "cilium" diff --git a/test/e2e/common/spiderpool.go b/test/e2e/common/spiderpool.go index c1ac8b309a..b2cdb4441f 100644 --- a/test/e2e/common/spiderpool.go +++ b/test/e2e/common/spiderpool.go @@ -23,8 +23,8 @@ import ( "github.com/spidernet-io/spiderpool/pkg/constant" ip "github.com/spidernet-io/spiderpool/pkg/ip" "github.com/spidernet-io/spiderpool/pkg/types" - corev1 "k8s.io/api/core/v1" + api_errors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -469,6 +469,17 @@ func GenerateExampleIpv4poolObject(ipNum int) (string, *v1.SpiderIPPool) { return v4PoolName, iPv4PoolObj } +func PatchConfigMap(f *frame.Framework, oldcm, newcm *corev1.ConfigMap, opts ...client.PatchOption) error { + mergePatch := client.MergeFrom(oldcm) + d, err := mergePatch.Data(newcm) + GinkgoWriter.Printf("patch configMap: %v. \n", string(d)) + if err != nil { + return fmt.Errorf("failed to generate patch, err is %v", err) + } + + return f.PatchResource(newcm, mergePatch, opts...) 
+} + func GenerateExampleIpv6poolObject(ipNum int) (string, *v1.SpiderIPPool) { if ipNum < 1 || ipNum > 65533 { GinkgoWriter.Println("the IP range should be between 1 and 65533") diff --git a/test/e2e/spidercoordinator/spidercoordinator_test.go b/test/e2e/spidercoordinator/spidercoordinator_test.go index a36818310b..27ed2f80ad 100644 --- a/test/e2e/spidercoordinator/spidercoordinator_test.go +++ b/test/e2e/spidercoordinator/spidercoordinator_test.go @@ -729,8 +729,10 @@ var _ = Describe("SpiderCoordinator", Label("spidercoordinator", "overlay"), Ser spcCopy.Spec.HostRuleTable = ptr.To(500) Expect(PatchSpiderCoordinator(spcCopy, spc)).NotTo(HaveOccurred()) - GinkgoWriter.Println("delete namespace: ", nsName) - Expect(frame.DeleteNamespace(nsName)).NotTo(HaveOccurred()) + if !CurrentSpecReport().Failed() { + GinkgoWriter.Println("delete namespace: ", nsName) + Expect(frame.DeleteNamespace(nsName)).NotTo(HaveOccurred()) + } }) }) diff --git a/test/scripts/install-default-cni.sh b/test/scripts/install-default-cni.sh index c776853dc2..dda6494e66 100755 --- a/test/scripts/install-default-cni.sh +++ b/test/scripts/install-default-cni.sh @@ -8,9 +8,12 @@ set -o errexit -o nounset -o pipefail OS=$(uname | tr 'A-Z' 'a-z') SED_COMMAND=sed -CURRENT_FILENAME=$( basename $0 ) -CURRENT_DIR_PATH=$(cd $(dirname $0); pwd) -PROJECT_ROOT_PATH=$( cd ${CURRENT_DIR_PATH}/../.. && pwd ) +CURRENT_FILENAME=$(basename $0) +CURRENT_DIR_PATH=$( + cd $(dirname $0) + pwd +) +PROJECT_ROOT_PATH=$(cd ${CURRENT_DIR_PATH}/../.. && pwd) [ -z "$E2E_CLUSTER_NAME" ] && echo "error, miss E2E_CLUSTER_NAME " && exit 1 [ -z "$E2E_IP_FAMILY" ] && echo "error, miss E2E_IP_FAMILY " && exit 1 @@ -54,161 +57,232 @@ CILIUM_CLUSTER_POD_SUBNET_V6=${CILIUM_CLUSTER_POD_SUBNET_V6:-"fd00:10:244::/112" [ -z "${HTTP_PROXY}" ] || export https_proxy=${HTTP_PROXY} function install_calico() { - cp ${PROJECT_ROOT_PATH}/test/yamls/calico.yaml $CLUSTER_PATH/calico.yaml - if [ -z "${CALICO_VERSION}" ]; then - [ -n "${HTTP_PROXY}" ] && { CALICO_VERSION_INFO=$(curl --retry 3 --retry-delay 5 -x "${HTTP_PROXY}" -s https://api.github.com/repos/projectcalico/calico/releases/latest); echo ${CALICO_VERSION_INFO}; CALICO_VERSION=$(echo ${CALICO_VERSION_INFO} | jq -r '.tag_name'); } - [ -z "${HTTP_PROXY}" ] && { CALICO_VERSION_INFO=$(curl --retry 3 --retry-delay 5 -s https://api.github.com/repos/projectcalico/calico/releases/latest ); echo ${CALICO_VERSION_INFO}; CALICO_VERSION=$(echo ${CALICO_VERSION_INFO} | jq -r '.tag_name'); } - [ "${CALICO_VERSION}" == "null" ] && { echo "failed to get the calico version, will try to use default version."; CALICO_VERSION=${DEFAULT_CALICO_VERSION}; } - else - CALICO_VERSION=${CALICO_VERSION} - fi - echo "install calico version ${CALICO_VERSION}" - [ -n "${HTTP_PROXY}" ] && curl --retry 3 -x "${HTTP_PROXY}" -Lo ${CALICO_YAML} https://raw.githubusercontent.com/projectcalico/calico/${CALICO_VERSION}/manifests/calico.yaml - [ -z "${HTTP_PROXY}" ] && curl --retry 3 -Lo ${CALICO_YAML} https://raw.githubusercontent.com/projectcalico/calico/${CALICO_VERSION}/manifests/calico.yaml - - # set registry - if [ -n "${CALICO_IMAGE_REPO}" ]; then - grep -q -e ".*image:.*docker.io" ${CALICO_YAML} || { echo "failed find image"; exit 1; } - ${SED_COMMAND} -i -E 's?(.*image:.*)(docker.io)(.*)?\1'"${CALICO_IMAGE_REPO}"'\3?g' ${CALICO_YAML} - fi - - # accelerate local cluster , in case that it times out to wait calico ready - IMAGE_LIST=`cat ${CALICO_YAML} | grep "image: " | awk '{print \$2}' | sort | uniq | tr '\n' ' ' | tr '\r' ' ' ` - echo 
"image: ${IMAGE_LIST}" - for IMAGE in ${IMAGE_LIST} ; do - echo "load calico image ${IMAGE} to kind cluster" - docker pull ${IMAGE} - kind load docker-image ${IMAGE} --name ${E2E_CLUSTER_NAME} - done - - export KUBECONFIG=${E2E_KUBECONFIG} - kubectl apply -f ${CALICO_YAML} - sleep 10 - - kubectl wait --for=condition=ready -l k8s-app=calico-node --timeout=${INSTALL_TIME_OUT} pod -n kube-system - kubectl get po -n kube-system - echo -e "\033[35m Succeed to install Calico \033[0m" - - echo -e "\033[35m Patch Calico \033[0m" - kubectl -n kube-system get cm calico-config -oyaml > ${CALICO_CONFIG} - kubectl -n kube-system get ds calico-node -oyaml > ${CALICO_NODE} - - case ${E2E_IP_FAMILY} in - ipv4) - # set configmap - configYaml=$(yq '.data.cni_network_config' ${CALICO_CONFIG} | yq '.plugins[0].ipam = {"type": "calico-ipam", "assign_ipv4": "true", "assign_ipv6": "false"}' --output-format=json) - configYaml=$configYaml yq e '.data.cni_network_config |= strenv(configYaml)' -i ${CALICO_CONFIG} - ${SED_COMMAND} -i 's/"mtu": "__CNI_MTU__"/"mtu": __CNI_MTU__/g' ${CALICO_CONFIG} - kubectl -n kube-system patch cm calico-config --patch "$(cat ${CALICO_CONFIG})" || { echo "failed to patch calico configmap"; exit 1; } - ;; - ipv6) - # set configmap - configYaml=$(yq '.data.cni_network_config' ${CALICO_CONFIG} | yq '.plugins[0].ipam = {"type": "calico-ipam", "assign_ipv4": "false", "assign_ipv6": "true"}' --output-format=json) - configYaml=$configYaml yq e '.data.cni_network_config |= strenv(configYaml)' -i ${CALICO_CONFIG} - ${SED_COMMAND} -i 's/"mtu": "__CNI_MTU__"/"mtu": __CNI_MTU__/g' ${CALICO_CONFIG} - kubectl -n kube-system patch cm calico-config --patch "$(cat ${CALICO_CONFIG})" || { echo "failed to patch calico configmap"; exit 1; } - - # set calico-node env - grep -q "FELIX_IPV6SUPPORT" ${CALICO_NODE} || { echo "failed find FELIX_IPV6SUPPORT"; exit 1; } - ${SED_COMMAND} -i -E '/FELIX_IPV6SUPPORT/{n;s/value: "false"/value: "true"/}' ${CALICO_NODE} - - grep -q "value: autodetect" ${CALICO_NODE} || { echo "failed find autodetect"; exit 1; } - ${SED_COMMAND} -i '/value: autodetect/a\ - name: IP6\n\ value: autodetect' ${CALICO_NODE} - kubectl -n kube-system patch ds calico-node --patch "$(cat ${CALICO_NODE})" || { echo "failed to patch calico-node"; exit 1; } - ;; - dual) - # set configmap - configYaml=$(yq '.data.cni_network_config' ${CALICO_CONFIG} | yq '.plugins[0].ipam = {"type": "calico-ipam", "assign_ipv4": "true", "assign_ipv6": "true"}' --output-format=json) - configYaml=$configYaml yq e '.data.cni_network_config |= strenv(configYaml)' -i ${CALICO_CONFIG} - ${SED_COMMAND} -i 's/"mtu": "__CNI_MTU__"/"mtu": __CNI_MTU__/g' ${CALICO_CONFIG} - kubectl -n kube-system patch cm calico-config --patch "$(cat ${CALICO_CONFIG})" || { echo "failed to patch calico configmap"; exit 1; } - - # set calico-node env - grep -q "FELIX_IPV6SUPPORT" ${CALICO_NODE} || { echo "failed find FELIX_IPV6SUPPORT"; exit 1; } - ${SED_COMMAND} -i -E '/FELIX_IPV6SUPPORT/{n;s/value: "false"/value: "true"/}' ${CALICO_NODE} - grep -q "value: autodetect" ${CALICO_NODE} || { echo "failed find autodetect"; exit 1; } - ${SED_COMMAND} -i '/value: autodetect/a\ - name: IP6\n\ value: autodetect' ${CALICO_NODE} - kubectl -n kube-system patch ds calico-node --patch "$(cat ${CALICO_NODE})" || { echo "failed to patch calico-node"; exit 1; } - ;; - *) - echo "the value of E2E_IP_FAMILY: ipv4 or ipv6 or dual" - exit 1 - esac - # there no default felixconfigurations.crd.projectcalico.org in latest calico version 
(https://github.com/projectcalico/calico/releases/tag/v3.29.0) - kubectl patch felixconfigurations.crd.projectcalico.org default --type='merge' -p '{"spec":{"chainInsertMode":"Append"}}' || true - - # restart calico pod - kubectl -n kube-system delete pod -l k8s-app=calico-node --force --grace-period=0 && sleep 3 - kubectl wait --for=condition=ready -l k8s-app=calico-node --timeout=${INSTALL_TIME_OUT} pod -n kube-system - kubectl -n kube-system delete pod -l k8s-app=calico-kube-controllers --force --grace-period=0 && sleep 3 - kubectl wait --for=condition=ready -l k8s-app=calico-kube-controllers --timeout=${INSTALL_TIME_OUT} pod -n kube-system - echo -e "\033[35m ===> Succeed to patch calico \033[0m" - - # Update calico's podcidr so that it is inconsistent with the cluster's podcidr. - case ${E2E_IP_FAMILY} in - ipv4) - kubectl patch ippools default-ipv4-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV4POOL_CIDR}"'"}}' --type=merge - ;; - ipv6) - kubectl delete ippools default-ipv4-ippool --force - kubectl patch ippools default-ipv6-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV6POOL_CIDR}"'"}}' --type=merge - ;; - dual) - kubectl patch ippools default-ipv4-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV4POOL_CIDR}"'"}}' --type=merge - kubectl patch ippools default-ipv6-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV6POOL_CIDR}"'"}}' --type=merge - ;; - *) - echo "the value of E2E_IP_FAMILY: ipv4 or ipv6 or dual" - exit 1 - esac - - echo -e "\033[35m ===> clean tmp \033[0m" - rm -rf ${DEST_CALICO_YAML_DIR} + cp ${PROJECT_ROOT_PATH}/test/yamls/calico.yaml $CLUSTER_PATH/calico.yaml + if [ -z "${CALICO_VERSION}" ]; then + [ -n "${HTTP_PROXY}" ] && { + CALICO_VERSION_INFO=$(curl --retry 3 --retry-delay 5 -x "${HTTP_PROXY}" -s https://api.github.com/repos/projectcalico/calico/releases/latest) + echo ${CALICO_VERSION_INFO} + CALICO_VERSION=$(echo ${CALICO_VERSION_INFO} | jq -r '.tag_name') + } + [ -z "${HTTP_PROXY}" ] && { + CALICO_VERSION_INFO=$(curl --retry 3 --retry-delay 5 -s https://api.github.com/repos/projectcalico/calico/releases/latest) + echo ${CALICO_VERSION_INFO} + CALICO_VERSION=$(echo ${CALICO_VERSION_INFO} | jq -r '.tag_name') + } + [ "${CALICO_VERSION}" == "null" ] && { + echo "failed to get the calico version, will try to use default version." 
+ CALICO_VERSION=${DEFAULT_CALICO_VERSION} + } + else + CALICO_VERSION=${CALICO_VERSION} + fi + echo "install calico version ${CALICO_VERSION}" + [ -n "${HTTP_PROXY}" ] && curl --retry 3 -x "${HTTP_PROXY}" -Lo ${CALICO_YAML} https://raw.githubusercontent.com/projectcalico/calico/${CALICO_VERSION}/manifests/calico.yaml + [ -z "${HTTP_PROXY}" ] && curl --retry 3 -Lo ${CALICO_YAML} https://raw.githubusercontent.com/projectcalico/calico/${CALICO_VERSION}/manifests/calico.yaml + + # set registry + if [ -n "${CALICO_IMAGE_REPO}" ]; then + grep -q -e ".*image:.*docker.io" ${CALICO_YAML} || { + echo "failed find image" + exit 1 + } + ${SED_COMMAND} -i -E 's?(.*image:.*)(docker.io)(.*)?\1'"${CALICO_IMAGE_REPO}"'\3?g' ${CALICO_YAML} + fi + + # accelerate local cluster , in case that it times out to wait calico ready + IMAGE_LIST=$(cat ${CALICO_YAML} | grep "image: " | awk '{print $2}' | sort | uniq | tr '\n' ' ' | tr '\r' ' ') + echo "image: ${IMAGE_LIST}" + for IMAGE in ${IMAGE_LIST}; do + echo "load calico image ${IMAGE} to kind cluster" + docker pull ${IMAGE} + kind load docker-image ${IMAGE} --name ${E2E_CLUSTER_NAME} + done + + export KUBECONFIG=${E2E_KUBECONFIG} + kubectl apply -f ${CALICO_YAML} + sleep 10 + + kubectl wait --for=condition=ready -l k8s-app=calico-node --timeout=${INSTALL_TIME_OUT} pod -n kube-system + kubectl get po -n kube-system + echo -e "\033[35m Succeed to install Calico \033[0m" + + echo -e "\033[35m Patch Calico \033[0m" + kubectl -n kube-system get cm calico-config -oyaml >${CALICO_CONFIG} + kubectl -n kube-system get ds calico-node -oyaml >${CALICO_NODE} + + case ${E2E_IP_FAMILY} in + ipv4) + # set configmap + configYaml=$(yq '.data.cni_network_config' ${CALICO_CONFIG} | yq '.plugins[0].ipam = {"type": "calico-ipam", "assign_ipv4": "true", "assign_ipv6": "false"}' --output-format=json) + configYaml=$configYaml yq e '.data.cni_network_config |= strenv(configYaml)' -i ${CALICO_CONFIG} + ${SED_COMMAND} -i 's/"mtu": "__CNI_MTU__"/"mtu": __CNI_MTU__/g' ${CALICO_CONFIG} + kubectl -n kube-system patch cm calico-config --patch "$(cat ${CALICO_CONFIG})" || { + echo "failed to patch calico configmap" + exit 1 + } + ;; + ipv6) + # set configmap + configYaml=$(yq '.data.cni_network_config' ${CALICO_CONFIG} | yq '.plugins[0].ipam = {"type": "calico-ipam", "assign_ipv4": "false", "assign_ipv6": "true"}' --output-format=json) + configYaml=$configYaml yq e '.data.cni_network_config |= strenv(configYaml)' -i ${CALICO_CONFIG} + ${SED_COMMAND} -i 's/"mtu": "__CNI_MTU__"/"mtu": __CNI_MTU__/g' ${CALICO_CONFIG} + kubectl -n kube-system patch cm calico-config --patch "$(cat ${CALICO_CONFIG})" || { + echo "failed to patch calico configmap" + exit 1 + } + + # set calico-node env + grep -q "FELIX_IPV6SUPPORT" ${CALICO_NODE} || { + echo "failed find FELIX_IPV6SUPPORT" + exit 1 + } + ${SED_COMMAND} -i -E '/FELIX_IPV6SUPPORT/{n;s/value: "false"/value: "true"/}' ${CALICO_NODE} + + grep -q "value: autodetect" ${CALICO_NODE} || { + echo "failed find autodetect" + exit 1 + } + ${SED_COMMAND} -i '/value: autodetect/a\ - name: IP6\n\ value: autodetect' ${CALICO_NODE} + kubectl -n kube-system patch ds calico-node --patch "$(cat ${CALICO_NODE})" || { + echo "failed to patch calico-node" + exit 1 + } + ;; + dual) + # set configmap + configYaml=$(yq '.data.cni_network_config' ${CALICO_CONFIG} | yq '.plugins[0].ipam = {"type": "calico-ipam", "assign_ipv4": "true", "assign_ipv6": "true"}' --output-format=json) + configYaml=$configYaml yq e '.data.cni_network_config |= strenv(configYaml)' -i ${CALICO_CONFIG} + 
${SED_COMMAND} -i 's/"mtu": "__CNI_MTU__"/"mtu": __CNI_MTU__/g' ${CALICO_CONFIG} + kubectl -n kube-system patch cm calico-config --patch "$(cat ${CALICO_CONFIG})" || { + echo "failed to patch calico configmap" + exit 1 + } + + # set calico-node env + grep -q "FELIX_IPV6SUPPORT" ${CALICO_NODE} || { + echo "failed find FELIX_IPV6SUPPORT" + exit 1 + } + ${SED_COMMAND} -i -E '/FELIX_IPV6SUPPORT/{n;s/value: "false"/value: "true"/}' ${CALICO_NODE} + grep -q "value: autodetect" ${CALICO_NODE} || { + echo "failed find autodetect" + exit 1 + } + ${SED_COMMAND} -i '/value: autodetect/a\ - name: IP6\n\ value: autodetect' ${CALICO_NODE} + kubectl -n kube-system patch ds calico-node --patch "$(cat ${CALICO_NODE})" || { + echo "failed to patch calico-node" + exit 1 + } + ;; + *) + echo "the value of E2E_IP_FAMILY: ipv4 or ipv6 or dual" + exit 1 + ;; + esac + # there no default felixconfigurations.crd.projectcalico.org in latest calico version (https://github.com/projectcalico/calico/releases/tag/v3.29.0) + kubectl patch felixconfigurations.crd.projectcalico.org default --type='merge' -p '{"spec":{"chainInsertMode":"Append"}}' || true + + # restart calico pod + kubectl -n kube-system delete pod -l k8s-app=calico-node --force --grace-period=0 && sleep 3 + kubectl wait --for=condition=ready -l k8s-app=calico-node --timeout=${INSTALL_TIME_OUT} pod -n kube-system + kubectl -n kube-system delete pod -l k8s-app=calico-kube-controllers --force --grace-period=0 && sleep 3 + kubectl wait --for=condition=ready -l k8s-app=calico-kube-controllers --timeout=${INSTALL_TIME_OUT} pod -n kube-system + echo -e "\033[35m ===> Succeed to patch calico \033[0m" + + # Update calico's podcidr so that it is inconsistent with the cluster's podcidr. + case ${E2E_IP_FAMILY} in + ipv4) + kubectl patch ippools default-ipv4-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV4POOL_CIDR}"'"}}' --type=merge + ;; + ipv6) + kubectl delete ippools default-ipv4-ippool --force + kubectl patch ippools default-ipv6-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV6POOL_CIDR}"'"}}' --type=merge + ;; + dual) + kubectl patch ippools default-ipv4-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV4POOL_CIDR}"'"}}' --type=merge + kubectl patch ippools default-ipv6-ippool --patch '{"spec": {"cidr": "'"${CALICO_IPV6POOL_CIDR}"'"}}' --type=merge + ;; + *) + echo "the value of E2E_IP_FAMILY: ipv4 or ipv6 or dual" + exit 1 + ;; + esac + + echo -e "\033[35m ===> clean tmp \033[0m" + rm -rf ${DEST_CALICO_YAML_DIR} } function install_cilium() { - echo -e "\033[35m ===> Start to install cilium \033[0m" - # cni.exclusive using multus-cni need close - # kubeProxyReplacement Enhance kube-proxy (value probe static default: probe) - # k8sServiceHost api-server address - # k8sServicePort api-service port - # bpf.vlanBypass allow vlan traffic to pass - KUBE_PROXY_REPLACEMENT=false - if [ "$DISABLE_KUBE_PROXY" = "true" ]; then - KUBE_PROXY_REPLACEMENT=true - fi - CILIUM_HELM_OPTIONS=" --set cni.exclusive=false \ - --set kubeProxyReplacement=${KUBE_PROXY_REPLACEMENT} \ - --set k8sServiceHost=${E2E_CLUSTER_NAME}-control-plane \ - --set k8sServicePort=6443 \ - --set bpf.vlanBypass={0} " - case ${E2E_IP_FAMILY} in - ipv4) - CILIUM_HELM_OPTIONS+=" --set ipam.operator.clusterPoolIPv4PodCIDRList=${CILIUM_CLUSTER_POD_SUBNET_V4} \ + echo -e "\033[35m ===> Start to install cilium \033[0m" + # cni.exclusive using multus-cni need close + # kubeProxyReplacement Enhance kube-proxy (value probe static default: probe) + # k8sServiceHost api-server address + # k8sServicePort api-service 
port + # bpf.vlanBypass allow vlan traffic to pass + # cilium ipamMode: multi-pool required routingMode=native and kubeProxyReplacement + CILIUM_HELM_OPTIONS=" --set cni.exclusive=false \ + --set k8sServiceHost=${E2E_CLUSTER_NAME}-control-plane \ + --set k8sServicePort=6443 \ + --set bpf.vlanBypass={0} " + if [ "$DISABLE_KUBE_PROXY" = "true" ]; then + CILIUM_HELM_OPTIONS+=" --set kubeProxyReplacement=true \ + --set routingMode=native \ + --set ipam.mode=multi-pool \ + --set nodeinit.enabled=true \ + --set autoDirectNodeRoutes=true \ + --set bpf.masquerade=true \ + --set endpointRoutes.enabled=true\ + " + fi + case ${E2E_IP_FAMILY} in + ipv4) + CILIUM_HELM_OPTIONS+=" --set ipam.operator.clusterPoolIPv4PodCIDRList=${CILIUM_CLUSTER_POD_SUBNET_V4} \ --set ipv4.enabled=true \ --set ipv6.enabled=false " - ;; - ipv6) - CILIUM_HELM_OPTIONS+=" --set ipam.operator.clusterPoolIPv6PodCIDRList=${CILIUM_CLUSTER_POD_SUBNET_V6} \ - --set ipv4.enabled=false \ - --set ipv6.enabled=true \ - --set ipv6NativeRoutingCIDR=${CILIUM_CLUSTER_POD_SUBNET_V6} \ - --set autoDirectNodeRoutes=true \ - --set enableIPv6Masquerade=true \ - --set routingMode=native " - ;; - dual) - CILIUM_HELM_OPTIONS+=" --set ipam.operator.clusterPoolIPv4PodCIDRList=${CILIUM_CLUSTER_POD_SUBNET_V4} \ - --set ipam.operator.clusterPoolIPv6PodCIDRList=${CILIUM_CLUSTER_POD_SUBNET_V6} \ - --set ipv4.enabled=true \ - --set ipv6.enabled=true " - ;; - *) - echo "the value of E2E_IP_FAMILY: ipv4 or ipv6 or dual" - exit 1 - esac - - CILIUM_HELM_OPTIONS+=" \ + if [ "$DISABLE_KUBE_PROXY" = "true" ]; then + # run for multi-pool mode + CILIUM_HELM_OPTIONS+=" --set ipv4NativeRoutingCIDR=${CILIUM_CLUSTER_POD_SUBNET_V4} \ + --set ipam.operator.autoCreateCiliumPodIPPools.default.ipv4.cidrs=${CILIUM_CLUSTER_POD_SUBNET_V4} \ + --set ipam.operator.autoCreateCiliumPodIPPools.default.ipv4.maskSize=26 \ + --set enableIPv4Masquerade=true " + fi + ;; + ipv6) + CILIUM_HELM_OPTIONS+=" --set ipam.operator.clusterPoolIPv6PodCIDRList=${CILIUM_CLUSTER_POD_SUBNET_V6} \ + --set ipv4.enabled=false \ + --set ipv6.enabled=true \ + --set routingMode=native \ + --set ipam.operator.autoCreateCiliumPodIPPools.default.ipv6.cidrs=${CILIUM_CLUSTER_POD_SUBNET_V6} \ + --set ipam.operator.autoCreateCiliumPodIPPools.default.ipv6.maskSize=124 \ + --set ipv6NativeRoutingCIDR=${CILIUM_CLUSTER_POD_SUBNET_V6} \ + --set autoDirectNodeRoutes=true \ + --set enableIPv6Masquerade=true " + ;; + dual) + CILIUM_HELM_OPTIONS+=" --set ipam.operator.clusterPoolIPv4PodCIDRList=${CILIUM_CLUSTER_POD_SUBNET_V4} \ + --set ipam.operator.clusterPoolIPv6PodCIDRList=${CILIUM_CLUSTER_POD_SUBNET_V6} \ + --set ipv4.enabled=true \ + --set ipv6.enabled=true " + if [ "$DISABLE_KUBE_PROXY" = "true" ]; then + # run for multi-pool mode + CILIUM_HELM_OPTIONS+=" --set ipam.operator.autoCreateCiliumPodIPPools.default.ipv4.cidrs=${CILIUM_CLUSTER_POD_SUBNET_V4} \ + --set ipam.operator.autoCreateCiliumPodIPPools.default.ipv4.maskSize=26 \ + --set ipam.operator.autoCreateCiliumPodIPPools.default.ipv6.cidrs=${CILIUM_CLUSTER_POD_SUBNET_V6} \ + --set ipam.operator.autoCreateCiliumPodIPPools.default.ipv6.maskSize=124 \ + --set ipv6NativeRoutingCIDR=${CILIUM_CLUSTER_POD_SUBNET_V6} \ + --set enableIPv4Masquerade=true \ + --set enableIPv6Masquerade=true \ + --set ipv4NativeRoutingCIDR=${CILIUM_CLUSTER_POD_SUBNET_V4} \ + --set ipv6NativeRoutingCIDR=${CILIUM_CLUSTER_POD_SUBNET_V6} " + fi + ;; + *) + echo "the value of E2E_IP_FAMILY: ipv4 or ipv6 or dual" + exit 1 + ;; + esac + + CILIUM_HELM_OPTIONS+=" \ --set 
image.repository=${E2E_CILIUM_IMAGE_REPO}/cilium/cilium \ --set image.useDigest=false \ --set certgen.image.repository=${E2E_CILIUM_IMAGE_REPO}/cilium/certgen \ @@ -223,49 +297,53 @@ function install_cilium() { --set preflight.image.useDigest=false \ --set nodeinit.image.repository=${E2E_CILIUM_IMAGE_REPO}/cilium/startup-script " - echo "CILIUM_HELM_OPTIONS: ${CILIUM_HELM_OPTIONS}" + echo "CILIUM_HELM_OPTIONS: ${CILIUM_HELM_OPTIONS}" - helm repo remove cilium &>/dev/null || true - helm repo add cilium https://helm.cilium.io - helm repo update + helm repo remove cilium &>/dev/null || true + helm repo add cilium https://helm.cilium.io + helm repo update - if [ -n "${CILIUM_VERSION}" ] ; then - CILIUM_HELM_OPTIONS+=" --version ${CILIUM_VERSION} " - fi + if [ -n "${CILIUM_VERSION}" ]; then + CILIUM_HELM_OPTIONS+=" --version ${CILIUM_VERSION} " + fi - HELM_IMAGES_LIST=` helm template test cilium/cilium ${CILIUM_HELM_OPTIONS} | grep " image: " | tr -d '"'| awk '{print $2}' | awk -F "@" '{print $1}' | uniq ` - [ -z "${HELM_IMAGES_LIST}" ] && echo "can't found image of cilium" && exit 1 - LOCAL_IMAGE_LIST=`docker images | awk '{printf("%s:%s\n",$1,$2)}'` + HELM_IMAGES_LIST=$(helm template test cilium/cilium ${CILIUM_HELM_OPTIONS} | grep " image: " | tr -d '"' | awk '{print $2}' | awk -F "@" '{print $1}' | uniq) + [ -z "${HELM_IMAGES_LIST}" ] && echo "can't found image of cilium" && exit 1 + LOCAL_IMAGE_LIST=$(docker images | awk '{printf("%s:%s\n",$1,$2)}') - for CILIUM_IMAGE in ${HELM_IMAGES_LIST}; do - if ! grep ${CILIUM_IMAGE} <<< ${LOCAL_IMAGE_LIST} ; then - echo "===> docker pull ${CILIUM_IMAGE} " - docker pull ${CILIUM_IMAGE} - fi - echo "===> load image ${CILIUM_IMAGE} to kind..." - kind load docker-image ${CILIUM_IMAGE} --name ${E2E_CLUSTER_NAME} - done - - # Install cilium - helm upgrade --install cilium cilium/cilium --wait -n kube-system --debug --kubeconfig ${E2E_KUBECONFIG} ${CILIUM_HELM_OPTIONS} - - # no matching resources found - sleep 3 - kubectl wait --for=condition=ready -l k8s-app=cilium --timeout=${INSTALL_TIME_OUT} pod -n kube-system \ - --kubeconfig ${E2E_KUBECONFIG} - - sleep 10 - - echo -e "\033[35m ===> Succeed to install cilium \033[0m" + for CILIUM_IMAGE in ${HELM_IMAGES_LIST}; do + if ! grep ${CILIUM_IMAGE} <<<${LOCAL_IMAGE_LIST}; then + echo "===> docker pull ${CILIUM_IMAGE} " + docker pull ${CILIUM_IMAGE} + fi + echo "===> load image ${CILIUM_IMAGE} to kind..." 
+ kind load docker-image ${CILIUM_IMAGE} --name ${E2E_CLUSTER_NAME} + done + + # Install cilium + helm upgrade --install cilium cilium/cilium --wait -n kube-system --debug --kubeconfig ${E2E_KUBECONFIG} ${CILIUM_HELM_OPTIONS} + + # no matching resources found + sleep 3 + kubectl wait --for=condition=ready -l app.kubernetes.io/part-of=cilium --timeout=${INSTALL_TIME_OUT} pod -n kube-system \ + --kubeconfig ${E2E_KUBECONFIG} || { + kubectl get pod -n kube-system -l app.kubernetes.io/part-of=cilium --kubeconfig ${E2E_KUBECONFIG} + kubectl describe pod -n kube-system -l app.kubernetes.io/part-of=cilium --kubeconfig ${E2E_KUBECONFIG} + kubectl get po -n kube-system -l app.kubernetes.io/part-of=cilium --kubeconfig ${E2E_KUBECONFIG} --no-headers | grep CrashLoopBackOff | awk '{print $1}' | xargs -I {} kubectl --kubeconfig ${E2E_KUBECONFIG} logs -n kube-system {} + exit 1 + } + + sleep 10 + + echo -e "\033[35m ===> Succeed to install cilium \033[0m" } -if [ "${INSTALL_CALICO}" == "true" ] ; then +if [ "${INSTALL_CALICO}" == "true" ]; then install_calico fi -if [ "${INSTALL_CILIUM}" == "true" ] ; then +if [ "${INSTALL_CILIUM}" == "true" ]; then install_cilium fi - kubectl get po -n kube-system --kubeconfig ${E2E_KUBECONFIG} -owide
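For reference, a minimal usage sketch of the chart values touched by this patch; the release name `spiderpool` and the `kube-system` namespace are assumptions carried over from the README install example above, not requirements of the patch:

    # Illustrative only: install/upgrade with the bundled Multus v4.1.4 thick plugin and leave
    # defaultCniCRName empty so the new entrypoint.sh auto-detects the default CNI in /etc/cni/net.d.
    helm upgrade --install spiderpool spiderpool/spiderpool --wait --namespace kube-system \
      --set multus.multusCNI.install=true \
      --set multus.multusCNI.image.tag=v4.1.4 \
      --set multus.multusCNI.defaultCniCRName=""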