diff --git a/.github/codespell-ignorewords b/.github/codespell-ignorewords index 312ddf3310..9fccb35638 100644 --- a/.github/codespell-ignorewords +++ b/.github/codespell-ignorewords @@ -4,3 +4,4 @@ keypair cyclinder shouldnot Requestor +passt diff --git a/.github/workflows/e2e-init.yaml b/.github/workflows/e2e-init.yaml index 3e6a4e3e0b..6276f3f68c 100644 --- a/.github/workflows/e2e-init.yaml +++ b/.github/workflows/e2e-init.yaml @@ -124,7 +124,8 @@ jobs: -e SPIDERPOOL_AGENT_IMAGE_NAME=spiderpool-agent-race \ -e SPIDERPOOL_CONTROLLER_IMAGE_NAME=spiderpool-controller-race \ -e E2E_IP_FAMILY=${{ inputs.ip_family }} -e PYROSCOPE_LOCAL_PORT="" \ - -e E2E_KIND_IMAGE_TAG=${{ inputs.k8s_version }} + -e E2E_KIND_IMAGE_TAG=${{ inputs.k8s_version }} \ + -e INSTALL_KUBEVIRT=true - name: Run e2e Test id: run_e2e @@ -137,6 +138,7 @@ jobs: make ${{ matrix.e2e_test_mode }} -e E2E_CLUSTER_NAME=${{ env.E2E_CLUSTER_NAME }} \ -e E2E_GINKGO_LABELS=${E2E_LABELS} \ -e E2E_TIMEOUT=${{ env.E2E_TIME_OUT }} \ + -e INSTALL_MULTUS=true \ -e E2E_IP_FAMILY=${{ inputs.ip_family }} || RESULT=1 if ((RESULT==0)) ; then echo "RUN_E2E_PASS=true" >> $GITHUB_ENV diff --git a/charts/spiderpool/README.md b/charts/spiderpool/README.md index c450e3698e..467652124e 100644 --- a/charts/spiderpool/README.md +++ b/charts/spiderpool/README.md @@ -126,22 +126,27 @@ helm install spiderpool spiderpool/spiderpool --wait --namespace kube-system \ ### ipam parameters -| Name | Description | Value | -| -------------------------------------- | ------------------------------------------------------------------------------------------------ | ------- | -| `ipam.enableIPv4` | enable ipv4 | `true` | -| `ipam.enableIPv6` | enable ipv6 | `true` | -| `ipam.enableStatefulSet` | the network mode | `true` | -| `ipam.enableKubevirtStaticIP` | the feature to keep kubevirt vm pod static IP | `true` | -| `ipam.enableSpiderSubnet` | SpiderSubnet feature gate. | `true` | -| `ipam.subnetDefaultFlexibleIPNumber` | the default flexible IP number of SpiderSubnet feature auto-created IPPools | `1` | -| `ipam.gc.enabled` | enable retrieve IP in spiderippool CR | `true` | -| `ipam.gc.gcAll.intervalInSecond` | the gc all interval duration | `600` | -| `ipam.gc.GcDeletingTimeOutPod.enabled` | enable retrieve IP for the pod who times out of deleting graceful period | `true` | -| `ipam.gc.GcDeletingTimeOutPod.delay` | the gc delay seconds after the pod times out of deleting graceful period | `0` | -| `grafanaDashboard.install` | install grafanaDashboard for spiderpool. This requires the grafana operator CRDs to be available | `false` | -| `grafanaDashboard.namespace` | the grafanaDashboard namespace. Default to the namespace of helm instance | `""` | -| `grafanaDashboard.annotations` | the additional annotations of spiderpool grafanaDashboard | `{}` | -| `grafanaDashboard.labels` | the additional label of spiderpool grafanaDashboard | `{}` | +| Name | Description | Value | +| -------------------------------------- | --------------------------------------------------------------------------- | ------ | +| `ipam.enableIPv4` | enable ipv4 | `true` | +| `ipam.enableIPv6` | enable ipv6 | `true` | +| `ipam.enableStatefulSet` | the network mode | `true` | +| `ipam.enableKubevirtStaticIP` | the feature to keep kubevirt vm pod static IP | `true` | +| `ipam.enableSpiderSubnet` | SpiderSubnet feature gate. 
| `true` | +| `ipam.subnetDefaultFlexibleIPNumber` | the default flexible IP number of SpiderSubnet feature auto-created IPPools | `1` | +| `ipam.gc.enabled` | enable retrieve IP in spiderippool CR | `true` | +| `ipam.gc.gcAll.intervalInSecond` | the gc all interval duration | `600` | +| `ipam.gc.GcDeletingTimeOutPod.enabled` | enable retrieve IP for the pod who times out of deleting graceful period | `true` | +| `ipam.gc.GcDeletingTimeOutPod.delay` | the gc delay seconds after the pod times out of deleting graceful period | `0` | + +### grafanaDashboard parameters + +| Name | Description | Value | +| ------------------------------ | ------------------------------------------------------------------------------------------------ | ------- | +| `grafanaDashboard.install` | install grafanaDashboard for spiderpool. This requires the grafana operator CRDs to be available | `false` | +| `grafanaDashboard.namespace` | the grafanaDashboard namespace. Default to the namespace of helm instance | `""` | +| `grafanaDashboard.annotations` | the additional annotations of spiderpool grafanaDashboard | `{}` | +| `grafanaDashboard.labels` | the additional label of spiderpool grafanaDashboard | `{}` | ### coordinator parameters diff --git a/cmd/spiderpool-agent/cmd/coordinator.go b/cmd/spiderpool-agent/cmd/coordinator.go index 6538acd88d..ad3b7f93ea 100644 --- a/cmd/spiderpool-agent/cmd/coordinator.go +++ b/cmd/spiderpool-agent/cmd/coordinator.go @@ -80,7 +80,6 @@ func (g *_unixGetCoordinatorConfig) Handle(params daemonset.GetCoordinatorConfig if ok { _, err := kubevirtMgr.GetVMIMByName(ctx, pod.Namespace, vmimName, false) if nil != err { - // TODO (Icarus9913): should we still cancel the IP conflict detection for no VMIM pod ? if apierrors.IsNotFound(err) { logger.Sugar().Warnf("no kubevirt vm pod '%s/%s' corresponding VirtualMachineInstanceMigration '%s/%s' found, still execute IP conflict detection", pod.Namespace, pod.Name, pod.Namespace, vmimName) @@ -90,6 +89,7 @@ func (g *_unixGetCoordinatorConfig) Handle(params daemonset.GetCoordinatorConfig } } else { // cancel IP conflict detection because there's a moment the old vm pod still running during the vm live migration phase + logger.Sugar().Infof("cancel IP conflict detection for live migration new pod '%s/%s'", pod.Namespace, pod.Name) detectIPConflict = false } } diff --git a/cmd/spiderpool-agent/cmd/daemon.go b/cmd/spiderpool-agent/cmd/daemon.go index ad90117e87..46cbcb5cc7 100644 --- a/cmd/spiderpool-agent/cmd/daemon.go +++ b/cmd/spiderpool-agent/cmd/daemon.go @@ -343,12 +343,14 @@ func initAgentServiceManagers(ctx context.Context) { } agentContext.StsManager = statefulSetManager - logger.Debug("Begin to initialize Kubevirt manager") - kubevirtManager := kubevirtmanager.NewKubevirtManager( - agentContext.CRDManager.GetClient(), - agentContext.CRDManager.GetAPIReader(), - ) - agentContext.KubevirtManager = kubevirtManager + if agentContext.Cfg.EnableKubevirtStaticIP { + logger.Debug("Begin to initialize Kubevirt manager") + kubevirtManager := kubevirtmanager.NewKubevirtManager( + agentContext.CRDManager.GetClient(), + agentContext.CRDManager.GetAPIReader(), + ) + agentContext.KubevirtManager = kubevirtManager + } logger.Debug("Begin to initialize Endpoint manager") endpointManager, err := workloadendpointmanager.NewWorkloadEndpointManager( diff --git a/cmd/spiderpool-controller/cmd/daemon.go b/cmd/spiderpool-controller/cmd/daemon.go index 96b9f91a10..501fbfe493 100644 --- a/cmd/spiderpool-controller/cmd/daemon.go +++ 
b/cmd/spiderpool-controller/cmd/daemon.go @@ -262,12 +262,14 @@ func initControllerServiceManagers(ctx context.Context) { } controllerContext.StsManager = statefulSetManager - logger.Debug("Begin to initialize Kubevirt manager") - kubevirtManager := kubevirtmanager.NewKubevirtManager( - controllerContext.CRDManager.GetClient(), - controllerContext.CRDManager.GetAPIReader(), - ) - controllerContext.KubevirtManager = kubevirtManager + if controllerContext.Cfg.EnableKubevirtStaticIP { + logger.Debug("Begin to initialize Kubevirt manager") + kubevirtManager := kubevirtmanager.NewKubevirtManager( + controllerContext.CRDManager.GetClient(), + controllerContext.CRDManager.GetAPIReader(), + ) + controllerContext.KubevirtManager = kubevirtManager + } logger.Debug("Begin to initialize Endpoint manager") endpointManager, err := workloadendpointmanager.NewWorkloadEndpointManager( diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml index 3ef5a62793..c14241ef80 100644 --- a/docs/mkdocs.yml +++ b/docs/mkdocs.yml @@ -83,6 +83,7 @@ nav: - Plugin ifacer: usage/ifacer.md - node-based topology: usage/network-topology.md - RDMA: usage/rdma.md + - Kubevirt: usage/kubevirt.md - FAQ: usage/debug.md - Concepts: - Architecture: concepts/arch.md diff --git a/docs/usage/kubevirt-zh_CN.md b/docs/usage/kubevirt-zh_CN.md new file mode 100644 index 0000000000..0c719baa98 --- /dev/null +++ b/docs/usage/kubevirt-zh_CN.md @@ -0,0 +1,174 @@ +# Kubevirt + +**简体中文** | [**English**](./kubevirt.md) + +## 介绍 + +*Spiderpool 能保证 kubevirt vm 的 Pod 在重启、重建场景下,持续获取到相同的 IP 地址。* + +## Kubevirt 网络搭配 + +Spiderpool underlay 网络解决方案可给 Kubevirt 赋予介入 underlay 的能力: + +1. 对于 Kubevirt 的 Passt 网络模式,可搭配 Spiderpool macvlan 集成方案使用。在该网络模式下,**支持** Service Mesh 的所有功能,不过只能使用**单网卡**,且不支持热迁移。 + +2. 对于 Kubevirt 的 Bridge 网络模式,可搭配 OVS CNI 使用。在该网络模式下,**不支持** Service Mesh 功能,可使用**多网卡**,不支持热迁移。 + +## Kubevirt VM 固定地址 + +Kubevirt VM 会在以下一些场景中会出现固定地址的使用: + +1. VM 的热迁移,期望迁移过后的 VM 仍能继承之前的 IP 地址。 + +2. VM 资源对应的 Pod 出现了重启的情况。 + +3. VM 资源对应的 VMI(VirtualMachineInstance) 资源被删除的情景。 + +此外,Kubevirt VM 固定 IP 地址与 StatefulSet 的表现形式是不一样的: + +1. 对于 VM ,Pod 重启前后,其 Pod 的名字是会发生变化的,但是其对应的 VMI 不论重启与否,其名字都不会发生变化。因此,我们将会以 VM 为单位来记录其固定的 IP 地址(我们的 SpiderEndpoint 资源将会继承使用 VM 资源的命名空间以及名字)。 + +2. 对于 StatefulSet,Pod 副本重启前后,其 Pod 名保持不变,我们 Spiderpool 会因此以 Pod 为单位来记录其固定的 IP 地址。 + +> Notice: 该功能默认开启。若开启,无任何限制, VM 可通过有限 IP 地址集合的 IP 池来固化 IP 的范围,但是,无论 VM 是否使用固定的 IP 池,它的 Pod 都可以持续分到相同 IP。 若关闭,VM 对应的 Pod 将被当作无状态对待,使用 Helm 安装 Spiderpool 时,可通过`--set ipam.enableKubevirtStaticIP=false` 关闭。 + +## 实施要求 + +1. 一套 Kubernetes 集群。 + +2. 
已安装 [Helm](https://helm.sh/docs/intro/install/)。 + +## 步骤 + +以下流程将会演示 Kubevirt 的 Passt 网络模式搭配 macvlan CNI 以使得 VM 获得 underlay 接入能力,并通过 spiderpool 实现分配固定 IP 的功能。 + +> Notice:当前 macvlan 和 ipvlan 并不适用于 Kubevirt 的 bridge 网络模式,因为对于 bridge 网络模式会将 pod 网卡的 MAC 地址移动到VM,使得 Pod 使用另一个不同的地址。而 macvlan 和 ipvlan CNI 要求 Pod 的网卡接口具有原始 MAC 地址。 + +### 安装 Spiderpool + +请参考 [Macvlan Quick Start](./install/underlay/get-started-macvlan-zh_CN.md) + +### 创建 Kubevirt VM 应用 + +以下的示例 Yaml 中, 会创建 1 个 Kubevirt VM 应用 ,其中: + +- `v1.multus-cni.io/default-network`:为应用选择一张默认网卡的 CNI 配置。 + +```bash +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + name: vm-cirros + labels: + kubevirt.io/vm: vm-cirros +spec: + runStrategy: Always + template: + metadata: + annotations: + v1.multus-cni.io/default-network: kube-system/macvlan-ens192 + labels: + kubevirt.io/vm: vm-cirros + spec: + domain: + devices: + disks: + - name: containerdisk + disk: + bus: virtio + - name: cloudinitdisk + disk: + bus: virtio + interfaces: + - name: default + passt: {} + resources: + requests: + memory: 64M + networks: + - name: default + pod: {} + volumes: + - name: containerdisk + containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + - name: cloudinitdisk + cloudInitNoCloud: + userData: | + #!/bin/sh + echo 'printed from cloud-init userdata' +``` + +最终,在 Kubevirt VM 应用被创建时,Spiderpool 会从指定 IPPool 中随机选择一个 IP 来与应用形成绑定关系。 + +```bash +~# kubectl get spiderippool +NAME VERSION SUBNET ALLOCATED-IP-COUNT TOTAL-IP-COUNT DEFAULT +test-ippool 4 10.6.0.0/16 1 10 false + +~# kubectl get po -l vm.kubevirt.io/name=vm-cirros -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +virt-launcher-vm-cirros-rg6fs 2/2 Running 0 3m43s 10.6.168.105 node2 1/1 +``` + +重启 Kubevirt VM Pod, 观察到新的 Pod 的 IP 不会变化,符合预期。 + +```bash +~# kubectl delete pod virt-launcher-vm-cirros-rg6fs +pod "virt-launcher-vm-cirros-rg6fs" deleted + +~# kubectl get po -l vm.kubevirt.io/name=vm-cirros -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +virt-launcher-vm-cirros-d68l2 2/2 Running 0 1m21s 10.6.168.105 node2 1/1 +``` + +重启 Kubevirt VMI,观察到后续新的 Pod 的IP 也不会变化,符合预期。 + +```bash +~# kubectl delete vmi vm-cirros +virtualmachineinstance.kubevirt.io "vm-cirros" deleted + +~# kubectl get po -l vm.kubevirt.io/name=vm-cirros -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +virt-launcher-vm-cirros-jjgrl 2/2 Running 0 104s 10.6.168.105 node2 1/1 +``` + +VM 也可与其他 underlay Pod 的通信。 + +```bash +~# kubectl get po -o wide +NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +daocloud-2048-5855b45f44-bvmdr 1/1 Running 0 5m55s 10.6.168.108 spider-worker + +~# kubectl virtctl console vm-cirros +$ ping -c 1 10.6.168.108 +PING 10.6.168.108 (10.6.168.108): 56 data bytes +64 bytes from 10.6.168.108: seq=0 ttl=255 time=70.554 ms + +--- 10.6.168.108 ping statistics --- +1 packets transmitted, 1 packets received, 0% packet loss +round-trip min/avg/max = 70.554/70.554/70.554 ms +``` + +VM 也可访问 cluster IP。 + +```bash +~# kubectl get svc -o wide +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR +daocloud-2048-svc ClusterIP 10.233.36.38 80/TCP 3m50s app=daocloud-2048 + +~# curl -I 10.233.36.38:80 +HTTP/1.1 200 OK +Server: nginx/1.10.1 +Date: Tue, 17 Oct 2023 06:50:04 GMT +Content-Type: text/html +Content-Length: 4090 +Last-Modified: Tue, 17 Oct 2023 06:40:53 GMT +Connection: keep-alive +ETag: "652e2c75-ffa" +Accept-Ranges: bytes +``` + +## 总结 + +Spiderpool 能保证 Kubevirt VM Pod 在重启、重建场景下,持续获取到相同的 IP 
地址,能很好的满足 Kubevirt 虚拟机的固定 IP 需求。并可配合 macvlan 或 OVS CNI 与 Kubevirt 的多种网络模式实现 VM underlay 接入能力。 diff --git a/docs/usage/kubevirt.md b/docs/usage/kubevirt.md new file mode 100644 index 0000000000..9a0ad795db --- /dev/null +++ b/docs/usage/kubevirt.md @@ -0,0 +1,3 @@ +# Kubevirt + +**English** | [**简体中文**](./kubevirt-zh_CN.md) diff --git a/go.mod b/go.mod index bcbae3a206..872ae62137 100644 --- a/go.mod +++ b/go.mod @@ -63,6 +63,7 @@ require ( kubevirt.io/api v1.0.0 sigs.k8s.io/controller-runtime v0.16.1 sigs.k8s.io/controller-tools v0.11.4 + sigs.k8s.io/yaml v1.3.0 ) require ( @@ -179,5 +180,4 @@ require ( kubevirt.io/controller-lifecycle-operator-sdk/api v0.0.0-20220329064328-f3cc58c6ed90 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/pkg/ipam/release.go b/pkg/ipam/release.go index 117c97d60b..7dd4315e71 100644 --- a/pkg/ipam/release.go +++ b/pkg/ipam/release.go @@ -124,7 +124,7 @@ func (i *ipam) releaseForAllNICs(ctx context.Context, uid, nic string, endpoint } } - // Check whether the kubevirt VM pod needs to keep its IP allocation. + // Check whether the kubevirt VM pod needs to keep its IP allocation. if i.config.EnableKubevirtStaticIP && endpoint.Status.OwnerControllerType == constant.KindKubevirtVMI { isValidVMPod, err := i.kubevirtManager.IsValidVMPod(ctx, endpoint.Namespace, endpoint.Status.OwnerControllerType, endpoint.Status.OwnerControllerName) if nil != err { diff --git a/test/Makefile b/test/Makefile index b14312aa4d..edf5cfd61b 100644 --- a/test/Makefile +++ b/test/Makefile @@ -56,6 +56,9 @@ ifeq ($(INSTALL_MULTUS),true) MULTUS_DEFAULT_CNI_VLAN200=$(MULTUS_DEFAULT_CNI_VLAN200) \ MULTUS_ADDITIONAL_CNI_VLAN100=$(MULTUS_ADDITIONAL_CNI_VLAN100) \ MULTUS_ADDITIONAL_CNI_VLAN200=$(MULTUS_ADDITIONAL_CNI_VLAN200) \ + MULTUS_KUBEVIRT_CNI_VLAN30=$(MULTUS_KUBEVIRT_CNI_VLAN30) \ + MULTUS_KUBEVIRT_CNI_VLAN40=$(MULTUS_KUBEVIRT_CNI_VLAN40) \ + INSTALL_KUBEVIRT=$(INSTALL_KUBEVIRT) \ RELEASE_NAMESPACE=$(RELEASE_NAMESPACE) \ CLUSTER_PATH=$(CLUSTER_DIR)/$(E2E_CLUSTER_NAME) \ E2E_SPIDERPOOL_ENABLE_SUBNET=${E2E_SPIDERPOOL_ENABLE_SUBNET} \ @@ -75,6 +78,13 @@ ifeq ($(INSTALL_SPIDERDOCTOR),true) E2E_SPIDERDOCTOR_IMAGE_REPO=$(E2E_SPIDERDOCTOR_IMAGE_REPO) \ HTTP_PROXY=$(HTTP_PROXY) \ $(QUIET) bash scripts/install-spiderdoctor.sh +endif +ifeq ($(INSTALL_KUBEVIRT),true) + @echo -e "\033[35m [Step 10] Install kubevirt \033[0m" + E2E_KUBECONFIG=$(E2E_KUBECONFIG) \ + E2E_CLUSTER_NAME=$(E2E_CLUSTER_NAME) \ + HTTP_PROXY=$(HTTP_PROXY) \ + $(QUIET) bash scripts/install-kubevirt.sh endif @ echo "wait for the cluster ready" ; \ TEST_IMAGE_NAME=$(TEST_IMAGE_NAME) \ diff --git a/test/Makefile.defs b/test/Makefile.defs index 2dd61ac4e8..972729cbf8 100644 --- a/test/Makefile.defs +++ b/test/Makefile.defs @@ -47,6 +47,8 @@ INSTALL_NETTOOLS ?= false INSTALL_SPIDERDOCTOR ?= true +INSTALL_KUBEVIRT ?= false + CALICO_VERSION ?= v3.25.0 CNI_PACKAGE_VERSION ?= v1.3.0 @@ -113,6 +115,8 @@ MULTUS_DEFAULT_CNI_CILIUM := cilium MULTUS_DEFAULT_CNI_VLAN0 := macvlan-vlan0 MULTUS_DEFAULT_CNI_VLAN100 := macvlan-vlan100 MULTUS_DEFAULT_CNI_VLAN200 := macvlan-vlan200 +MULTUS_KUBEVIRT_CNI_VLAN30 := kubevirt-macvlan-vlan30 +MULTUS_KUBEVIRT_CNI_VLAN40 := kubevirt-macvlan-vlan40 ifeq ($(E2E_CHINA_IMAGE_REGISTRY),true) E2E_MULTUS_IMAGE_REGISTER ?= ghcr.m.daocloud.io diff --git a/test/doc/kubevirt.md b/test/doc/kubevirt.md index a4f09a7455..b05564b848 100644 --- a/test/doc/kubevirt.md +++ b/test/doc/kubevirt.md 
@@ -2,5 +2,6 @@ | Case ID | Title | Priority | Smoke | Status | Other | |---------|-------------------------------------------------------------------------------|----------|-------|--------|-------| -| F00001 | Succeed to keep static IP for kubevirt VM/VMI after restarting the VM/VMI pod | P1 | | | | -| F00002 | Succeed to keep static IP for the kubevirt VM live migration | P1 | | | | +| F00001 | Succeed to keep static IP for kubevirt VM/VMI after restarting the VM/VMI pod | P1 | | done | | +| F00002 | Succeed to keep static IP for the kubevirt VM live migration | P1 | | done | | +| F00003 | Succeed to allocation multiple NICs | P1 | | done | | diff --git a/test/e2e/common/constant.go b/test/e2e/common/constant.go index 125675b98a..d451372702 100644 --- a/test/e2e/common/constant.go +++ b/test/e2e/common/constant.go @@ -59,6 +59,12 @@ var ( MacvlanUnderlayVlan0 string = "macvlan-vlan0" MacvlanVlan100 string = "macvlan-vlan100" MacvlanVlan200 string = "macvlan-vlan200" + KubevirtMacvlan30 string = "kubevirt-macvlan-vlan30" + KubevirtMacvlan40 string = "kubevirt-macvlan-vlan40" + KubevirtPoolIPv4Vlan30 string = "kubevirt-vlan30-v4" + KubevirtPoolIPv6Vlan30 string = "kubevirt-vlan30-v6" + KubevirtPoolIPv4Vlan40 string = "kubevirt-vlan40-v4" + KubevirtPoolIPv6Vlan40 string = "kubevirt-vlan40-v6" SpiderPoolIPv4SubnetDefault string = "default-v4-subnet" SpiderPoolIPv6SubnetDefault string = "default-v6-subnet" SpiderPoolIPv4SubnetVlan100 string = "vlan100-v4" diff --git a/test/e2e/common/mode.go b/test/e2e/common/mode.go index 5afaab36a3..59235fe5c7 100644 --- a/test/e2e/common/mode.go +++ b/test/e2e/common/mode.go @@ -12,6 +12,7 @@ const ( ENV_INSTALL_OVERLAY = "INSTALL_OVERLAY_CNI" E2E_SPIDERPOOL_ENABLE_SUBNET = "E2E_SPIDERPOOL_ENABLE_SUBNET" INSTALL_CILIUM = "INSTALL_CILIUM" + INSTALL_MULTUS = "INSTALL_MULTUS" ) func checkBoolEnv(name string) bool { @@ -34,3 +35,7 @@ func CheckSubnetFeatureOn() bool { func CheckCiliumFeatureOn() bool { return checkBoolEnv(INSTALL_CILIUM) } + +func CheckMultusFeatureOn() bool { + return checkBoolEnv(INSTALL_MULTUS) +} diff --git a/test/e2e/kubevirt/kubevirt_suite_test.go b/test/e2e/kubevirt/kubevirt_suite_test.go new file mode 100644 index 0000000000..d568ee173c --- /dev/null +++ b/test/e2e/kubevirt/kubevirt_suite_test.go @@ -0,0 +1,122 @@ +// Copyright 2023 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +package kubevirt_test + +import ( + "fmt" + "os" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + e2e "github.com/spidernet-io/e2eframework/framework" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + kubevirtv1 "kubevirt.io/api/core/v1" + k8yaml "sigs.k8s.io/yaml" + + spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + "github.com/spidernet-io/spiderpool/test/e2e/common" +) + +const ( + TEST_VM_TEMPLATE_PATH = "./testvm.yaml" + randomLength = 6 +) + +var ( + vmTemplate = new(kubevirtv1.VirtualMachine) + frame *e2e.Framework +) + +func TestKubevirt(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Kubevirt Suite") +} + +var _ = BeforeSuite(func() { + defer GinkgoRecover() + + if common.CheckRunOverlayCNI() { + Skip("overlay CNI is installed , ignore this suite") + } + if !common.CheckMultusFeatureOn() { + Skip("multus is not installed , ignore this suite") + } + + var err error + frame, err = e2e.NewFramework(GinkgoT(), []func(*runtime.Scheme) error{spiderpoolv2beta1.AddToScheme, kubevirtv1.AddToScheme}) + Expect(err).NotTo(HaveOccurred()) + + // make sure we have macvlan net-attach-def resource + _, err = frame.GetMultusInstance(common.KubevirtMacvlan30, common.MultusNs) + if nil != err { + if errors.IsNotFound(err) { + Skip(fmt.Sprintf("no kubevirt multus CR '%s/%s' installed, ignore this suite", common.MultusNs, common.KubevirtMacvlan30)) + } + Fail(err.Error()) + } + _, err = frame.GetMultusInstance(common.KubevirtMacvlan40, common.MultusNs) + if nil != err { + if errors.IsNotFound(err) { + Skip(fmt.Sprintf("no kubevirt multus CR '%s/%s' installed, ignore this suite", common.MultusNs, common.KubevirtMacvlan40)) + } + Fail(err.Error()) + } + if frame.Info.IpV4Enabled { + _, err := getSpiderIPPoolByName(common.KubevirtPoolIPv4Vlan30) + if nil != err { + if errors.IsNotFound(err) { + Skip(fmt.Sprintf("no kubevirt IPv4 IPPool resource '%s' installed, ignore this suite", common.KubevirtPoolIPv4Vlan30)) + } + Fail(err.Error()) + } + _, err = getSpiderIPPoolByName(common.KubevirtPoolIPv4Vlan40) + if nil != err { + if errors.IsNotFound(err) { + Skip(fmt.Sprintf("no kubevirt IPv4 IPPool resource '%s' installed, ignore this suite", common.KubevirtPoolIPv4Vlan40)) + } + Fail(err.Error()) + } + } + if frame.Info.IpV6Enabled { + _, err := getSpiderIPPoolByName(common.KubevirtPoolIPv6Vlan30) + if nil != err { + if errors.IsNotFound(err) { + Skip(fmt.Sprintf("no kubevirt IPv6 IPPool resource '%s' installed, ignore this suite", common.KubevirtPoolIPv6Vlan30)) + } + Fail(err.Error()) + } + _, err = getSpiderIPPoolByName(common.KubevirtPoolIPv6Vlan40) + if nil != err { + if errors.IsNotFound(err) { + Skip(fmt.Sprintf("no kubevirt IPv6 IPPool resource '%s' installed, ignore this suite", common.KubevirtPoolIPv6Vlan40)) + } + Fail(err.Error()) + } + } + + readTestVMTemplate() +}) + +func readTestVMTemplate() { + bytes, err := os.ReadFile(TEST_VM_TEMPLATE_PATH) + Expect(err).NotTo(HaveOccurred()) + + err = k8yaml.Unmarshal(bytes, vmTemplate) + Expect(err).NotTo(HaveOccurred()) +} + +func getSpiderIPPoolByName(name string) (*spiderpoolv2beta1.SpiderIPPool, error) { + var pool spiderpoolv2beta1.SpiderIPPool + err := frame.GetResource(types.NamespacedName{ + Name: name, + }, &pool) + if nil != err { + return nil, err + } + + return &pool, nil +} diff --git a/test/e2e/kubevirt/kubevirt_test.go b/test/e2e/kubevirt/kubevirt_test.go new file mode 100644 index 0000000000..554c8bf81d --- /dev/null +++ b/test/e2e/kubevirt/kubevirt_test.go @@ -0,0 +1,285 @@ +// 
Copyright 2023 Authors of spidernet-io +// SPDX-License-Identifier: Apache-2.0 + +package kubevirt_test + +import ( + "context" + "fmt" + "sort" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + utilrand "k8s.io/apimachinery/pkg/util/rand" + kubevirtv1 "kubevirt.io/api/core/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + spiderpoolv2beta1 "github.com/spidernet-io/spiderpool/pkg/k8s/apis/spiderpool.spidernet.io/v2beta1" + "github.com/spidernet-io/spiderpool/test/e2e/common" +) + +var _ = Describe("test kubevirt", Label("kubevirt"), func() { + var ( + virtualMachine *kubevirtv1.VirtualMachine + ctx context.Context + namespace string + ) + + BeforeEach(func() { + ctx = context.TODO() + + // make sure the vm has the macvlan annotation. + virtualMachine = vmTemplate.DeepCopy() + anno := virtualMachine.Spec.Template.ObjectMeta.GetAnnotations() + anno[common.MultusDefaultNetwork] = common.MacvlanUnderlayVlan0 + virtualMachine.Spec.Template.ObjectMeta.SetAnnotations(anno) + + // create namespace + namespace = "ns" + utilrand.String(randomLength) + GinkgoWriter.Printf("create namespace %v. \n", namespace) + err := frame.CreateNamespaceUntilDefaultServiceAccountReady(namespace, common.ServiceAccountReadyTimeout) + Expect(err).NotTo(HaveOccurred()) + + DeferCleanup(func() { + if CurrentSpecReport().Failed() { + GinkgoWriter.Println("If the use case fails, the cleanup step will be skipped") + return + } + + GinkgoWriter.Printf("delete namespace %v. \n", namespace) + Expect(frame.DeleteNamespace(namespace)).NotTo(HaveOccurred()) + }) + }) + + It("Succeed to keep static IP for kubevirt VM/VMI after restarting the VM/VMI pod", Label("F00001"), func() { + // 1. create a kubevirt vm with passt network mode + virtualMachine.Spec.Template.Spec.Networks = []kubevirtv1.Network{ + { + Name: "default", + NetworkSource: kubevirtv1.NetworkSource{ + Pod: &kubevirtv1.PodNetwork{}, + }, + }, + } + virtualMachine.Spec.Template.Spec.Domain.Devices.Interfaces = []kubevirtv1.Interface{ + { + Name: "default", + InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{ + Passt: &kubevirtv1.InterfacePasst{}, + }, + }, + } + virtualMachine.Name = fmt.Sprintf("%s-%s", virtualMachine.Name, utilrand.String(randomLength)) + virtualMachine.Namespace = namespace + GinkgoWriter.Printf("try to create kubevirt VM: %v \n", virtualMachine) + err := frame.CreateResource(virtualMachine) + Expect(err).NotTo(HaveOccurred()) + + // 2. wait for the vmi to be ready and record the vmi corresponding vmi pod IP + vmi, err := waitVMIUntilRunning(virtualMachine.Namespace, virtualMachine.Name, time.Minute*5) + Expect(err).NotTo(HaveOccurred()) + + vmInterfaces := make(map[string][]string) + for _, vmNetworkInterface := range vmi.Status.Interfaces { + ips := vmNetworkInterface.IPs + sort.Strings(ips) + vmInterfaces[vmNetworkInterface.Name] = ips + } + GinkgoWriter.Printf("original VMI NIC allocations: %v \n", vmInterfaces) + + // 3. 
restart the vmi object and compare the new vmi pod IP whether is same with the previous-recorded IP + GinkgoWriter.Printf("try to restart VMI %s/%s", vmi.Namespace, vmi.Name) + err = frame.KClient.Delete(ctx, vmi) + Expect(err).NotTo(HaveOccurred()) + vmi, err = waitVMIUntilRunning(virtualMachine.Namespace, virtualMachine.Name, time.Minute*5) + Expect(err).NotTo(HaveOccurred()) + + tmpVMInterfaces := make(map[string][]string) + for _, vmNetworkInterface := range vmi.Status.Interfaces { + ips := vmNetworkInterface.IPs + sort.Strings(ips) + tmpVMInterfaces[vmNetworkInterface.Name] = ips + } + GinkgoWriter.Printf("new VMI NIC allocations: %v \n", tmpVMInterfaces) + Expect(vmInterfaces).Should(Equal(tmpVMInterfaces)) + }) + + It("Succeed to keep static IP for the kubevirt VM live migration", Label("F00002"), func() { + // 1. create a kubevirt vm with masquerade mode (At present, it seems like the live migration only supports masquerade mode) + virtualMachine.Spec.Template.Spec.Networks = []kubevirtv1.Network{ + { + Name: "default", + NetworkSource: kubevirtv1.NetworkSource{ + Pod: &kubevirtv1.PodNetwork{}, + }, + }, + } + virtualMachine.Spec.Template.Spec.Domain.Devices.Interfaces = []kubevirtv1.Interface{ + { + Name: "default", + InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{ + Masquerade: &kubevirtv1.InterfaceMasquerade{}, + }, + }, + } + virtualMachine.Name = fmt.Sprintf("%s-%s", virtualMachine.Name, utilrand.String(randomLength)) + virtualMachine.Namespace = namespace + GinkgoWriter.Printf("try to create kubevirt VM: %v \n", virtualMachine) + err := frame.CreateResource(virtualMachine) + Expect(err).NotTo(HaveOccurred()) + + // 2. record the vmi corresponding vmi pod IP + _, err = waitVMIUntilRunning(virtualMachine.Namespace, virtualMachine.Name, time.Minute*5) + Expect(err).NotTo(HaveOccurred()) + + var podList corev1.PodList + err = frame.KClient.List(ctx, &podList, client.MatchingLabels{ + kubevirtv1.VirtualMachineNameLabel: virtualMachine.Name, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(podList.Items).To(HaveLen(1)) + originalPodName := podList.Items[0].Name + originalPodIPs := podList.Items[0].Status.PodIPs + GinkgoWriter.Printf("original virt-launcher pod '%s/%s' IP allocations: %v \n", namespace, originalPodName, originalPodIPs) + + // 3. create a vm migration + vmim := &kubevirtv1.VirtualMachineInstanceMigration{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-migration", virtualMachine.Name), + Namespace: virtualMachine.Namespace, + }, + Spec: kubevirtv1.VirtualMachineInstanceMigrationSpec{ + VMIName: virtualMachine.Name, + }, + } + GinkgoWriter.Printf("try to create VirtualMachineInstanceMigration: %v \n", vmim) + err = frame.KClient.Create(ctx, vmim) + Expect(err).NotTo(HaveOccurred()) + + // 4. 
wait for the completion of the migration and compare the new vmi pod IP whether is same with the previous-recorded IP + Eventually(func() error { + tmpPod, err := frame.GetPod(originalPodName, virtualMachine.Namespace) + if nil != err { + return err + } + if tmpPod.Status.Phase == corev1.PodSucceeded { + return nil + } + return fmt.Errorf("virt-launcher pod %s/%s phase is %s, the vm is still in live migration phase", tmpPod.Namespace, tmpPod.Name, tmpPod.Status.Phase) + }).WithTimeout(time.Minute * 10).WithPolling(time.Second * 5).Should(BeNil()) + GinkgoWriter.Printf("virt-launcher pod %s/%s is completed\n", namespace, originalPodName) + + var newPodList corev1.PodList + err = frame.KClient.List(ctx, &newPodList, client.MatchingLabels{ + kubevirtv1.VirtualMachineNameLabel: virtualMachine.Name, + }) + Expect(err).NotTo(HaveOccurred()) + Expect(newPodList.Items).To(HaveLen(2)) + for _, tmpPod := range newPodList.Items { + if tmpPod.Name == originalPodName { + continue + } + GinkgoWriter.Printf("the new migration virt-launcher pod %s/%s IP allocations: %v \n", tmpPod.Namespace, tmpPod.Name, tmpPod.Status.PodIPs) + Expect(tmpPod.Status.PodIPs).To(Equal(originalPodIPs)) + } + }) + + It("Succeed to allocation multiple NICs", Label("F00003"), func() { + // 1. create a kubevirt vm with bridge + multus multiple NIC network mode + macvlan30 := "macvlan30" + macvlan40 := "macvlan40" + virtualMachine.Spec.Template.Spec.Networks = []kubevirtv1.Network{ + { + Name: macvlan30, + NetworkSource: kubevirtv1.NetworkSource{ + Multus: &kubevirtv1.MultusNetwork{ + NetworkName: fmt.Sprintf("%s/%s", common.MultusNs, common.KubevirtMacvlan30), + Default: true, + }, + }, + }, + { + Name: macvlan40, + NetworkSource: kubevirtv1.NetworkSource{ + Multus: &kubevirtv1.MultusNetwork{ + NetworkName: fmt.Sprintf("%s/%s", common.MultusNs, common.KubevirtMacvlan40), + }, + }, + }, + } + virtualMachine.Spec.Template.Spec.Domain.Devices.Interfaces = []kubevirtv1.Interface{ + { + Name: macvlan30, + InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{ + Bridge: &kubevirtv1.InterfaceBridge{}, + }, + }, + { + Name: macvlan40, + InterfaceBindingMethod: kubevirtv1.InterfaceBindingMethod{ + Bridge: &kubevirtv1.InterfaceBridge{}, + }, + }, + } + + // with virtualMachine.Spec.Template.Spec.Networks set with multus, we don't need to add multus annotations + virtualMachine.SetAnnotations(map[string]string{}) + virtualMachine.Name = fmt.Sprintf("%s-%s", virtualMachine.Name, utilrand.String(randomLength)) + virtualMachine.Namespace = namespace + GinkgoWriter.Printf("try to create kubevirt VM: %v \n", virtualMachine) + err := frame.CreateResource(virtualMachine) + Expect(err).NotTo(HaveOccurred()) + + // 2. wait for the vmi to be ready + vmi, err := waitVMIUntilRunning(virtualMachine.Namespace, virtualMachine.Name, time.Minute*5) + Expect(err).NotTo(HaveOccurred()) + GinkgoWriter.Printf("kubevirt VMI '%s/%s' is ready, try to check its IP allocations", vmi.Namespace, vmi.Name) + + // 3. 
check the SpiderEndpoint resource IP allocations + var endpoint spiderpoolv2beta1.SpiderEndpoint + err = frame.KClient.Get(ctx, types.NamespacedName{ + Namespace: vmi.Namespace, + Name: vmi.Name, + }, &endpoint) + Expect(err).NotTo(HaveOccurred()) + + GinkgoWriter.Printf("kubevirt VMI '%s/%s' IP allocations: %s", endpoint.Namespace, endpoint.Name, endpoint.Status.String()) + Expect(endpoint.Status.Current.IPs).To(HaveLen(2)) + }) +}) + +func waitVMIUntilRunning(namespace, name string, timeout time.Duration) (*kubevirtv1.VirtualMachineInstance, error) { + tick := time.Tick(timeout) + var vmi kubevirtv1.VirtualMachineInstance + + for { + select { + case <-tick: + GinkgoWriter.Printf("VMI %s/%s is still in phase %s \n", namespace, name, vmi.Status.Phase) + return nil, fmt.Errorf("time out to wait VMI %s/%s running", namespace, name) + + default: + err := frame.GetResource(types.NamespacedName{ + Namespace: namespace, + Name: name, + }, &vmi) + if nil != err { + if errors.IsNotFound(err) { + time.Sleep(time.Second * 5) + continue + } + + return nil, err + } + if vmi.Status.Phase == kubevirtv1.Running { + return &vmi, nil + } + time.Sleep(time.Second * 5) + } + } +} diff --git a/test/e2e/kubevirt/testvm.yaml b/test/e2e/kubevirt/testvm.yaml new file mode 100644 index 0000000000..87d5ad00f3 --- /dev/null +++ b/test/e2e/kubevirt/testvm.yaml @@ -0,0 +1,43 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + name: testvm + namespace: default + labels: + kubevirt.io/vm: vm-cirros +spec: + runStrategy: Always + template: + metadata: + annotations: + v1.multus-cni.io/default-network: kube-system/macvlan-vlan0 + labels: + kubevirt.io/vm: vm-cirros + spec: + domain: + devices: + disks: + - name: containerdisk + disk: + bus: virtio + - name: cloudinitdisk + disk: + bus: virtio + interfaces: + - name: default + passt: {} + resources: + requests: + memory: 64M + networks: + - name: default + pod: {} + volumes: + - name: containerdisk + containerDisk: + image: quay.io/kubevirt/cirros-container-disk-demo + - name: cloudinitdisk + cloudInitNoCloud: + userData: | + #!/bin/sh + echo 'printed from cloud-init userdata' diff --git a/test/scripts/install-kubevirt.sh b/test/scripts/install-kubevirt.sh new file mode 100644 index 0000000000..52b2d7e6f9 --- /dev/null +++ b/test/scripts/install-kubevirt.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +# SPDX-License-Identifier: Apache-2.0 +# Copyright Authors of Spider + +set -o errexit -o nounset -o pipefail + +CURRENT_FILENAME=$( basename $0 ) + +[ -z "${HTTP_PROXY}" ] || export https_proxy=${HTTP_PROXY} + +export KUBEVIRT_VERSION=$(curl -s https://api.github.com/repos/kubevirt/kubevirt/releases | grep tag_name | grep -v -- '-rc' | sort -r | head -1 | awk -F': ' '{print $2}' | sed 's/,//' | xargs) + +# Should we set a fixed version just like "v1.1.0" ? +[ -z "$KUBEVIRT_VERSION" ] && echo "error, miss KUBEVIRT_VERSION" && exit 1 +echo "$CURRENT_FILENAME : KUBEVIRT_VERSION $KUBEVIRT_VERSION " + +[ -z "$E2E_CLUSTER_NAME" ] && echo "error, miss E2E_CLUSTER_NAME " && exit 1 +echo "$CURRENT_FILENAME : E2E_CLUSTER_NAME $E2E_CLUSTER_NAME " + +[ -z "$E2E_KUBECONFIG" ] && echo "error, miss E2E_KUBECONFIG " && exit 1 +[ ! 
-f "$E2E_KUBECONFIG" ] && echo "error, could not find file $E2E_KUBECONFIG " && exit 1 +echo "$CURRENT_FILENAME : E2E_KUBECONFIG $E2E_KUBECONFIG " + +KUBEVIRT_OPERATOR_IMAGE=quay.io/kubevirt/virt-operator:${KUBEVIRT_VERSION} +KUBEVIRT_API_IMAGE=quay.io/kubevirt/virt-api:${KUBEVIRT_VERSION} +KUBEVIRT_CONTROLLER_IMAGE=quay.io/kubevirt/virt-controller:${KUBEVIRT_VERSION} +KUBEVIRT_HANDLER_IMAGE=quay.io/kubevirt/virt-handler:${KUBEVIRT_VERSION} +KUBEVIRT_LAUNCHER_IMAGE=quay.io/kubevirt/virt-launcher:${KUBEVIRT_VERSION} +KUBEVIRT_TEST_IMAGE=quay.io/kubevirt/cirros-container-disk-demo +KUBEVIRT_IMAGE_LIST="${KUBEVIRT_OPERATOR_IMAGE} ${KUBEVIRT_API_IMAGE} ${KUBEVIRT_CONTROLLER_IMAGE} ${KUBEVIRT_HANDLER_IMAGE} ${KUBEVIRT_LAUNCHER_IMAGE}" + +LOCAL_IMAGE_LIST=`docker images | awk '{printf("%s:%s\n",$1,$2)}'` + +for IMAGE in ${KUBEVIRT_IMAGE_LIST}; do + if ! grep ${IMAGE} <<< ${LOCAL_IMAGE_LIST}; then + echo "===> docker pull ${IMAGE}... " + docker pull ${IMAGE} + fi + echo "===> load image ${IMAGE} to kind ..." + kind load docker-image ${IMAGE} --name $E2E_CLUSTER_NAME +done + +kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml --kubeconfig ${E2E_KUBECONFIG} + +kubectl apply -f https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml --kubeconfig ${E2E_KUBECONFIG} + +kubectl rollout status deployment/virt-operator -n kubevirt --timeout 120s --kubeconfig ${E2E_KUBECONFIG} +echo "wait kubevirt related pod running ..." + +# wait for the virt-operator to set up kubevirt component pods +sleep 60 + +kubectl wait --for=condition=ready -l app.kubernetes.io/component=kubevirt -n kubevirt --timeout=300s pod --kubeconfig ${E2E_KUBECONFIG} + +# If the kind cluster runs on a virtual machine consider enabling nested virtualization. +# Enable the network Passt and LiveMigration feature. +# We need to wait for all kubevirt component pods ready(webhook ready) to submit the patch action. 
+kubectl -n kubevirt patch kubevirt kubevirt --type=merge --patch '{"spec":{"configuration":{"developerConfiguration":{"useEmulation":true,"featureGates": ["Passt"]}}}}' --kubeconfig ${E2E_KUBECONFIG}
+
+sleep 1
+
+echo -e "\033[35m Succeed to install kubevirt \033[0m"
diff --git a/test/scripts/install-multus.sh b/test/scripts/install-multus.sh
index abcc2c0d6f..776b13d376 100755
--- a/test/scripts/install-multus.sh
+++ b/test/scripts/install-multus.sh
@@ -35,6 +35,12 @@ echo "$CURRENT_FILENAME : MULTUS_DEFAULT_CNI_VLAN100 $MULTUS_DEFAULT_CNI_VLAN100

 [ -z "$MULTUS_DEFAULT_CNI_VLAN200" ] && echo "error, miss MULTUS_DEFAULT_CNI_VLAN200" && exit 1
 echo "$CURRENT_FILENAME : MULTUS_DEFAULT_CNI_VLAN200 $MULTUS_DEFAULT_CNI_VLAN200 "
+[ -z "$MULTUS_KUBEVIRT_CNI_VLAN30" ] && echo "error, miss MULTUS_KUBEVIRT_CNI_VLAN30" && exit 1
+echo "$CURRENT_FILENAME : MULTUS_KUBEVIRT_CNI_VLAN30 $MULTUS_KUBEVIRT_CNI_VLAN30 "
+
+[ -z "$MULTUS_KUBEVIRT_CNI_VLAN40" ] && echo "error, miss MULTUS_KUBEVIRT_CNI_VLAN40" && exit 1
+echo "$CURRENT_FILENAME : MULTUS_KUBEVIRT_CNI_VLAN40 $MULTUS_KUBEVIRT_CNI_VLAN40 "
+
 #==============
 OS=$(uname | tr 'A-Z' 'a-z')
 SED_COMMAND=sed
@@ -68,6 +74,10 @@ spec:
         VLAN100_IPV6_IPPOOLS=""
         VLAN200_IPV4_IPPOOLS=vlan200-v4
         VLAN200_IPV6_IPPOOLS=""
+        KUBEVIRT_VLAN30_IPV4_IPPOOLS=kubevirt-vlan30-v4
+        KUBEVIRT_VLAN30_IPV6_IPPOOLS=""
+        KUBEVIRT_VLAN40_IPV4_IPPOOLS=kubevirt-vlan40-v4
+        KUBEVIRT_VLAN40_IPV6_IPPOOLS=""
         ;;

     ipv6)
@@ -77,6 +87,10 @@ spec:
         VLAN100_IPV6_IPPOOLS=vlan100-v6
         VLAN200_IPV4_IPPOOLS=''
         VLAN200_IPV6_IPPOOLS=vlan200-v6
+        KUBEVIRT_VLAN30_IPV4_IPPOOLS=''
+        KUBEVIRT_VLAN30_IPV6_IPPOOLS=kubevirt-vlan30-v6
+        KUBEVIRT_VLAN40_IPV4_IPPOOLS=''
+        KUBEVIRT_VLAN40_IPV6_IPPOOLS=kubevirt-vlan40-v6
         ;;

     dual)
@@ -86,6 +100,10 @@ spec:
         VLAN100_IPV6_IPPOOLS=vlan100-v6
         VLAN200_IPV4_IPPOOLS=vlan200-v4
         VLAN200_IPV6_IPPOOLS=vlan200-v6
+        KUBEVIRT_VLAN30_IPV4_IPPOOLS=kubevirt-vlan30-v4
+        KUBEVIRT_VLAN30_IPV6_IPPOOLS=kubevirt-vlan30-v6
+        KUBEVIRT_VLAN40_IPV4_IPPOOLS=kubevirt-vlan40-v4
+        KUBEVIRT_VLAN40_IPV6_IPPOOLS=kubevirt-vlan40-v6
         ;;

     *)
@@ -109,8 +127,8 @@ spec:
         | sed 's?<>?auto?g' \
         | sed 's?<>?eth0?g' \
         | sed 's?<>?100?g' \
-        | sed 's?<>?vlan100-v4?g' \
-        | sed 's?<>?vlan100-v6?g' \
+        | sed 's?<>?'""${VLAN100_IPV4_IPPOOLS}""'?g' \
+        | sed 's?<>?'""${VLAN100_IPV6_IPPOOLS}""'?g' \
         | kubectl apply --kubeconfig ${E2E_KUBECONFIG} -f -

     echo "${MACVLAN_CR_TEMPLATE}" \
@@ -119,9 +137,31 @@ spec:
         | sed 's?<>?auto?g' \
         | sed 's?<>?eth0?g' \
         | sed 's?<>?200?g' \
-        | sed 's?<>?vlan200-v4?g' \
-        | sed 's?<>?vlan200-v6?g' \
+        | sed 's?<>?'""${VLAN200_IPV4_IPPOOLS}""'?g' \
+        | sed 's?<>?'""${VLAN200_IPV6_IPPOOLS}""'?g' \
         | kubectl apply --kubeconfig ${E2E_KUBECONFIG} -f -
+
+    if [ ${INSTALL_KUBEVIRT} == "true" ]; then
+        echo "${MACVLAN_CR_TEMPLATE}" \
+            | sed 's?<>?'""${MULTUS_KUBEVIRT_CNI_VLAN30}""'?g' \
+            | sed 's?<>?'"${RELEASE_NAMESPACE}"'?g' \
+            | sed 's?<>?auto?g' \
+            | sed 's?<>?eth0?g' \
+            | sed 's?<>?30?g' \
+            | sed 's?<>?'""${KUBEVIRT_VLAN30_IPV4_IPPOOLS}""'?g' \
+            | sed 's?<>?'""${KUBEVIRT_VLAN30_IPV6_IPPOOLS}""'?g' \
+            | kubectl apply --kubeconfig ${E2E_KUBECONFIG} -f -
+
+        echo "${MACVLAN_CR_TEMPLATE}" \
+            | sed 's?<>?'""${MULTUS_KUBEVIRT_CNI_VLAN40}""'?g' \
+            | sed 's?<>?'"${RELEASE_NAMESPACE}"'?g' \
+            | sed 's?<>?auto?g' \
+            | sed 's?<>?eth0?g' \
+            | sed 's?<>?40?g' \
+            | sed 's?<>?'""${KUBEVIRT_VLAN40_IPV4_IPPOOLS}""'?g' \
+            | sed 's?<>?'""${KUBEVIRT_VLAN40_IPV6_IPPOOLS}""'?g' \
+            | kubectl apply --kubeconfig ${E2E_KUBECONFIG} -f -
+    fi

 }

@@ -139,6 +179,15 @@ Install::SpiderpoolCR(){
SPIDERPOOL_VLAN200_GATEWAY_V4=172.200.0.1 SPIDERPOOL_VLAN200_GATEWAY_V6=fd00:172:200::1 + SPIDERPOOL_VLAN30_POOL_V4=172.30.0.0/16 + SPIDERPOOL_VLAN30_POOL_V6=fd00:172:30::/64 + SPIDERPOOL_VLAN30_RANGES_V4=172.30.0.201-172.30.10.199 + SPIDERPOOL_VLAN30_RANGES_V6=fd00:172:30::201-fd00:172:30::fff1 + SPIDERPOOL_VLAN40_POOL_V4=172.40.0.0/16 + SPIDERPOOL_VLAN40_POOL_V6=fd00:172:40::/64 + SPIDERPOOL_VLAN40_RANGES_V4=172.40.0.201-172.40.10.199 + SPIDERPOOL_VLAN40_RANGES_V6=fd00:172:40::201-fd00:172:40::fff1 + if [ "${E2E_SPIDERPOOL_ENABLE_SUBNET}" == "true" ] ; then CR_KIND="SpiderSubnet" echo "spiderpool subnet feature is on , install SpiderSubnet CR" @@ -177,6 +226,34 @@ EOF EOF } + INSTALL_KUBEVIRT_V4_CR(){ + cat <