diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 9744c232..724505aa 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -57,9 +57,16 @@ jobs: export E2E_DOCKER_CONFIG_JSON_SECRET=`cat ~/.docker/config.json| base64 -w 0` make e2e E2EARGS="-v=5 -skip-features 'pv expansion|custom config for dynamic|tools for exporter'" env: + E2E_AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + E2E_AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + E2E_BR_IMAGE: reg.vesoft-inc.com/cloud-dev/br-ent + E2E_BR_VERSION: v3.7.0 + E2E_GOOGLE_APPLICATION_CREDENTIALS: ${{ secrets.GOOGLE_APPLICATION_CREDENTIALS }} E2E_OPERATOR_IMAGE: reg.vesoft-inc.com/ci/nebula-operator:ci-e2e E2E_OPERATOR_INSTALL: "true" - E2E_NC_VERSION: v3.6.0 + E2E_NC_VERSION: v3.7.0 + E2E_NC_AGENT_IMAGE: reg.vesoft-inc.com/cloud-dev/nebula-agent + E2E_NC_AGENT_VERSION: v3.6.0 E2E_NC_GRAPHD_IMAGE: reg.vesoft-inc.com/vesoft-ent/nebula-graphd-ent E2E_NC_METAD_IMAGE: reg.vesoft-inc.com/vesoft-ent/nebula-metad-ent E2E_NC_STORAGED_IMAGE: reg.vesoft-inc.com/vesoft-ent/nebula-storaged-ent diff --git a/go.mod b/go.mod index ff8addcd..9c5cfb22 100644 --- a/go.mod +++ b/go.mod @@ -53,6 +53,24 @@ require ( github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect github.com/aws/aws-sdk-go v1.44.178 // indirect + github.com/aws/aws-sdk-go-v2 v1.25.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 // indirect + github.com/aws/aws-sdk-go-v2/config v1.27.0 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.17.0 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.50.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.19.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.27.0 // indirect + github.com/aws/smithy-go v1.20.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect diff --git a/go.sum b/go.sum index f474a18b..95520e87 100644 --- a/go.sum +++ b/go.sum @@ -63,6 +63,42 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.44.178 h1:4igreoWPEA7xVLnOeSXLhDXTsTSPKQONZcQ3llWAJw0= github.com/aws/aws-sdk-go v1.44.178/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v1.25.0 h1:sv7+1JVJxOu/dD/sz/csHX7jFqmP001TIY7aytBWDSQ= +github.com/aws/aws-sdk-go-v2 v1.25.0/go.mod h1:G104G1Aho5WqF+SR3mDIobTABQzpYV0WxMsKxlMggOA= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0 h1:2UO6/nT1lCZq1LqM67Oa4tdgP1CvL1sLSxvuD+VrOeE= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.0/go.mod h1:5zGj2eA85ClyedTDK+Whsu+w9yimnVIZvhvBKrDquM8= +github.com/aws/aws-sdk-go-v2/config v1.27.0 h1:J5sdGCAHuWKIXLeXiqr8II/adSvetkx0qdZwdbXXpb0= +github.com/aws/aws-sdk-go-v2/config v1.27.0/go.mod h1:cfh8v69nuSUohNFMbIISP2fhmblGmYEOKs5V53HiHnk= +github.com/aws/aws-sdk-go-v2/credentials v1.17.0 h1:lMW2x6sKBsiAJrpi1doOXqWFyEPoE886DTb1X0wb7So= +github.com/aws/aws-sdk-go-v2/credentials v1.17.0/go.mod 
h1:uT41FIH8cCIxOdUYIL0PYyHlL1NoneDuDSCwg5VE/5o= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0 h1:xWCwjjvVz2ojYTP4kBKUuUh9ZrXfcAXpflhOUUeXg1k= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.15.0/go.mod h1:j3fACuqXg4oMTQOR2yY7m0NmJY0yBK4L4sLsRXq1Ins= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0 h1:NPs/EqVO+ajwOoq56EfcGKa3L3ruWuazkIw1BqxwOPw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.0/go.mod h1:D+duLy2ylgatV+yTlQ8JTuLfDD0BnFvnQRc+o6tbZ4M= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0 h1:ks7KGMVUMoDzcxNWUlEdI+/lokMFD136EL6DWmUOV80= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.0/go.mod h1:hL6BWM/d/qz113fVitZjbXR0E+RCTU1+x+1Idyn5NgE= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0 h1:TkbRExyKSVHELwG9gz2+gql37jjec2R5vus9faTomwE= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.0/go.mod h1:T3/9xMKudHhnj8it5EqIrhvv11tVZqWYkKcot+BFStc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0 h1:a33HuFlO0KsveiP90IUJh8Xr/cx9US2PqkSroaLc+o8= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.0/go.mod h1:SxIkWpByiGbhbHYTo9CMTUnx2G4p4ZQMrDPcRRy//1c= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0 h1:UiSyK6ent6OKpkMJN3+k5HZ4sk4UfchEaaW5wv7SblQ= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.0/go.mod h1:l7kzl8n8DXoRyFz5cIMG70HnPauWa649TUhgw8Rq6lo= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0 h1:SHN/umDLTmFTmYfI+gkanz6da3vK8Kvj/5wkqnTHbuA= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.0/go.mod h1:l8gPU5RYGOFHJqWEpPMoRTP0VoaWQSkJdKo+hwWnnDA= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.0 h1:l5puwOHr7IxECuPMIuZG7UKOzAnF24v6t4l+Z5Moay4= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared 
v1.17.0/go.mod h1:Oov79flWa/n7Ni+lQC3z+VM7PoRM47omRqbJU9B5Y7E= +github.com/aws/aws-sdk-go-v2/service/s3 v1.50.0 h1:jZAdMD1ioZdqirzzVVRhpHHWJmcGGCn8JqDYBs5nmYA= +github.com/aws/aws-sdk-go-v2/service/s3 v1.50.0/go.mod h1:1o/W6JFUuREj2ExoQ21vHJgO7wakvjhol91M9eknFgs= +github.com/aws/aws-sdk-go-v2/service/sso v1.19.0 h1:u6OkVDxtBPnxPkZ9/63ynEe+8kHbtS5IfaC4PzVxzWM= +github.com/aws/aws-sdk-go-v2/service/sso v1.19.0/go.mod h1:YqbU3RS/pkDVu+v+Nwxvn0i1WB0HkNWEePWbmODEbbs= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0 h1:6DL0qu5+315wbsAEEmzK+P9leRwNbkp+lGjPC+CEvb8= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.22.0/go.mod h1:olUAyg+FaoFaL/zFaeQQONjOZ9HXoxgvI/c7mQTYz7M= +github.com/aws/aws-sdk-go-v2/service/sts v1.27.0 h1:cjTRjh700H36MQ8M0LnDn33W3JmwC77mdxIIyPWCdpM= +github.com/aws/aws-sdk-go-v2/service/sts v1.27.0/go.mod h1:nXfOBMWPokIbOY+Gi7a1psWMSvskUCemZzI+SMB7Akc= +github.com/aws/smithy-go v1.20.0 h1:6+kZsCXZwKxZS9RfISnPc4EXlHoyAkm2hPuM8X2BrrQ= +github.com/aws/smithy-go v1.20.0/go.mod h1:uo5RKksAl4PzhqaAbjd4rLgFoq5koTsQKYuGe7dklGc= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= diff --git a/tests/e2e/config/config.go b/tests/e2e/config/config.go index 7392b8ba..828a0ba8 100644 --- a/tests/e2e/config/config.go +++ b/tests/e2e/config/config.go @@ -58,6 +58,9 @@ type CommonConfig struct { // DockerConfigJsonSecret is the docker config file. 
// export E2E_DOCKER_CONFIG_JSON_SECRET=`cat ~/.docker/config.json| base64 -w 0` DockerConfigJsonSecret Base64Value `env:"E2E_DOCKER_CONFIG_JSON_SECRET"` + AWSAccessKey Base64Value `env:"E2E_AWS_ACCESS_KEY_ID"` + AWSSecretKey Base64Value `env:"E2E_AWS_SECRET_ACCESS_KEY"` + GSSecret Base64Value `env:"E2E_GOOGLE_APPLICATION_CREDENTIALS"` } type ClusterConfig struct { @@ -75,6 +78,10 @@ type OperatorConfig struct { type NebulaClusterConfig struct { ChartPath string `env:"E2E_NC_CHART_PATH,notEmpty,required" envDefault:"../../charts/nebula-cluster"` Version string `env:"E2E_NC_VERSION"` + AgentImage string `env:"E2E_NC_AGENT_IMAGE"` + AgentVersion string `env:"E2E_NC_AGENT_VERSION"` + BrImage string `env:"E2E_BR_IMAGE"` + BrVersion string `env:"E2E_BR_VERSION"` GraphdImage string `env:"E2E_NC_GRAPHD_IMAGE"` MetadImage string `env:"E2E_NC_METAD_IMAGE"` StoragedImage string `env:"E2E_NC_STORAGED_IMAGE"` diff --git a/tests/e2e/envfuncsext/backup.go b/tests/e2e/envfuncsext/backup.go new file mode 100644 index 00000000..b0c72744 --- /dev/null +++ b/tests/e2e/envfuncsext/backup.go @@ -0,0 +1,274 @@ +package envfuncsext + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + + appsv1alpha1 "github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1" +) + +type ( + NebulaBackupInstallOptions struct { + Name string + Namespace string + Spec appsv1alpha1.BackupSpec + CronBackupOps *NebulaCronBackupOptions + } + + NebulaCronBackupOptions struct { + Schedule string + TestPause bool + } + + nebulaBackupCtxKey struct { + backupType string + } + + NebulaBackupCtxValue struct { + // general fields + Name string + Namespace string + BackupFileName string + StorageType string + BucketName string + Region string + CleanBackupData bool + + // for 
cron backup only + Schedule string + TestPause bool + TriggeredBackupName string + BackupSpec appsv1alpha1.BackupSpec + } + + NebulaBackupOption func(*NebulaBackupOptions) + NebulaBackupOptions struct { + WaitOptions []wait.Option + } +) + +func DeployNebulaBackup(incremental bool, nbCtx NebulaBackupInstallOptions) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + namespaceToUse := cfg.Namespace() + if nbCtx.Namespace != "" { + namespaceToUse = nbCtx.Namespace + } + + nb := &appsv1alpha1.NebulaBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: nbCtx.Name, + Namespace: namespaceToUse, + }, + Spec: nbCtx.Spec, + } + + ctx, err := CreateObject(nb)(ctx, cfg) + if err != nil { + return ctx, fmt.Errorf("error creating nebula backup [%v/%v]: %v", namespaceToUse, nbCtx.Name, err) + } + + key := nebulaBackupCtxKey{backupType: "base"} + if incremental { + key = nebulaBackupCtxKey{backupType: "incr"} + } + + var stoType, region, bucketName string + if nb.Spec.Config.S3 != nil { + stoType = "S3" + region = nb.Spec.Config.S3.Region + bucketName = nb.Spec.Config.S3.Bucket + } else if nb.Spec.Config.GS != nil { + stoType = "GS" + region = nb.Spec.Config.GS.Location + bucketName = nb.Spec.Config.GS.Bucket + } + + return context.WithValue(ctx, key, &NebulaBackupCtxValue{ + Name: nbCtx.Name, + Namespace: namespaceToUse, + StorageType: stoType, + Region: region, + BucketName: bucketName, + CleanBackupData: *nb.Spec.CleanBackupData, + }), nil + } +} + +func GetNebulaBackupCtxValue(incremental bool, ctx context.Context) *NebulaBackupCtxValue { + key := nebulaBackupCtxKey{backupType: "base"} + if incremental { + key = nebulaBackupCtxKey{backupType: "incr"} + } + + v := ctx.Value(key) + data, _ := v.(*NebulaBackupCtxValue) + return data +} + +func WaitNebulaBackupFinished(incremental bool, opts ...NebulaBackupOption) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + o := 
(&NebulaBackupOptions{}).WithOptions(opts...) + + backupContextValue := GetNebulaBackupCtxValue(incremental, ctx) + + nb := &appsv1alpha1.NebulaBackup{} + if err := wait.For(func(ctx context.Context) (done bool, err error) { + err = cfg.Client().Resources().Get(ctx, backupContextValue.Name, backupContextValue.Namespace, nb) + if err != nil { + klog.ErrorS(err, "Get NebulaBackup failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return true, err + } + + klog.V(4).InfoS("Waiting for NebulaBackup to complete", + "namespace", nb.Namespace, "name", nb.Name, + "generation", nb.Generation, + ) + + if nb.Status.Phase == appsv1alpha1.BackupComplete { + return true, nil + } + + if nb.Status.Phase == appsv1alpha1.BackupFailed || nb.Status.Phase == appsv1alpha1.BackupInvalid { + return true, fmt.Errorf("nebula backup [%v/%v] has failed", nb.Namespace, nb.Name) + } + + return false, nil + }, o.WaitOptions...); err != nil { + klog.ErrorS(err, "Waiting for NebulaBackup to complete failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return ctx, err + } + + klog.InfoS("Waiting for NebulaBackup to complete successful", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name, "backup file name", nb.Status.BackupName) + + key := nebulaBackupCtxKey{backupType: "base"} + if incremental { + key = nebulaBackupCtxKey{backupType: "incr"} + } + + return context.WithValue(ctx, key, &NebulaBackupCtxValue{ + Name: backupContextValue.Name, + Namespace: backupContextValue.Namespace, + BackupFileName: nb.Status.BackupName, + StorageType: backupContextValue.StorageType, + Region: backupContextValue.Region, + BucketName: backupContextValue.BucketName, + CleanBackupData: backupContextValue.CleanBackupData, + }), nil + } +} + +func (o *NebulaBackupOptions) WithOptions(opts ...NebulaBackupOption) *NebulaBackupOptions { + for _, opt := range opts { + opt(o) + } + return o +} + +func WithNebulaBackupWaitOptions(opts 
...wait.Option) NebulaBackupOption { + return func(o *NebulaBackupOptions) { + o.WaitOptions = append(o.WaitOptions, opts...) + } +} + +func DeleteNebulaBackup(incremental bool) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + backupContextValue := GetNebulaBackupCtxValue(incremental, ctx) + + nb := &appsv1alpha1.NebulaBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupContextValue.Name, + Namespace: backupContextValue.Namespace, + }, + } + + ctx, err := DeleteObject(nb)(ctx, cfg) + if err != nil { + return ctx, fmt.Errorf("error deleting nebula backup [%v/%v]: %v", backupContextValue.Namespace, backupContextValue.Name, err) + } + + return ctx, nil + } +} + +func WaitForCleanBackup(incremental bool, opts ...NebulaBackupOption) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + o := (&NebulaBackupOptions{}).WithOptions(opts...) + + backupContextValue := GetNebulaBackupCtxValue(incremental, ctx) + + nb := &appsv1alpha1.NebulaBackup{} + if err := wait.For(func(ctx context.Context) (done bool, err error) { + err = cfg.Client().Resources().Get(ctx, backupContextValue.Name, backupContextValue.Namespace, nb) + if err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return true, fmt.Errorf("error deleting nebula backup [%v/%v]: %v", backupContextValue.Namespace, backupContextValue.Name, err) + } + + if nb.Status.Phase == appsv1alpha1.BackupComplete || nb.Status.Phase == appsv1alpha1.BackupClean { + klog.V(4).InfoS("Waiting for NebulaBackup cleanup to complete", + "namespace", nb.Namespace, "name", nb.Name, "file name", backupContextValue.BackupFileName, + "generation", nb.Generation, + ) + return false, nil + } + + return true, fmt.Errorf("nebula backup clean for [%v/%v] has failed", backupContextValue.Namespace, backupContextValue.Name) + }, o.WaitOptions...); err != nil { + klog.ErrorS(err, "Waiting for NebulaBackup clean to complete failed", "namespace", 
backupContextValue.Namespace, "name", backupContextValue.Name, "file name", backupContextValue.BackupFileName) + return ctx, err + } + klog.InfoS("Waiting for NebulaBackup clean to complete successful", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name, "file name", backupContextValue.BackupFileName) + + return ctx, nil + } +} + +func CreateServiceAccount(namespace, name string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + if ctx, err := CreateObject(sa)(ctx, cfg); err != nil { + if apierrors.IsAlreadyExists(err) { + klog.Infof("service account [%s/%s] already exists", sa.Namespace, sa.Name) + return ctx, nil + } + return ctx, err + } + + klog.Infof("Service account [%s/%s] created successfully", namespace, name) + return ctx, nil + } +} + +func DeleteServiceAccount(namespace, name string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + if err := cfg.Client().Resources().Delete(ctx, sa); err != nil { + return ctx, err + } + + klog.Infof("Service account [%s/%s] deleted successfully", namespace, name) + return ctx, nil + } +} diff --git a/tests/e2e/envfuncsext/cronbackup.go b/tests/e2e/envfuncsext/cronbackup.go new file mode 100644 index 00000000..29e50053 --- /dev/null +++ b/tests/e2e/envfuncsext/cronbackup.go @@ -0,0 +1,272 @@ +package envfuncsext + +import ( + "context" + "fmt" + + "k8s.io/klog/v2" + "k8s.io/utils/pointer" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + + appsv1alpha1 "github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + 
+func DeployNebulaCronBackup(incremental bool, nbCtx NebulaBackupInstallOptions) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + namespaceToUse := cfg.Namespace() + if nbCtx.Namespace != "" { + namespaceToUse = nbCtx.Namespace + } + + disable := false + ncb := &appsv1alpha1.NebulaCronBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: nbCtx.Name, + Namespace: namespaceToUse, + }, + Spec: appsv1alpha1.CronBackupSpec{ + Schedule: nbCtx.CronBackupOps.Schedule, + Pause: &disable, + BackupTemplate: nbCtx.Spec, + }, + } + + ctx, err := CreateObject(ncb)(ctx, cfg) + if err != nil { + return ctx, fmt.Errorf("error creating nebula cron backup [%v/%v]: %v", namespaceToUse, nbCtx.Name, err) + } + + key := nebulaBackupCtxKey{backupType: "base"} + if incremental { + key = nebulaBackupCtxKey{backupType: "incr"} + } + + var stoType, region, bucketName string + if ncb.Spec.BackupTemplate.Config.S3 != nil { + stoType = "S3" + region = ncb.Spec.BackupTemplate.Config.S3.Region + bucketName = ncb.Spec.BackupTemplate.Config.S3.Bucket + } else if ncb.Spec.BackupTemplate.Config.GS != nil { + stoType = "GS" + region = ncb.Spec.BackupTemplate.Config.GS.Location + bucketName = ncb.Spec.BackupTemplate.Config.GS.Bucket + } + + return context.WithValue(ctx, key, &NebulaBackupCtxValue{ + Name: nbCtx.Name, + Namespace: namespaceToUse, + StorageType: stoType, + Region: region, + BucketName: bucketName, + CleanBackupData: *ncb.Spec.BackupTemplate.CleanBackupData, + Schedule: nbCtx.CronBackupOps.Schedule, + TestPause: nbCtx.CronBackupOps.TestPause, + BackupSpec: *nbCtx.Spec.DeepCopy(), + }), nil + } +} + +func WaitNebulaCronBackupFinished(incremental bool, opts ...NebulaBackupOption) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + o := (&NebulaBackupOptions{}).WithOptions(opts...) 
+ + backupContextValue := GetNebulaBackupCtxValue(incremental, ctx) + + ncb := &appsv1alpha1.NebulaCronBackup{} + nb := &appsv1alpha1.NebulaBackup{} + if err := wait.For(func(ctx context.Context) (done bool, err error) { + err = cfg.Client().Resources().Get(ctx, backupContextValue.Name, backupContextValue.Namespace, ncb) + if err != nil { + klog.ErrorS(err, "Get NebulaCronBackup failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return true, err + } + + if pointer.BoolDeref(ncb.Spec.Pause, false) { + err = fmt.Errorf("NebulaCronBackup is still paused") + klog.ErrorS(err, "check NebulaCronBackup failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return true, err + } + + if ncb.Status.LastBackup == "" || backupContextValue.BackupFileName == ncb.Status.LastBackup { + klog.V(4).InfoS("Waiting for NebulaCronBackup to trigger backup", + "namespace", ncb.Namespace, "name", ncb.Name, + "generation", ncb.Generation, + ) + return false, nil + } + + err = cfg.Client().Resources().Get(ctx, ncb.Status.LastBackup, backupContextValue.Namespace, nb) + if err != nil { + klog.ErrorS(err, "Get NebulaBackup failed", "namespace", backupContextValue.Namespace, "name", ncb.Status.LastBackup) + return true, err + } + + if nb.Status.Phase == appsv1alpha1.BackupComplete { + return true, nil + } + + if nb.Status.Phase == appsv1alpha1.BackupFailed { + return true, fmt.Errorf("nebula backup [%v/%v] has failed", nb.Namespace, nb.Name) + } + + klog.V(4).InfoS("Waiting for backup triggered by NebulaCronBackup to complete", + "namespace", ncb.Namespace, "name", ncb.Name, + "generation", ncb.Generation, "triggered backup name", ncb.Status.LastBackup, + ) + + return false, nil + }, o.WaitOptions...); err != nil { + if ncb.Status.LastBackup == "" { + klog.ErrorS(err, "Waiting for NebulaCronBackup to complete failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + } else { + klog.ErrorS(err, "Waiting 
for NebulaBackup triggered by NebulaCronBackup to complete failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name, "triggered backup name", ncb.Status.LastBackup) + } + return ctx, err + } + + klog.InfoS("Waiting for NebulaBackup triggered by NebulaCronBackup to complete successful", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name, "triggered backup name", nb.Name, "backup file name", nb.Status.BackupName) + + key := nebulaBackupCtxKey{backupType: "base"} + if incremental { + key = nebulaBackupCtxKey{backupType: "incr"} + } + + return context.WithValue(ctx, key, &NebulaBackupCtxValue{ + Name: backupContextValue.Name, + Namespace: backupContextValue.Namespace, + BackupFileName: nb.Status.BackupName, + StorageType: backupContextValue.StorageType, + Region: backupContextValue.Region, + BucketName: backupContextValue.BucketName, + CleanBackupData: backupContextValue.CleanBackupData, + Schedule: backupContextValue.Schedule, + TestPause: backupContextValue.TestPause, + TriggeredBackupName: nb.Name, + BackupSpec: backupContextValue.BackupSpec, + }), nil + } +} + +func SetCronBackupPause(incremental, pause bool) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + backupContextValue := GetNebulaBackupCtxValue(incremental, ctx) + + ncb := &appsv1alpha1.NebulaCronBackup{} + err := cfg.Client().Resources().Get(ctx, backupContextValue.Name, backupContextValue.Namespace, ncb) + if err != nil { + klog.ErrorS(err, "Get NebulaCronBackup failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return ctx, err + } + + ncb.Spec.Pause = &pause + + err = cfg.Client().Resources().Update(ctx, ncb) + if err != nil { + if pause { + return ctx, fmt.Errorf("error pausing nebula cron backup [%v/%v]: %v", backupContextValue.Namespace, backupContextValue.Name, err) + } else { + return ctx, fmt.Errorf("error resuming nebula cron backup [%v/%v]: %v", 
backupContextValue.Namespace, backupContextValue.Name, err) + } + } + + return ctx, nil + } +} + +func CheckCronBackupPaused(incremental bool, opts ...NebulaBackupOption) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + o := (&NebulaBackupOptions{}).WithOptions(opts...) + + backupContextValue := GetNebulaBackupCtxValue(incremental, ctx) + + ncb := &appsv1alpha1.NebulaCronBackup{} + firstTime := true + if err := wait.For(func(ctx context.Context) (done bool, err error) { + err = cfg.Client().Resources().Get(ctx, backupContextValue.Name, backupContextValue.Namespace, ncb) + if err != nil { + klog.ErrorS(err, "Get NebulaCronBackup failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return true, err + } + + if !pointer.BoolDeref(ncb.Spec.Pause, false) { + err = fmt.Errorf("nebula cron backup is not paused") + klog.ErrorS(err, "Pausing NebulaCronBackup failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return false, err + } + + if !firstTime { + if ncb.Status.LastBackup != backupContextValue.TriggeredBackupName { + err = fmt.Errorf("nebula cron backup was not paused successfully. New backup was triggered. Backup name %v does not match previous backup name %v", ncb.Status.LastBackup, backupContextValue.TriggeredBackupName) + klog.ErrorS(err, "Pausing NebulaCronBackup failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return true, err + } + return true, nil + } else { + klog.V(4).Infof("NebulaCronBackup [%v/%v] was just paused. 
Will check if pause was successful during the next duration.", backupContextValue.Namespace, backupContextValue.Name) + firstTime = false + return false, nil + } + }, o.WaitOptions...); err != nil { + klog.ErrorS(err, "Waiting for NebulaCronBackup to pause failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return ctx, err + } + + klog.InfoS("Waiting for NebulaCronBackup to pause successful", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name) + return ctx, nil + } +} + +func DeleteNebulaCronBackup(incremental bool) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + backupContextValue := GetNebulaBackupCtxValue(incremental, ctx) + + ncb := &appsv1alpha1.NebulaCronBackup{ + ObjectMeta: metav1.ObjectMeta{ + Name: backupContextValue.Name, + Namespace: backupContextValue.Namespace, + }, + } + ctx, err := DeleteObject(ncb)(ctx, cfg) + if err != nil { + return ctx, fmt.Errorf("error deleting nebula cron backup [%v/%v]: %v", backupContextValue.Namespace, backupContextValue.Name, err) + } + + return ctx, nil + } +} + +func WaitForCleanCronBackup(incremental bool, opts ...NebulaBackupOption) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + o := (&NebulaBackupOptions{}).WithOptions(opts...) 
+ + backupContextValue := GetNebulaBackupCtxValue(incremental, ctx) + + ncb := &appsv1alpha1.NebulaCronBackup{} + if err := wait.For(func(ctx context.Context) (done bool, err error) { + err = cfg.Client().Resources().Get(ctx, backupContextValue.Name, backupContextValue.Namespace, ncb) + if err != nil { + if apierrors.IsNotFound(err) { + return true, nil + } + return true, fmt.Errorf("error deleting nebula cron backup [%v/%v]: %v", backupContextValue.Namespace, backupContextValue.Name, err) + } + + klog.V(4).InfoS("Waiting for NebulaCronBackup cleanup to complete", + "namespace", ncb.Namespace, "name", ncb.Name, "file name", backupContextValue.BackupFileName, + "generation", ncb.Generation, + ) + return false, nil + + }, o.WaitOptions...); err != nil { + klog.ErrorS(err, "Waiting for NebulaCronBackup clean to complete failed", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name, "file name", backupContextValue.BackupFileName) + return ctx, err + } + klog.InfoS("Waiting for NebulaCronBackup clean to complete successful", "namespace", backupContextValue.Namespace, "name", backupContextValue.Name, "file name", backupContextValue.BackupFileName) + + return ctx, nil + } +} diff --git a/tests/e2e/envfuncsext/nebula-command.go b/tests/e2e/envfuncsext/nebula-command.go new file mode 100644 index 00000000..6aeb3820 --- /dev/null +++ b/tests/e2e/envfuncsext/nebula-command.go @@ -0,0 +1,181 @@ +package envfuncsext + +import ( + "context" + "fmt" + "time" + + "k8s.io/klog/v2" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + + nebulaclient "github.com/vesoft-inc/nebula-go/v3" + appsv1alpha1 "github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1" + "github.com/vesoft-inc/nebula-operator/tests/e2e/e2eutils" +) + +type ( + NebulaCommandOptions struct { + ClusterName string + ClusterNamespace string + Username string + Password string + Space string + } + + nebulaCommandCtxKey struct { + clusterNamespace string + clusterName 
string + } + + NebulaCommandCtxValue struct { + Results nebulaclient.ResultSet + } +) + +func GetNebulaCommandCtxValue(clusterNamespace, clusterName string, ctx context.Context) *NebulaCommandCtxValue { + v := ctx.Value(nebulaCommandCtxKey{ + clusterNamespace: clusterNamespace, + clusterName: clusterName, + }) + data, _ := v.(*NebulaCommandCtxValue) + return data +} + +func RunNGCommand(cmdCtx NebulaCommandOptions, cmd string) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + nc := &appsv1alpha1.NebulaCluster{} + err := cfg.Client().Resources().Get(ctx, cmdCtx.ClusterName, cmdCtx.ClusterNamespace, nc) + if err != nil { + klog.ErrorS(err, "Get NebulaCluster failed", "namespace", cmdCtx.ClusterNamespace, "name", cmdCtx.ClusterName) + return ctx, err + } + + podName := nc.GraphdComponent().GetPodName(0) + thriftPort := int(nc.GraphdComponent().GetPort(appsv1alpha1.GraphdPortNameThrift)) + localPorts, stopChan, err := e2eutils.PortForward( + e2eutils.WithRestConfig(cfg.Client().RESTConfig()), + e2eutils.WithPod(nc.GetNamespace(), podName), + e2eutils.WithAddress("localhost"), + e2eutils.WithPorts(thriftPort), + ) + if err != nil { + klog.ErrorS(err, "Unable to run command. Port forward failed.", + "namespace", nc.GetNamespace(), + "name", podName, + "ports", []int{thriftPort}, + "command", cmd, + ) + return ctx, err + } + defer close(stopChan) + + pool, err := nebulaclient.NewSslConnectionPool( + []nebulaclient.HostAddress{{Host: "localhost", Port: localPorts[0]}}, + nebulaclient.PoolConfig{ + MaxConnPoolSize: 10, + }, + nil, + nebulaclient.DefaultLogger{}, + ) + if err != nil { + klog.ErrorS(err, "Unable to run command. Create graph connection pool failed", + "namespace", nc.GetNamespace(), + "name", podName, + "ports", []int{thriftPort}, + "command", cmd, + ) + return ctx, err + } + defer pool.Close() + + session, err := pool.GetSession("root", "nebula") + if err != nil { + klog.ErrorS(err, "Unable to run command. 
Create graph connection session failed", + "namespace", nc.GetNamespace(), + "name", podName, + "ports", []int{thriftPort}, + "command", cmd, + ) + return ctx, err + } + defer session.Release() + + result, err := session.Execute(cmd) + if err != nil { + klog.ErrorS(err, "Unable to run command. Graph exec failed", + "namespace", nc.GetNamespace(), + "name", podName, + "ports", []int{thriftPort}, + "command", cmd, + ) + return ctx, err + } + + if result != nil { + return context.WithValue(ctx, nebulaCommandCtxKey{ + clusterNamespace: cmdCtx.ClusterNamespace, + clusterName: cmdCtx.ClusterName, + }, &NebulaCommandCtxValue{ + Results: *result, + }), nil + } + + return ctx, nil + } +} + +func RunGetStats(ctx context.Context, cfg *envconf.Config, space string, ngOptions NebulaCommandOptions) (*nebulaclient.ResultSet, error) { + ctx, err := RunNGCommand(ngOptions, fmt.Sprintf("USE %v;submit job stats", space))(ctx, cfg) + if err != nil { + return nil, fmt.Errorf("failed to submit stats job: %v", err) + } + + time.Sleep(3 * time.Second) //sleep 3 seconds to allow stats job to finish running + + ctx, err = RunNGCommand(ngOptions, fmt.Sprintf("USE %v;show stats", space))(ctx, cfg) + if err != nil { + return nil, fmt.Errorf("failed to show stats: %v", err) + } + + commandContext := GetNebulaCommandCtxValue(ngOptions.ClusterNamespace, ngOptions.ClusterName, ctx) + + return &commandContext.Results, nil +} + +func NGResultsEqual(resA, resB nebulaclient.ResultSet) bool { + if resA.IsEmpty() || resB.IsEmpty() { + return false + } + + arrA := resA.AsStringTable() + arrB := resB.AsStringTable() + + if len(arrA) != len(arrB) { + return false + } + + for i := 0; i < len(arrA); i++ { + if len(arrA[i]) != len(arrB[i]) { + return false + } + for j := 0; j < len(arrA[i]); j++ { + if arrA[i][j] != arrB[i][j] { + return false + } + } + } + + return true +} + +func PrintCommandResults(res nebulaclient.ResultSet) { + arr := res.AsStringTable() + + for i := 0; i < len(arr); i++ { + for j := 0; 
j < len(arr[i]); j++ { + klog.V(4).Infof("%v \n", arr[i][j]) + } + klog.V(4).Info() + } +} diff --git a/tests/e2e/envfuncsext/nebulacluster-ready-func.go b/tests/e2e/envfuncsext/nebulacluster-ready-func.go index d2935e81..447793b1 100644 --- a/tests/e2e/envfuncsext/nebulacluster-ready-func.go +++ b/tests/e2e/envfuncsext/nebulacluster-ready-func.go @@ -25,6 +25,7 @@ import ( "math" "net/http" "regexp" + "strconv" "strings" "github.com/google/go-cmp/cmp" @@ -578,8 +579,45 @@ func isComponentStatefulSetExpected(ctx context.Context, cfg *envconf.Config, co env := component.ComponentSpec().PodEnvVars() if len(env) == 0 { - env = nil + script := ` +set -x + +if [ ! -s "metadata/flags.json" ]; then + echo "flags.json is empty" + exit 0 +fi +while : +do + curl -i -X PUT -H "Content-Type: application/json" -d @/metadata/flags.json -s "http://${MY_IP}:${HTTP_PORT}/flags" + if [ $? -eq 0 ] + then + break + fi + sleep 1 +done +` + ports := component.GenerateContainerPorts() + env = []corev1.EnvVar{ + { + Name: "MY_IP", + ValueFrom: &corev1.EnvVarSource{ + FieldRef: &corev1.ObjectFieldSelector{ + APIVersion: "v1", + FieldPath: "status.podIP", + }, + }, + }, + { + Name: "HTTP_PORT", + Value: strconv.Itoa(int(ports[1].ContainerPort)), + }, + { + Name: "SCRIPT", + Value: script, + }, + } } + nodeSelector := component.ComponentSpec().NodeSelector() if len(nodeSelector) == 0 { nodeSelector = nil diff --git a/tests/e2e/envfuncsext/restore.go b/tests/e2e/envfuncsext/restore.go new file mode 100644 index 00000000..316e3af9 --- /dev/null +++ b/tests/e2e/envfuncsext/restore.go @@ -0,0 +1,180 @@ +package envfuncsext + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + "sigs.k8s.io/e2e-framework/klient/wait" + "sigs.k8s.io/e2e-framework/pkg/env" + "sigs.k8s.io/e2e-framework/pkg/envconf" + + appsv1alpha1 "github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1" +) + +type ( + NebulaRestoreInstallOptions struct { + Name string + Namespace 
string + Spec appsv1alpha1.RestoreSpec + } + + nebulaRestoreCtxKey struct{} + + NebulaRestoreCtxValue struct { + Name string + Namespace string + BackupFileName string + StorageType string + BucketName string + RestoreClusterNamespace string + RestoreClusterName string + } + + NebulaRestoreOption func(*NebulaRestoreOptions) + NebulaRestoreOptions struct { + WaitOptions []wait.Option + } +) + +func DeployNebulaRestore(nbCtx NebulaRestoreInstallOptions) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + namespaceToUse := cfg.Namespace() + if nbCtx.Namespace != "" { + namespaceToUse = nbCtx.Namespace + } + + nr := &appsv1alpha1.NebulaRestore{ + ObjectMeta: metav1.ObjectMeta{ + Name: nbCtx.Name, + Namespace: namespaceToUse, + }, + Spec: nbCtx.Spec, + } + + ctx, err := CreateObject(nr)(ctx, cfg) + if err != nil { + return ctx, fmt.Errorf("error creating nebula restore [%v/%v]: %v", namespaceToUse, nbCtx.Name, err) + } + + var stoType, bucketName string + if nr.Spec.Config.S3 != nil { + stoType = "S3" + bucketName = nr.Spec.Config.S3.Bucket + } else if nr.Spec.Config.GS != nil { + stoType = "GS" + bucketName = nr.Spec.Config.GS.Bucket + } + + return context.WithValue(ctx, nebulaRestoreCtxKey{}, &NebulaRestoreCtxValue{ + Name: nbCtx.Name, + Namespace: namespaceToUse, + StorageType: stoType, + BucketName: bucketName, + }), nil + } +} + +func GetNebulaRestoreCtxValue(ctx context.Context) *NebulaRestoreCtxValue { + v := ctx.Value(nebulaRestoreCtxKey{}) + data, _ := v.(*NebulaRestoreCtxValue) + return data +} + +func WaitNebulaRestoreFinished(opts ...NebulaRestoreOption) env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + o := (&NebulaRestoreOptions{}).WithOptions(opts...) 
+ + restoreContextValue := GetNebulaRestoreCtxValue(ctx) + + nr := &appsv1alpha1.NebulaRestore{} + if err := wait.For(func(ctx context.Context) (done bool, err error) { + err = cfg.Client().Resources().Get(ctx, restoreContextValue.Name, restoreContextValue.Namespace, nr) + if err != nil { + klog.ErrorS(err, "Get NebulaRestore failed", "namespace", restoreContextValue.Namespace, "name", restoreContextValue.Name) + return false, err + } + klog.V(4).InfoS("Waiting for NebulaRestore to complete", + "namespace", nr.Namespace, "name", nr.Name, + "generation", nr.Generation, "backup filename", restoreContextValue.BackupFileName, + "storage type", restoreContextValue.StorageType, "bucket name", restoreContextValue.BucketName, + ) + + if nr.Status.Phase == appsv1alpha1.RestoreComplete { + return true, nil + } + + if nr.Status.Phase == appsv1alpha1.RestoreFailed { + return true, fmt.Errorf("nebula restore [%v/%v] has failed", nr.Namespace, nr.Name) + } + + return false, nil + }, o.WaitOptions...); err != nil { + klog.ErrorS(err, "Waiting for NebulaRestore to complete failed", "namespace", restoreContextValue.Namespace, "name", restoreContextValue.Name) + return ctx, err + } + + klog.InfoS("Waiting for NebulaRestore to complete successful", "namespace", restoreContextValue.Namespace, "name", restoreContextValue.Name) + + return context.WithValue(ctx, nebulaRestoreCtxKey{}, &NebulaRestoreCtxValue{ + Name: restoreContextValue.Name, + Namespace: restoreContextValue.Namespace, + StorageType: restoreContextValue.StorageType, + BucketName: restoreContextValue.BucketName, + RestoreClusterNamespace: cfg.Namespace(), + RestoreClusterName: nr.Status.ClusterName, + }), nil + } +} + +func (o *NebulaRestoreOptions) WithOptions(opts ...NebulaRestoreOption) *NebulaRestoreOptions { + for _, opt := range opts { + opt(o) + } + return o +} + +func WithNebulaRestoreWaitOptions(opts ...wait.Option) NebulaRestoreOption { + return func(o *NebulaRestoreOptions) { + o.WaitOptions = 
append(o.WaitOptions, opts...) + } +} + +func DeleteNebulaRestore() env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + restoreContextValue := GetNebulaRestoreCtxValue(ctx) + + nr := &appsv1alpha1.NebulaRestore{ + ObjectMeta: metav1.ObjectMeta{ + Name: restoreContextValue.Name, + Namespace: restoreContextValue.Namespace, + }, + } + ctx, err := DeleteObject(nr)(ctx, cfg) + if err != nil { + return ctx, fmt.Errorf("error deleting nebula restore [%v/%v]: %v", restoreContextValue.Namespace, restoreContextValue.Name, err) + } + + return ctx, nil + } +} + +func DeleteNebulaRestoredCluster() env.Func { + return func(ctx context.Context, cfg *envconf.Config) (context.Context, error) { + restoreContextValue := GetNebulaRestoreCtxValue(ctx) + + nc := &appsv1alpha1.NebulaCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: restoreContextValue.RestoreClusterName, + Namespace: restoreContextValue.RestoreClusterNamespace, + }, + } + ctx, err := DeleteObject(nc)(ctx, cfg) + if err != nil { + return ctx, fmt.Errorf("error deleting nebula restore cluster [%v/%v]: %v", restoreContextValue.RestoreClusterNamespace, restoreContextValue.RestoreClusterName, err) + } + + return ctx, nil + } +} diff --git a/tests/e2e/main_test.go b/tests/e2e/main_test.go index 398c6422..7b06273d 100644 --- a/tests/e2e/main_test.go +++ b/tests/e2e/main_test.go @@ -37,9 +37,22 @@ import ( const ( ImagePullSecretName = "image-pull-secret.e2e" + AWSSecretName = "aws-secret.e2e" + AWSRegion = "default" + AWSBucketName = "nebula-operator-e2e-test" + AWSBucketEndpoint = "http://oss.vesoft-inc.com:9000" + + GSLocation = "us-east-5" + GSSecretName = "gs-secret" + GSBucketName = "nebula-operator-e2e-test" LabelKeyCategory = "category" LabelKeyGroup = "group" + + Username = "root" + Password = "nebula" + Space = "e2e_ldbc_snb" + SaName = "nebula-sa" ) var ( diff --git a/tests/e2e/nebulacluster_backup_test.go b/tests/e2e/nebulacluster_backup_test.go new file mode 100644 index 
00000000..357ca0f8 --- /dev/null +++ b/tests/e2e/nebulacluster_backup_test.go @@ -0,0 +1,309 @@ +/* +Copyright 2023 Vesoft Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package e2e + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "sigs.k8s.io/e2e-framework/third_party/helm" + + "github.com/vesoft-inc/nebula-operator/tests/e2e/config" + "github.com/vesoft-inc/nebula-operator/tests/e2e/e2ematcher" + "github.com/vesoft-inc/nebula-operator/tests/e2e/envfuncsext" + + appsv1alpha1 "github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1" +) + +const ( + LabelCategoryBackup = "backup" + LabelGroupBasic = "basic" +) + +var testCasesBackup []ncTestCase + +func init() { + testCasesBackup = append(testCasesBackup, testCasesBasicBackup...) 
+} + +// helper variables +var enable, disable bool = true, false + +var storageS3 = appsv1alpha1.StorageProvider{ + S3: &appsv1alpha1.S3StorageProvider{ + Region: AWSRegion, + Bucket: AWSBucketName, + Endpoint: AWSBucketEndpoint, + SecretName: AWSSecretName, + }, +} + +var storageGS = appsv1alpha1.StorageProvider{ + GS: &appsv1alpha1.GsStorageProvider{ + Location: GSLocation, + Bucket: GSBucketName, + SecretName: GSSecretName, + }, +} + +var basicBackupSpec = appsv1alpha1.BackupSpec{ + Image: config.C.NebulaGraph.BrImage, + Version: config.C.NebulaGraph.BrVersion, + Resources: corev1.ResourceRequirements{ + Limits: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("200m"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + ImagePullSecrets: []corev1.LocalObjectReference{ + { + Name: ImagePullSecretName, + }, + }, + AutoRemoveFinished: &disable, + CleanBackupData: &disable, + Config: &appsv1alpha1.BackupConfig{ + StorageProvider: storageS3, + }, +} + +var cronBackupOps = envfuncsext.NebulaCronBackupOptions{ + Schedule: "* * * * *", + TestPause: true, +} + +var basicRestoreSpec = appsv1alpha1.RestoreSpec{ + AutoRemoveFailed: disable, + Config: &appsv1alpha1.RestoreConfig{ + Concurrency: 3, + StorageProvider: storageS3, + }, +} + +// test cases about backup +var testCasesBasicBackup = []ncTestCase{ + { + Name: "NebulaGraph Backup Basic Tests", + Labels: map[string]string{ + LabelKeyCategory: LabelCategoryBackup, + LabelKeyGroup: LabelGroupBasic, + }, + InstallNCOptions: []envfuncsext.NebulaClusterOption{ + envfuncsext.WithNebulaClusterHelmRawOptions( + helm.WithArgs( + "--set", "nebula.enableBR=true", + ), + ), + }, + InstallWaitNCOptions: []envfuncsext.NebulaClusterOption{ + envfuncsext.WithNebulaClusterReadyFuncs( + 
envfuncsext.NebulaClusterReadyFuncForFields(false, map[string]any{ + "Spec": map[string]any{ + "Graphd": map[string]any{ + "Replicas": e2ematcher.ValidatorEq(2), + }, + "Metad": map[string]any{ + "Replicas": e2ematcher.ValidatorEq(3), + }, + "Storaged": map[string]any{ + "Replicas": e2ematcher.ValidatorEq(3), + "EnableAutoBalance": e2ematcher.ValidatorEq(false), + }, + }, + }), + envfuncsext.DefaultNebulaClusterReadyFunc, + ), + }, + LoadLDBC: true, + UpgradeCases: nil, + BackupCases: []ncBackupCase{ + { + Name: "basic backup with S3", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-basic-s3", + Spec: *basicBackupSpec.DeepCopy(), + }, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-basic-s3", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "basic backup with S3 and auto delete", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-basic-s3-auto", + Spec: *basicBackupSpec.DeepCopy(), + }, + BackupUpdateOptions: map[string]any{ + "autoRemoveFinished": &enable, + "cleanBackupData": &enable, + }, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-basic-s3-auto", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "incremental backup with S3", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-incr-s3", + Spec: *basicBackupSpec.DeepCopy(), + }, + Incremental: true, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-incr-s3", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "basic backup with S3 across namespaces", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-basic-ns-s3", + Namespace: "default", + Spec: *basicBackupSpec.DeepCopy(), + }, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-basic-ns-s3", + Namespace: "default", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "incremental backup with S3 across 
namespaces", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-incr-ns-s3", + Namespace: "default", + Spec: *basicBackupSpec.DeepCopy(), + }, + Incremental: true, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-incr-ns-s3", + Namespace: "default", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "cron backup with S3", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-cron-s3", + Spec: *basicBackupSpec.DeepCopy(), + CronBackupOps: &cronBackupOps, + }, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-cron-s3", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "basic backup with gs", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-basic-gs", + Spec: *basicBackupSpec.DeepCopy(), + }, + BackupUpdateOptions: map[string]any{ + "storageProvider": storageGS, + }, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-basic-gs", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "basic backup with gs and auto delete", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-basic-gs-auto", + Spec: *basicBackupSpec.DeepCopy(), + }, + BackupUpdateOptions: map[string]any{ + "storageProvider": storageGS, + "autoRemoveFinished": &enable, + "cleanBackupData": &enable, + }, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-basic-gs-auto", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "incremental backup with gs", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-incr-gs-auto", + Spec: *basicBackupSpec.DeepCopy(), + }, + BackupUpdateOptions: map[string]any{ + "autoRemoveFinished": &enable, + "cleanBackupData": &enable, + "storageProvider": storageGS, + }, + Incremental: true, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-incr-gs-auto", + Spec: *basicRestoreSpec.DeepCopy(), 
+ }, + }, + { + Name: "basic backup with gs across namespaces", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-basic-ns-gs", + Namespace: "default", + Spec: *basicBackupSpec.DeepCopy(), + }, + BackupUpdateOptions: map[string]any{ + "storageProvider": storageGS, + }, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-basic-ns-gs", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "incr backup with gs across namespaces", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-incr-ns-gs", + Namespace: "default", + Spec: *basicBackupSpec.DeepCopy(), + }, + BackupUpdateOptions: map[string]any{ + "storageProvider": storageGS, + }, + Incremental: true, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-incr-ns-gs", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + { + Name: "cron backup with GCP", + BackupInstallOptions: envfuncsext.NebulaBackupInstallOptions{ + Name: "nb-cron-gcp", + Spec: *basicBackupSpec.DeepCopy(), + CronBackupOps: &cronBackupOps, + }, + BackupUpdateOptions: map[string]any{ + "storageProvider": storageGS, + }, + RestoreInstallOptions: &envfuncsext.NebulaRestoreInstallOptions{ + Name: "nb-cron-gcp", + Spec: *basicRestoreSpec.DeepCopy(), + }, + }, + }, + }, +} diff --git a/tests/e2e/nebulacluster_test.go b/tests/e2e/nebulacluster_test.go index 458f4d72..05b28468 100644 --- a/tests/e2e/nebulacluster_test.go +++ b/tests/e2e/nebulacluster_test.go @@ -25,12 +25,14 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" + "k8s.io/utils/pointer" "sigs.k8s.io/e2e-framework/klient/wait" "sigs.k8s.io/e2e-framework/pkg/envconf" "sigs.k8s.io/e2e-framework/pkg/envfuncs" "sigs.k8s.io/e2e-framework/pkg/features" "sigs.k8s.io/e2e-framework/third_party/helm" + "github.com/robfig/cron/v3" appsv1alpha1 "github.com/vesoft-inc/nebula-operator/apis/apps/v1alpha1" 
"github.com/vesoft-inc/nebula-operator/tests/e2e/config" "github.com/vesoft-inc/nebula-operator/tests/e2e/envfuncsext" @@ -45,6 +47,7 @@ type ( InstallWaitNCOptions []envfuncsext.NebulaClusterOption LoadLDBC bool UpgradeCases []ncTestUpgradeCase + BackupCases []ncBackupCase } ncTestUpgradeCase struct { @@ -53,6 +56,14 @@ type ( UpgradeNCOptions []envfuncsext.NebulaClusterOption UpgradeWaitNCOptions []envfuncsext.NebulaClusterOption } + + ncBackupCase struct { + Name string + BackupInstallOptions envfuncsext.NebulaBackupInstallOptions + BackupUpdateOptions map[string]any + Incremental bool + RestoreInstallOptions *envfuncsext.NebulaRestoreInstallOptions + } ) func TestNebulaCluster(t *testing.T) { @@ -63,6 +74,7 @@ func TestNebulaCluster(t *testing.T) { ncTestCases = append(ncTestCases, testCasesZone...) ncTestCases = append(ncTestCases, testCasesPV...) ncTestCases = append(ncTestCases, testCasesK8s...) + ncTestCases = append(ncTestCases, testCasesBackup...) defaultNebulaClusterHelmArgs := getDefaultNebulaClusterHelmArgs() @@ -218,7 +230,7 @@ func TestNebulaCluster(t *testing.T) { ctx, err = envfuncsext.WaitNebulaClusterReady(append([]envfuncsext.NebulaClusterOption{ envfuncsext.WithNebulaClusterWaitOptions( wait.WithInterval(time.Second*5), - wait.WithTimeout(time.Minute*5), + wait.WithTimeout(time.Minute*12), ), }, upgradeCase.UpgradeWaitNCOptions...)...)(ctx, cfg) if err != nil { @@ -229,6 +241,284 @@ func TestNebulaCluster(t *testing.T) { ) } + for backupCaseIdx := range tc.BackupCases { + backupCase := tc.BackupCases[backupCaseIdx] + + err := setNebulaBackupSpecs(&backupCase.BackupInstallOptions.Spec, &backupCase.RestoreInstallOptions.Spec, backupCase.BackupUpdateOptions) + if err != nil { + t.Errorf("failed to modify backup options for cluster [%v/%v]", namespace, name) + } + + feature.Assess(fmt.Sprintf("Creating cloud provider secrets for testcase %v", backupCase.Name), func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + nsToUse 
:= namespace + if backupCase.BackupInstallOptions.Namespace != "" { + nsToUse = backupCase.BackupInstallOptions.Namespace + } + + cloudStorage, objectMeta, data := getCloudStorageSecretData(backupCase, nsToUse) + + klog.V(4).Infof("Creating %v secret for test case", cloudStorage) + ctx, err := envfuncsext.CreateObject( + &corev1.Secret{ + ObjectMeta: objectMeta, + Type: corev1.SecretTypeOpaque, + Data: data, + }, + )(ctx, cfg) + if err != nil { + t.Errorf("failed to create %v secret: %v", cloudStorage, err) + } + klog.V(4).Infof("%v secret created successfully", cloudStorage) + + return ctx + }) + + if backupCase.BackupInstallOptions.Namespace != "" && backupCase.BackupInstallOptions.Namespace != namespace { + feature.Assess("Creating needed image pull secret and service account for cross namespace backup", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + + ctx, err := envfuncsext.CreateObject( + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: ImagePullSecretName, + Namespace: backupCase.BackupInstallOptions.Namespace, + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: config.C.DockerConfigJsonSecret, + }, + }, + )(ctx, cfg) + if err != nil { + t.Errorf("failed to create image pull secret %v", err) + } + + ctx, err = envfuncsext.CreateServiceAccount(backupCase.BackupInstallOptions.Namespace, SaName)(ctx, cfg) + if err != nil { + t.Errorf("failed to create service account [%v/%v]: %v", backupCase.BackupInstallOptions.Namespace, SaName, err) + } + + return ctx + }) + } + + feature.Assess(fmt.Sprintf("Creating base backup for %s for NebulaCluster", backupCase.Name), getNBCreateFunction(backupCase, false, name)) + + feature.Assess(fmt.Sprintf("Wait for backup to be complete after %s", backupCase.Name), getNBWaitFunction(false, name)) + + if backupCase.BackupInstallOptions.CronBackupOps != nil && backupCase.BackupInstallOptions.CronBackupOps.TestPause { + 
feature.Assess(fmt.Sprintf("Pausing cron backup for %s for NebulaCluster", backupCase.Name), getTogglePauseFunction(false, true)) + + feature.Assess(fmt.Sprintf("Wait for cron backup to pause for %s for NebulaCluster", backupCase.Name), getWaitForPauseFunction(false)) + + feature.Assess(fmt.Sprintf("Resuming cron backup for %s for NebulaCluster", backupCase.Name), getTogglePauseFunction(false, false)) + + feature.Assess(fmt.Sprintf("Wait for cron backup to resume after pause for %s", backupCase.Name), getNBWaitFunction(false, name)) + } + + if backupCase.Incremental { + feature.Assess(fmt.Sprintf("Running commmands to add vertex and edges for case %v", backupCase.Name), + func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + ngOptions := envfuncsext.NebulaCommandOptions{ + ClusterName: name, + ClusterNamespace: namespace, + Username: Username, + Password: Password, + Space: Space, + } + + _, err := envfuncsext.RunNGCommand(ngOptions, + "INSERT VERTEX Place(name, url, type) VALUES \"placeNgTest\":(\"ngnbnr\", \"https://www.ngnbnr.com\", \"office\");"+ + "INSERT VERTEX Tag(name, url) VALUES \"tagNgTest\":(\"problematic\", \"https://www.problematic.com\");"+ + "INSERT EDGE HAS_TAG() VALUES \"placeNgTest\" -> \"tagNgTest\";", + )(ctx, cfg) + if err != nil { + t.Errorf("failed to get stats for original cluster [%v/%v]: %v", ngOptions.ClusterNamespace, ngOptions.ClusterName, err) + } + return ctx + }, + ) + + feature.Assess(fmt.Sprintf("Creating Incremental backup for case %s for NebulaCluster", backupCase.Name), getNBCreateFunction(backupCase, true, name)) + + feature.Assess(fmt.Sprintf("Wait for backup to be complete after incremental backup for %s", backupCase.Name), getNBWaitFunction(true, name)) + } + + if backupCase.RestoreInstallOptions != nil { + feature.Assess(fmt.Sprintf("Restoring cluster for case %v for NebulaCluster", backupCase.Name), getNRCreateFunction(backupCase, backupCase.Incremental, name)) + + 
feature.Assess(fmt.Sprintf("Wait for restore to be complete for %s", backupCase.Name), getNRWaitFunction(name)) + + feature.Assess(fmt.Sprintf("Check if restore stats equal for %s", backupCase.Name), getCheckStatsFunction(name)) + + feature.Assess(fmt.Sprintf("Deleting restore cluster for %s", backupCase.Name), + func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + restoreContext := envfuncsext.GetNebulaRestoreCtxValue(ctx) + klog.V(4).InfoS("Deleting restore cluster", "cluster namespace", namespace, "cluster name", restoreContext.RestoreClusterName) + + ctx, err = envfuncsext.DeleteNebulaRestoredCluster()(ctx, cfg) + if err != nil { + t.Errorf("Deleting restore cluster [%v/%v] failed: %v", namespace, restoreContext.RestoreClusterName, err) + } + return ctx + }, + ) + + feature.Assess(fmt.Sprintf("Deleting nebula restore after %v", backupCase.Name), + func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + restoreContext := envfuncsext.GetNebulaRestoreCtxValue(ctx) + klog.V(4).InfoS("Deleting nebula restore", "namespace", restoreContext.Namespace, "name", restoreContext.Name) + + ctx, err = envfuncsext.DeleteNebulaRestore()(ctx, cfg) + if err != nil { + t.Errorf("Deleting nebula restore [%v/%v] failed: %v", restoreContext.Namespace, restoreContext.Name, err) + } + return ctx + }, + ) + } + + if backupCase.Incremental { + feature.Assess(fmt.Sprintf("Running commmands to delete added vertex and edges for case %v", backupCase.Name), + func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + ngOptions := envfuncsext.NebulaCommandOptions{ + ClusterName: name, + ClusterNamespace: namespace, + Username: Username, + Password: Password, + Space: Space, + } + + _, err := envfuncsext.RunNGCommand(ngOptions, + "DELETE VERTEX \"placeNgTest\" WITH EDGE;"+ + "DELETE VERTEX \"tagNgTest\" WITH EDGE;", + )(ctx, cfg) + if err != nil { + t.Errorf("failed to get stats for original cluster [%v/%v]: %v", 
ngOptions.ClusterNamespace, ngOptions.ClusterName, err) + } + return ctx + }, + ) + } + + feature.Assess(fmt.Sprintf("Delete backup after %s", backupCase.Name), + func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + klog.V(4).InfoS("Deleting backup for NebulaCluster", "cluster namespace", namespace, "cluster name", name) + + var err error + if backupCase.Incremental { + if backupCase.BackupInstallOptions.CronBackupOps != nil { + ctx, err = envfuncsext.DeleteNebulaCronBackup(true)(ctx, cfg) + } else { + ctx, err = envfuncsext.DeleteNebulaBackup(true)(ctx, cfg) + } + if err != nil { + t.Errorf("Deleting incremental backup for NebulaCluster failed: %v", err) + } + } + + if backupCase.BackupInstallOptions.CronBackupOps != nil { + ctx, err = envfuncsext.DeleteNebulaCronBackup(false)(ctx, cfg) + } else { + ctx, err = envfuncsext.DeleteNebulaBackup(false)(ctx, cfg) + } + if err != nil { + t.Errorf("Deleting base backup for NebulaCluster failed: %v", err) + } + return ctx + }, + ) + + if pointer.BoolDeref(backupCase.BackupInstallOptions.Spec.AutoRemoveFinished, false) { + feature.Assess(fmt.Sprintf("Check auto delete after %s", backupCase.Name), + func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + klog.V(4).Info("Auto delete has been enabled. Checking if backup job is deleted for NebulaCluster", "cluster namespace", namespace, "cluster name", name) + + var err error + if backupCase.Incremental { + ctx, err = envfuncsext.WaitForCleanBackup( + true, + envfuncsext.WithNebulaBackupWaitOptions( + wait.WithInterval(time.Second*5), + wait.WithTimeout(time.Minute*16), + ), + )(ctx, cfg) + if err != nil { + t.Errorf("error checking base backup job deletion or backup still exists: %v", err) + } else { + klog.Info("checking incremental backup job deletion successful. 
Nebulabackup no longer exists.") + } + } + + ctx, err = envfuncsext.WaitForCleanBackup( + false, + envfuncsext.WithNebulaBackupWaitOptions( + wait.WithInterval(time.Second*5), + wait.WithTimeout(time.Minute*16), + ), + )(ctx, cfg) + if err != nil { + t.Errorf("error checking base backup job deletion or backup still exists: %v", err) + } else { + klog.Info("checking base backup job deletion successful. Nebulabackup no longer exists.") + } + return ctx + }, + ) + } + + if backupCase.BackupInstallOptions.Namespace != "" && backupCase.BackupInstallOptions.Namespace != namespace { + feature.Assess("Deleteing image pull secret and service account for cross namespace backup", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + + ctx, err := envfuncsext.DeleteObject( + &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: ImagePullSecretName, + Namespace: backupCase.BackupInstallOptions.Namespace, + }, + Type: corev1.SecretTypeDockerConfigJson, + Data: map[string][]byte{ + corev1.DockerConfigJsonKey: config.C.DockerConfigJsonSecret, + }, + }, + )(ctx, cfg) + if err != nil { + t.Errorf("failed to create image pull secret %v", err) + } + + ctx, err = envfuncsext.DeleteServiceAccount(backupCase.BackupInstallOptions.Namespace, SaName)(ctx, cfg) + if err != nil { + t.Errorf("failed to delete service account [%v/%v]: %v", backupCase.BackupInstallOptions.Namespace, SaName, err) + } + + return ctx + }) + } + + feature.Assess(fmt.Sprintf("Deleting cloud provider secrets for testcase %v", backupCase.Name), func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + nsToUse := namespace + if backupCase.BackupInstallOptions.Namespace != "" { + nsToUse = backupCase.BackupInstallOptions.Namespace + } + + cloudStorage, objectMeta, data := getCloudStorageSecretData(backupCase, nsToUse) + + klog.V(4).Infof("Deleting %v secret for test case", cloudStorage) + ctx, err := envfuncsext.DeleteObject( + &corev1.Secret{ + ObjectMeta: objectMeta, 
+ Type: corev1.SecretTypeOpaque, + Data: data, + }, + )(ctx, cfg) + if err != nil { + t.Errorf("failed to delete %v secret: %v", cloudStorage, err) + } + klog.V(4).Infof("%v secret deleted successfully", cloudStorage) + + return ctx + }) + } + feature.Assess("Uninstall NebulaCluster", func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { klog.V(4).InfoS("Uninstall NebulaCluster", "namespace", namespace, "name", name) @@ -248,10 +538,15 @@ func TestNebulaCluster(t *testing.T) { ) feature.Teardown(func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { - var err error - ctx, err = envfuncs.DeleteNamespace(namespace)(ctx, cfg) - if err != nil { - t.Errorf("failed to delete namespace %v", err) + if t.Failed() { + klog.V(4).Info("Skipping teardown due to test failure.") + } else { + + var err error + ctx, err = envfuncs.DeleteNamespace(namespace)(ctx, cfg) + if err != nil { + t.Errorf("failed to delete namespace %v", err) + } } return ctx }) @@ -270,6 +565,12 @@ func getDefaultNebulaClusterHelmArgs() []string { if config.C.NebulaGraph.Version != "" { args = append(args, "--set", fmt.Sprintf("nebula.version=%s", config.C.NebulaGraph.Version)) } + if config.C.NebulaGraph.AgentImage != "" { + args = append(args, "--set", fmt.Sprintf("nebula.agent.image=%s", config.C.NebulaGraph.AgentImage)) + } + if config.C.NebulaGraph.AgentVersion != "" { + args = append(args, "--set", fmt.Sprintf("nebula.agent.version=%s", config.C.NebulaGraph.AgentVersion)) + } if config.C.NebulaGraph.GraphdImage != "" { args = append(args, "--set", fmt.Sprintf("nebula.graphd.image=%s", config.C.NebulaGraph.GraphdImage)) } @@ -284,3 +585,239 @@ func getDefaultNebulaClusterHelmArgs() []string { } return args } + +func setNebulaBackupSpecs(backupSpec *appsv1alpha1.BackupSpec, restoreSpec *appsv1alpha1.RestoreSpec, backupUpdateOptions map[string]any) error { + for option, value := range backupUpdateOptions { + switch option { + case "autoRemoveFinished": + 
backupFinished := value.(*bool) + backupSpec.AutoRemoveFinished = backupFinished + case "cleanBackupData": + cleanBackupData := value.(*bool) + backupSpec.CleanBackupData = cleanBackupData + case "storageProvider": + storageProvider := value.(appsv1alpha1.StorageProvider) + backupSpec.Config.StorageProvider = storageProvider + restoreSpec.Config.StorageProvider = storageProvider + default: + return fmt.Errorf("error setting backup configs. Invalid option: %v", option) + } + } + return nil +} + +func getNBCreateFunction(backupCase ncBackupCase, incremental bool, clusterName string) features.Func { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + clusterNamespace := cfg.Namespace() + klog.V(4).InfoS("Backup NebulaCluster", "namespace", clusterNamespace, "name", clusterName) + + backupCase.BackupInstallOptions.Spec.Config.ClusterName = clusterName + backupCase.BackupInstallOptions.Spec.Config.ClusterNamespace = &clusterNamespace + + if incremental { + backupContextValue := envfuncsext.GetNebulaBackupCtxValue(false, ctx) + klog.Infof("Incremental backup detected. 
Base backup name: %v", backupContextValue.BackupFileName) + backupCase.BackupInstallOptions.Name = fmt.Sprintf("%v-incr", backupCase.BackupInstallOptions.Name) + backupCase.BackupInstallOptions.Spec.Config.BaseBackupName = &backupContextValue.BackupFileName + } + + var err error + if backupCase.BackupInstallOptions.CronBackupOps != nil { + ctx, err = envfuncsext.DeployNebulaCronBackup(incremental, backupCase.BackupInstallOptions)(ctx, cfg) + } else { + ctx, err = envfuncsext.DeployNebulaBackup(incremental, backupCase.BackupInstallOptions)(ctx, cfg) + } + if err != nil { + t.Errorf("failed to backup NebulaCluster %v", err) + } + + return ctx + } +} + +func getNBWaitFunction(incremental bool, clusterName string) features.Func { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + klog.V(4).InfoS("Waiting for backup for NebulaCluster to be complete", "cluster namespace", cfg.Namespace(), "cluster name", clusterName) + + backupContextValue := envfuncsext.GetNebulaBackupCtxValue(false, ctx) + + var err error + if backupContextValue.Schedule != "" { + ctx, err = envfuncsext.WaitNebulaCronBackupFinished( + incremental, + envfuncsext.WithNebulaBackupWaitOptions( + wait.WithInterval(time.Second*5), + wait.WithTimeout(time.Minute*10), + ), + )(ctx, cfg) + } else { + ctx, err = envfuncsext.WaitNebulaBackupFinished( + incremental, + envfuncsext.WithNebulaBackupWaitOptions( + wait.WithInterval(time.Second*5), + wait.WithTimeout(time.Minute*10), + ), + )(ctx, cfg) + } + + if err != nil { + t.Errorf("failed waiting for backup for NebulaCluster to be complete: %v", err) + } + + return ctx + } +} + +func getCloudStorageSecretData(backupCase ncBackupCase, namespace string) (string, metav1.ObjectMeta, map[string][]byte) { + var cloudStorage string + var objectMeta metav1.ObjectMeta + var data map[string][]byte + + if backupCase.BackupInstallOptions.Spec.Config.S3 != nil { + cloudStorage = "S3" + objectMeta = metav1.ObjectMeta{ + Name: 
AWSSecretName, + Namespace: namespace, + } + data = map[string][]byte{ + "access_key": config.C.AWSAccessKey, + "secret_key": config.C.AWSSecretKey, + } + } else if backupCase.BackupInstallOptions.Spec.Config.GS != nil { + cloudStorage = "GS" + objectMeta = metav1.ObjectMeta{ + Name: GSSecretName, + Namespace: namespace, + } + data = map[string][]byte{ + "credentials": config.C.GSSecret, + } + } + + return cloudStorage, objectMeta, data +} + +func getNRCreateFunction(backupCase ncBackupCase, incremental bool, clusterName string) features.Func { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + clusterNamespace := cfg.Namespace() + klog.V(4).InfoS("Restore NebulaCluster", "namespace", clusterNamespace, "name", clusterName) + + backupCase.RestoreInstallOptions.Spec.Config.ClusterName = clusterName + backupCase.RestoreInstallOptions.Spec.Config.ClusterNamespace = &clusterNamespace + + backupContextValue := envfuncsext.GetNebulaBackupCtxValue(incremental, ctx) + if incremental { + klog.Infof("Incremental backup detected. Restoring from incremental backup %v", backupContextValue.BackupFileName) + } else { + klog.Infof("No incemental backups detected. 
Restoring from base backup %v", backupContextValue.BackupFileName) + } + backupCase.RestoreInstallOptions.Spec.Config.BackupName = backupContextValue.BackupFileName + + ctx, err := envfuncsext.DeployNebulaRestore(*backupCase.RestoreInstallOptions)(ctx, cfg) + if err != nil { + t.Errorf("failed to restore NebulaCluster %v", err) + } + return ctx + } +} + +func getNRWaitFunction(clusterName string) features.Func { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + klog.V(4).InfoS("Waiting for restore for NebulaCluster to be complete", "cluster namespace", cfg.Namespace(), "cluster name", clusterName) + + ctx, err := envfuncsext.WaitNebulaRestoreFinished( + envfuncsext.WithNebulaRestoreWaitOptions( + wait.WithInterval(time.Second*5), + wait.WithTimeout(time.Minute*30), + ), + )(ctx, cfg) + if err != nil { + t.Errorf("failed waiting for restore for NebulaCluster to be complete: %v", err) + } + + return ctx + } +} + +func getCheckStatsFunction(clusterName string) features.Func { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + restoreContext := envfuncsext.GetNebulaRestoreCtxValue(ctx) + klog.V(4).Infof("Checking if restore stats are equal between nebula clusters [%v/%v] and [%v/%v]", cfg.Namespace(), clusterName, cfg.Namespace(), restoreContext.Name) + + ngOptions := envfuncsext.NebulaCommandOptions{ + ClusterName: clusterName, + ClusterNamespace: cfg.Namespace(), + Username: Username, + Password: Password, + Space: Space, + } + + results1, err := envfuncsext.RunGetStats(ctx, cfg, Space, ngOptions) + if err != nil { + t.Errorf("failed to get stats for original cluster [%v/%v]: %v", ngOptions.ClusterNamespace, ngOptions.ClusterName, err) + } + + ngOptions.ClusterName = restoreContext.RestoreClusterName + results2, err := envfuncsext.RunGetStats(ctx, cfg, Space, ngOptions) + if err != nil { + t.Errorf("failed to get stats for restore cluster [%v/%v]: %v", ngOptions.ClusterNamespace, 
ngOptions.ClusterName, err) + } + + equal := envfuncsext.NGResultsEqual(*results1, *results2) + if !equal { + klog.V(4).Info("Original cluster results:") + envfuncsext.PrintCommandResults(*results2) + klog.V(4).Info("Restore cluster results:") + envfuncsext.PrintCommandResults(*results1) + t.Errorf("test failed: results set between original cluster and restored cluster are not equal.") + } + + return ctx + } +} + +func getTogglePauseFunction(incremental, pause bool) features.Func { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + backupContextValue := envfuncsext.GetNebulaBackupCtxValue(incremental, ctx) + klog.V(4).InfoS("Pausing cron backup for NebulaCluster", "cluster namespace", cfg.Namespace(), "cluster name", backupContextValue.BackupSpec.Config.ClusterName) + + ctx, err := envfuncsext.SetCronBackupPause(incremental, pause)(ctx, cfg) + if err != nil { + t.Errorf("failed to initiate pause for cron backup for NebulaCluster: %v", err) + } + + klog.V(4).Info("successfully initiated pause for cron backup for NebulaCluster [%v/%v]", cfg.Namespace(), backupContextValue.BackupSpec.Config.ClusterName) + + return ctx + } +} + +func getWaitForPauseFunction(incremental bool) features.Func { + return func(ctx context.Context, t *testing.T, cfg *envconf.Config) context.Context { + backupContextValue := envfuncsext.GetNebulaBackupCtxValue(incremental, ctx) + klog.V(4).InfoS("Checking if cron backup was paused successfully for NebulaCluster", "cluster namespace", cfg.Namespace(), "cluster name", backupContextValue.BackupSpec.Config.ClusterName) + + now := time.Now() + sched, err := cron.ParseStandard(backupContextValue.Schedule) + if err != nil { + t.Errorf("failed to parse schedule for cron backup for NebulaCluster [%v/%v]", cfg.Namespace(), backupContextValue.BackupSpec.Config.ClusterName) + } + + nextTime := sched.Next(now) + waitDuration := nextTime.Sub(nextTime) + + ctx, err = envfuncsext.CheckCronBackupPaused(incremental, + 
envfuncsext.WithNebulaBackupWaitOptions( + wait.WithInterval(waitDuration+time.Second*5), + wait.WithTimeout(waitDuration+time.Minute*10), + ), + )(ctx, cfg) + if err != nil { + t.Errorf("failed to pause cron backup for NebulaCluster: %v", err) + } + + klog.V(4).Info("successfully paused cron backup for NebulaCluster [%v/%v]", cfg.Namespace(), backupContextValue.BackupSpec.Config.ClusterName) + + return ctx + } +}