diff --git a/Gopkg.lock b/Gopkg.lock
index aa2222479..96bbf6202 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -169,12 +169,12 @@
   revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"

 [[projects]]
-  digest = "1:97df918963298c287643883209a2c3f642e6593379f97ab400c2a2e219ab647d"
+  digest = "1:318f1c959a8a740366fce4b1e1eb2fd914036b4af58fbd0a003349b305f118ad"
   name = "github.com/golang/protobuf"
   packages = ["proto"]
   pruneopts = "NUT"
-  revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
-  version = "v1.2.0"
+  revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
+  version = "v1.3.1"

 [[projects]]
   branch = "master"
diff --git a/Gopkg.toml b/Gopkg.toml
index 28c1038b3..fb13e25d8 100644
--- a/Gopkg.toml
+++ b/Gopkg.toml
@@ -63,7 +63,7 @@

 [[constraint]]
   name = "github.com/golang/protobuf"
-  version = "1.2.0"
+  version = "1.3.1"

 [[constraint]]
   branch = "master"
diff --git a/api/cmd/main.go b/api/cmd/main.go
index aeff6a331..33715960d 100644
--- a/api/cmd/main.go
+++ b/api/cmd/main.go
@@ -49,12 +49,12 @@ func main() {
 	ws.Filter(auth.FilterFactory())
 	backend.RegisterRouter(ws)
-	//s3.RegisterRouter(ws)
 	dataflow.RegisterRouter(ws)

 	// add filter for authentication context
-	wc.Filter(logging.FilterFactory())
-	wc.Filter(context.FilterFactory())
-
+	ws.Filter(logging.FilterFactory())
+	ws.Filter(context.FilterFactory())
+	ws.Filter(auth.FilterFactory())
+
 	s3ws := new(restful.WebService)
 	s3ws.Path("/v1/s3")
 	s3ws.Doc("OpenSDS Multi-Cloud API")
diff --git a/api/pkg/examples/policy.json b/api/pkg/examples/policy.json
index fce3e1718..88cc4f607 100644
--- a/api/pkg/examples/policy.json
+++ b/api/pkg/examples/policy.json
@@ -30,5 +30,9 @@
     "object:put": "",
     "object:delete": "",
     "routbucket:put": "",
-    "routobject:put": ""
+    "routobject:put": "",
+    "storageclass:get": "",
+    "bucketlifecycle:put": "",
+    "bucketlifecycle:get": "",
+    "bucketlifecycle:delete": ""
 }
diff --git a/api/pkg/s3/abortmultipartupload.go b/api/pkg/s3/abortmultipartupload.go
index 76dc1ca51..cc079f2d0 100644
--- a/api/pkg/s3/abortmultipartupload.go
+++ b/api/pkg/s3/abortmultipartupload.go
@@ -43,6 +43,10 @@ func (s *APIService) AbortMultipartUpload(request *restful.Request, response *re
 		return
 	}

+	// Delete the multipart upload record; if the deletion fails, the record will be cleaned up by lifecycle management.
+	record := s3.MultipartUploadRecord{ObjectKey: objectKey, Bucket: bucketName, UploadId: uploadId}
+	s.s3Client.DeleteUploadRecord(context.Background(), &record)
+
 	deleteInput := s3.DeleteObjectInput{Key: objectKey, Bucket: bucketName}
 	res, err := s.s3Client.DeleteObject(ctx, &deleteInput)
 	if err != nil {
diff --git a/api/pkg/s3/bucketlifecycledelete.go b/api/pkg/s3/bucketlifecycledelete.go
new file mode 100644
index 000000000..e2632f707
--- /dev/null
+++ b/api/pkg/s3/bucketlifecycledelete.go
@@ -0,0 +1,73 @@
+// Copyright 2019 The OpenSDS Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s3
+
+import (
+	"net/http"
+	"strings"
+
+	"github.com/emicklei/go-restful"
+	"github.com/micro/go-log"
+	"github.com/opensds/multi-cloud/api/pkg/policy"
+	s3 "github.com/opensds/multi-cloud/s3/proto"
+	"golang.org/x/net/context"
+)
+
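+// BucketLifecycleDelete deletes lifecycle rules from a bucket by rule ID. The rule
+// IDs are passed in the query string, e.g. (illustrative request):
+//   DELETE /v1/s3/{bucketName}?lifecycle&ruleID=rule1&ruleID=rule2
+// IDs that do not exist in the bucket's configuration are reported back as errors.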
+func (s *APIService) BucketLifecycleDelete(request *restful.Request, response *restful.Response) {
+	if !policy.Authorize(request, response, "bucket:delete") {
+		return
+	}
+	FoundIDArray := []string{}
+	NonFoundIDArray := []string{}
+	bucketName := request.PathParameter("bucketName")
+	ruleID := request.Request.URL.Query()["ruleID"]
+	if ruleID != nil {
+		ctx := context.Background()
+		log.Logf("Received request for bucket lifecycle delete for bucket: %s and the ruleIDs: %s", bucketName, ruleID)
+		bucket, _ := s.s3Client.GetBucket(ctx, &s3.Bucket{Name: bucketName})
+		for _, id := range ruleID {
+			isfound := false
+			for _, lcRule := range bucket.LifecycleConfiguration {
+				if lcRule.ID == id {
+					isfound = true
+					FoundIDArray = append(FoundIDArray, id)
+				}
+			}
+			if !isfound {
+				NonFoundIDArray = append(NonFoundIDArray, id)
+			}
+		}
+		for _, id := range NonFoundIDArray {
+			response.WriteErrorString(http.StatusBadRequest, strings.Replace("error: rule ID $1 doesn't exist \n\n", "$1", id, 1))
+		}
+
+		for _, id := range FoundIDArray {
+			deleteInput := s3.DeleteLifecycleInput{Bucket: bucketName, RuleID: id}
+			res, err := s.s3Client.DeleteBucketLifecycle(ctx, &deleteInput)
+			if err != nil {
+				response.WriteError(http.StatusBadRequest, err)
+				return
+			}
+			response.WriteEntity(res)
+		}
+	} else {
+		response.WriteErrorString(http.StatusBadRequest, NoRuleIDForLifecycleDelete)
+		return
+	}
+	log.Log("Delete bucket lifecycle successfully.")
+}
diff --git a/api/pkg/s3/bucketlifecycleget.go b/api/pkg/s3/bucketlifecycleget.go
new file mode 100644
index 000000000..7a12fdc4a
--- /dev/null
+++ b/api/pkg/s3/bucketlifecycleget.go
@@ -0,0 +1,124 @@
+// Copyright 2019 The OpenSDS Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s3
+
+import (
+	"fmt"
+
+	"github.com/emicklei/go-restful"
+	"github.com/micro/go-log"
+	"github.com/opensds/multi-cloud/api/pkg/policy"
+	. "github.com/opensds/multi-cloud/api/pkg/utils/constants"
+	"github.com/opensds/multi-cloud/s3/pkg/model"
+	s3 "github.com/opensds/multi-cloud/s3/proto"
+	"golang.org/x/net/context"
+)
+
+// Convert a storage tier back to a storage class name for the XML-format output.
+func (s *APIService) tier2class(tier int32) (string, error) {
+	{
+		mutex.Lock()
+		defer mutex.Unlock()
+		if len(ClassAndTier) == 0 {
+			err := s.loadStorageClassDefinition()
+			if err != nil {
+				log.Logf("load storage classes failed: %v.\n", err)
+				return "", err
+			}
+		}
+	}
+	className := ""
+	for k, v := range ClassAndTier {
+		if v == tier {
+			className = k
+		}
+	}
+	if className == "" {
+		log.Logf("invalid tier: %d\n", tier)
+		return "", fmt.Errorf(InvalidTier)
+	}
+	return className, nil
+}
+
+// Handler for the GET bucket lifecycle API.
+func (s *APIService) BucketLifecycleGet(request *restful.Request, response *restful.Response) {
+	if !policy.Authorize(request, response, "bucket:get") {
+		return
+	}
+	bucketName := request.PathParameter("bucketName")
+	log.Logf("Received request for bucket details in GET lifecycle: %s", bucketName)
+
+	ctx := context.Background()
+	bucket, _ := s.s3Client.GetBucket(ctx, &s3.Bucket{Name: bucketName})
+
+	// convert back to xml struct
+	getLifecycleConf := model.LifecycleConfiguration{}
+
+	// convert lifecycle rule to xml Rule
+	if bucket.LifecycleConfiguration != nil {
+		for _, lcRule := range bucket.LifecycleConfiguration {
+			xmlRule := model.Rule{}
+
+			xmlRule.Status = lcRule.Status
+			xmlRule.ID = lcRule.ID
+			xmlRule.Filter = converts3FilterToRuleFilter(lcRule.Filter)
+			xmlRule.AbortIncompleteMultipartUpload = converts3UploadToRuleUpload(lcRule.AbortIncompleteMultipartUpload)
+			xmlRule.Transition = make([]model.Transition, 0)
+
+			// Arrange the transition and expiration actions in XML.
+			for _, action := range lcRule.Actions {
+				log.Logf("Action is : %v\n", action)
+
+				if action.Name == ActionNameTransition {
+					xmlTransition := model.Transition{}
+					xmlTransition.Days = action.Days
+					xmlTransition.Backend = action.Backend
+					className, err := s.tier2class(action.Tier)
+					if err == nil {
+						xmlTransition.StorageClass = className
+					}
+					xmlRule.Transition = append(xmlRule.Transition, xmlTransition)
+				}
+				if action.Name == ActionNameExpiration {
+					xmlExpiration := model.Expiration{}
+					xmlExpiration.Days = action.Days
+					xmlRule.Expiration = append(xmlRule.Expiration, xmlExpiration)
+				}
+			}
+			// append each xml rule to the xml array
+			getLifecycleConf.Rule = append(getLifecycleConf.Rule, xmlRule)
+		}
+	}
+
+	// marshal the array back to xml format
+	response.WriteAsXml(getLifecycleConf)
+	log.Log("Get bucket lifecycle successfully.")
+}
+
+func converts3FilterToRuleFilter(filter *s3.LifecycleFilter) model.Filter {
+	retFilter := model.Filter{}
+	if filter != nil {
+		retFilter.Prefix = filter.Prefix
+	}
+	return retFilter
+}
+
+func converts3UploadToRuleUpload(upload *s3.AbortMultipartUpload) model.AbortIncompleteMultipartUpload {
+	retUpload := model.AbortIncompleteMultipartUpload{}
+	if upload != nil {
+		retUpload.DaysAfterInitiation = upload.DaysAfterInitiation
+	}
+	return retUpload
+}
diff --git a/api/pkg/s3/bucketlifecycleput.go b/api/pkg/s3/bucketlifecycleput.go
new file mode 100644
index 000000000..50b3adc2c
--- /dev/null
+++ b/api/pkg/s3/bucketlifecycleput.go
@@ -0,0 +1,245 @@
+// Copyright 2019 The OpenSDS Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s3
+
+import (
+	"crypto/md5"
+	"encoding/xml"
+	"fmt"
+	"net/http"
+	"strings"
+	"sync"
+
+	. "github.com/opensds/multi-cloud/api/pkg/utils/constants"
+
+	"github.com/emicklei/go-restful"
+	"github.com/micro/go-log"
+
+	"github.com/opensds/multi-cloud/api/pkg/policy"
+	"github.com/opensds/multi-cloud/s3/pkg/model"
+	s3 "github.com/opensds/multi-cloud/s3/proto"
+	"golang.org/x/net/context"
+)
+
+// Map from storage class to tier.
+var ClassAndTier map[string]int32
+var mutex sync.Mutex
+
+func (s *APIService) loadStorageClassDefinition() error {
+	ctx := context.Background()
+	log.Log("Load storage classes.")
+	res, err := s.s3Client.GetStorageClasses(ctx, &s3.BaseRequest{})
+	if err != nil {
+		log.Logf("get storage classes from s3 service failed: %v\n", err)
+		return err
+	}
+	ClassAndTier = make(map[string]int32)
+	for _, v := range res.Classes {
+		ClassAndTier[v.Name] = v.Tier
+	}
+	return nil
+}
+
+func (s *APIService) class2tier(name string) (int32, error) {
+	{
+		mutex.Lock()
+		defer mutex.Unlock()
+		if len(ClassAndTier) == 0 {
+			err := s.loadStorageClassDefinition()
+			if err != nil {
+				log.Logf("load storage classes failed: %v.\n", err)
+				return 0, err
+			}
+		}
+	}
+	tier, ok := ClassAndTier[name]
+	if !ok {
+		log.Logf("translate storage class name[%s] to tier failed.\n", name)
+		return 0, fmt.Errorf("invalid storage class:%s", name)
+	}
+	log.Logf("class[%s] to tier[%d]\n", name, tier)
+	return tier, nil
+}
+
+func checkValidationOfActions(actions []*s3.Action) error {
+	var pre *s3.Action = nil
+	for _, action := range actions {
+		log.Logf("action: %+v\n", *action)
+		if pre == nil {
+			if action.Name == ActionNameExpiration && action.Days < ExpirationMinDays {
+				// If a rule has only an expiration action, the days of that action must be at least ExpirationMinDays.
+				return fmt.Errorf("error: days for expiring objects must not be less than %d", ExpirationMinDays)
+			}
+			if action.Name == ActionNameTransition && action.Days < TransitionMinDays {
+				// The days for transition to tiers other than Tier999 must not be less than TransitionMinDays.
+				minDays := int32(TransitionMinDays)
+				if action.Tier == Tier999 {
+					// The days for transition to Tier999 must not be less than TransitionToArchiveMinDays.
+					minDays = TransitionToArchiveMinDays
+				}
+				if action.Days < minDays {
+					return fmt.Errorf("error: days for transitioning object to tier_%d must not be less than %d",
+						action.Tier, minDays)
+				}
+			}
+		} else {
+			if pre.Name == ActionNameExpiration {
+				// Only one expiration action is supported for each rule.
+				return fmt.Errorf(MoreThanOneExpirationAction)
+			}
+
+			if action.Name == ActionNameExpiration && pre.Days+ExpirationMinDays > action.Days {
+				return fmt.Errorf(DaysInStorageClassBeforeExpiration)
+			}
+
+			if action.Name == ActionNameTransition && pre.Days+LifecycleTransitionDaysStep > action.Days {
+				return fmt.Errorf(DaysInStorageClassBeforeTransition)
+			}
+		}
+		pre = action
+	}
+	return nil
+}
+
+func (s *APIService) BucketLifecyclePut(request *restful.Request, response *restful.Response) {
+	if !policy.Authorize(request, response, "bucket:put") {
+		return
+	}
+	bucketName := request.PathParameter("bucketName")
+	log.Logf("Received request for creating lifecycle configuration for bucket: %s", bucketName)
+	ctx := context.Background()
+	bucket, _ := s.s3Client.GetBucket(ctx, &s3.Bucket{Name: bucketName})
+	body := ReadBody(request)
+	log.Logf("MD5 sum for body is %x", md5.Sum(body))
+
+	if body != nil {
+		createLifecycleConf := model.LifecycleConfiguration{}
+		err := xml.Unmarshal(body, &createLifecycleConf)
+		if err != nil {
+			response.WriteError(http.StatusInternalServerError, err)
+			return
+		} else {
+			dupIdCheck := make(map[string]interface{})
+			s3RulePtrArr := make([]*s3.LifecycleRule, 0)
+			for _, rule := range createLifecycleConf.Rule {
+				s3Rule := s3.LifecycleRule{}
+
+				// Check whether the rule ID duplicates an earlier one.
+				if _, ok := dupIdCheck[rule.ID]; ok {
+					log.Logf("Duplicate ruleID found for rule : %s\n", rule.ID)
+					ErrStr := strings.Replace(DuplicateRuleIDError, "$1", rule.ID, 1)
+					response.WriteError(http.StatusBadRequest, fmt.Errorf(ErrStr))
+					return
+				}
+				// Assign the rule ID.
+				dupIdCheck[rule.ID] = struct{}{}
+				s3Rule.ID = rule.ID
+
+				// Assign the status value to the s3 status.
+				log.Logf("Status in rule file is %v\n", rule.Status)
+				s3Rule.Status = rule.Status
+
+				// Assign the filter, converting the xml struct to the s3 struct.
+				s3Rule.Filter = convertRuleFilterToS3Filter(rule.Filter)
+
+				// Create the array of actions.
+				s3ActionArr := make([]*s3.Action, 0)
+
+				for _, transition := range rule.Transition {
+					// Define a transition action and assign the values to populate its fields.
+					s3Transition := s3.Action{Name: ActionNameTransition}
+
+					// Assign the number of days after which the transition happens.
+					s3Transition.Days = transition.Days
+
+					// Assign the backend value to the s3 struct.
+					s3Transition.Backend = transition.Backend
+
+					// Assign the storage class of the object to the s3 struct.
+					tier, err := s.class2tier(transition.StorageClass)
+					if err != nil {
+						response.WriteError(http.StatusBadRequest, err)
+						return
+					}
+					s3Transition.Tier = tier
+
+					// Add the transition action to the rule's action array.
+					s3ActionArr = append(s3ActionArr, &s3Transition)
+				}
+
+				// Get the expiration actions from the xml struct.
+				for _, expiration := range rule.Expiration {
+					s3Expiration := s3.Action{Name: ActionNameExpiration}
+					s3Expiration.Days = expiration.Days
+					s3ActionArr = append(s3ActionArr, &s3Expiration)
+				}
+
+				// Validate the actions.
+				err := checkValidationOfActions(s3ActionArr)
+				if err != nil {
+					log.Logf("validation of actions failed: %v\n", err)
+					response.WriteError(http.StatusBadRequest, err)
+					return
+				}
+
+				// Assign the action array to the s3 rule.
+				s3Rule.Actions = s3ActionArr
+
+				s3Rule.AbortIncompleteMultipartUpload = convertRuleUploadToS3Upload(rule.AbortIncompleteMultipartUpload)
+
+				// Add the rule to the s3 rule array.
+				s3RulePtrArr = append(s3RulePtrArr, &s3Rule)
+			}
+			// Assign the lifecycle rules to the s3 bucket.
+			bucket.LifecycleConfiguration = s3RulePtrArr
+		}
+	} else {
+		log.Log("No request body provided for creating lifecycle configuration")
+		response.WriteError(http.StatusBadRequest, fmt.Errorf(NoRequestBody))
+		return
+	}
+
+	// UpdateBucket checks internally whether the bucket exists before updating it
+	// with the new lifecycle configuration.
+	res, err := s.s3Client.UpdateBucket(ctx, bucket)
+	if err != nil {
+		response.WriteError(http.StatusInternalServerError, err)
+		return
+	}
+	log.Log("Create bucket lifecycle successfully.")
+	response.WriteEntity(res)
+}
+
+func convertRuleFilterToS3Filter(filter model.Filter) *s3.LifecycleFilter {
+	retFilter := s3.LifecycleFilter{}
+	if filter.Prefix != "" {
+		retFilter.Prefix = filter.Prefix
+		return &retFilter
+	} else {
+		return nil
+	}
+}
+
+func convertRuleUploadToS3Upload(upload model.AbortIncompleteMultipartUpload) *s3.AbortMultipartUpload {
+	retUpload := s3.AbortMultipartUpload{}
+	retUpload.DaysAfterInitiation = upload.DaysAfterInitiation
+	return &retUpload
+}
diff --git a/api/pkg/s3/bucketput.go b/api/pkg/s3/bucketput.go
index 95b38c752..45544ef7b 100644
--- a/api/pkg/s3/bucketput.go
+++ b/api/pkg/s3/bucketput.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -33,7 +33,6 @@ func (s *APIService) BucketPut(request *restful.Request, response *restful.Respo
 		return
 	}
 	bucketName := request.PathParameter("bucketName")
-	log.Logf("Received request for create bucket: %s", bucketName)
 	ctx := context.Background()
 	bucket := s3.Bucket{Name: bucketName}
@@ -45,7 +44,7 @@ func (s *APIService) BucketPut(request *restful.Request, response *restful.Respo
 	bucket.Deleted = false
 	bucket.OwnerDisplayName = ownerDisplayName
 	bucket.CreationDate = time.Now().Unix()
-	//log.Logf("Create bucket body: %s", string(body))
+
 	if body != nil {
 		createBucketConf := model.CreateBucketConfiguration{}
 		err := xml.Unmarshal(body, &createBucketConf)
@@ -63,8 +62,8 @@ func (s *APIService) BucketPut(request *restful.Request, response *restful.Respo
 				return
 			}
 		} else {
-			log.Logf("backetName is %v\n", backendName)
-			response.WriteError(http.StatusNotFound, NoSuchBackend.Error())
+			log.Log("default backend is not provided.")
+			response.WriteError(http.StatusBadRequest, NoSuchBackend.Error())
 			return
 		}
 	}
@@ -77,5 +76,4 @@ func (s *APIService) BucketPut(request *restful.Request, response *restful.Respo
 	}
 	log.Log("Create bucket successfully.")
 	response.WriteEntity(res)
-
 }
diff --git a/api/pkg/s3/completemultipartupload.go b/api/pkg/s3/completemultipartupload.go
index 5f01aba2b..9aa63e0ba 100644
--- a/api/pkg/s3/completemultipartupload.go
+++ b/api/pkg/s3/completemultipartupload.go
@@ -55,11 +55,9 @@ func (s *APIService) CompleteMultipartUpload(request *restful.Request, response
 		return
 	}

-	//_, s3err = client.GetObjectInfo(bucketName, objectKey, ctx)
-	if s3err != NoError {
-		response.WriteError(http.StatusInternalServerError, s3err.Error())
-		return
-	}
+	// Delete the multipart upload record; if the deletion fails, the record will be cleaned up by lifecycle management.
+	record := s3.MultipartUploadRecord{ObjectKey: objectKey, Bucket: bucketName, UploadId: UploadId}
+	s.s3Client.DeleteUploadRecord(context.Background(), &record)

 	objectMD.Partions = nil
 	objectMD.LastModified = time.Now().Unix()
diff --git a/api/pkg/s3/datastore/azure/azure.go b/api/pkg/s3/datastore/azure/azure.go
index 276e78389..882e434d2 100644
--- a/api/pkg/s3/datastore/azure/azure.go
+++ b/api/pkg/s3/datastore/azure/azure.go
@@ -27,7 +27,6 @@ import (
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	"github.com/micro/go-log"
-	backendpb "github.com/opensds/multi-cloud/backend/proto"
 	. "github.com/opensds/multi-cloud/s3/pkg/exception"
 	"github.com/opensds/multi-cloud/s3/pkg/model"
@@ -161,6 +160,13 @@ func (ad *AzureAdapter) DELETE(object *pb.DeleteObjectInput, ctx context.Context
 	delRsp, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
 	log.Logf("blobURL=%v,err=%v\n", blobURL, err)
 	if err != nil {
+		if serr, ok := err.(azblob.StorageError); ok { // This is a service-specific error.
+			log.Logf("service code:%s\n", serr.ServiceCode())
+			if string(serr.ServiceCode()) == string(azblob.StorageErrorCodeBlobNotFound) {
+				return NoError
+			}
+		}
+
 		log.Logf("[AzureAdapter] Delete failed:%v\n", err)
 		return S3Error{Code: 500, Description: "Delete failed"}
 	}
diff --git a/api/pkg/s3/lifecycleerrorcodes.go b/api/pkg/s3/lifecycleerrorcodes.go
new file mode 100644
index 000000000..99afd8ca0
--- /dev/null
+++ b/api/pkg/s3/lifecycleerrorcodes.go
@@ -0,0 +1,26 @@
+// Copyright 2019 The OpenSDS Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s3
+
+const (
+	DuplicateRuleIDError               = "error: PUT bucket lifecycle failed because the ruleID $1 is duplicated."
+	NoRequestBody                      = "error: no request body specified for creating lifecycle configuration"
+	MoreThanOneExpirationAction        = "error: more than one expiration action is not permitted in one rule"
+	DaysInStorageClassBeforeExpiration = "error: days of an expiration action must exceed the days of the preceding action by at least the expiration minimum"
+	DaysInStorageClassBeforeTransition = "error: days of a transition action must exceed the days of the preceding action by at least the transition step"
+	NoRuleIDForLifecycleDelete         = "error: no rule ID specified to delete from lifecycle configuration"
+	WrongRuleIDForLifecycleDelete      = "error: the rule ID which is specified for delete does not exist"
+	InvalidTier                        = "error: invalid tier"
+)
diff --git a/api/pkg/s3/liststorageclasses.go b/api/pkg/s3/liststorageclasses.go
index 9ed50f009..35d7705c5 100644
--- a/api/pkg/s3/liststorageclasses.go
+++ b/api/pkg/s3/liststorageclasses.go
@@ -15,16 +15,15 @@
 package s3

 import (
-	"net/http"
 	"encoding/xml"
+	"net/http"

 	"github.com/emicklei/go-restful"
 	"github.com/micro/go-log"
-	"golang.org/x/net/context"
-
 	"github.com/opensds/multi-cloud/api/pkg/policy"
-	"github.com/opensds/multi-cloud/s3/proto"
 	"github.com/opensds/multi-cloud/s3/pkg/model"
+	s3 "github.com/opensds/multi-cloud/s3/proto"
+	"golang.org/x/net/context"
 )

 func (s *APIService) GetStorageClasses(request *restful.Request, response *restful.Response) {
@@ -59,4 +58,3 @@ func (s *APIService) GetStorageClasses(request *restful.Request, response *restf
 		log.Logf("Get List of storage classes successfully:%v\n", string(xmlstring))
 	}
 }
-
diff --git a/api/pkg/s3/multipartuploadinit.go b/api/pkg/s3/multipartuploadinit.go
index d47484784..d63fff25c 100644
--- a/api/pkg/s3/multipartuploadinit.go
+++ b/api/pkg/s3/multipartuploadinit.go
@@ -18,17 +18,17 @@ import (
 	"net/http"
 	"time"

-	"github.com/opensds/multi-cloud/api/pkg/s3/datastore"
+	"encoding/xml"

 	"github.com/emicklei/go-restful"
 	"github.com/micro/go-log"
-
-	"encoding/xml"
-
+	"github.com/opensds/multi-cloud/api/pkg/s3/datastore"
 	. "github.com/opensds/multi-cloud/s3/pkg/exception"
 	"github.com/opensds/multi-cloud/s3/pkg/model"
+	"github.com/opensds/multi-cloud/s3/pkg/utils"
 	s3 "github.com/opensds/multi-cloud/s3/proto"
 	"golang.org/x/net/context"
+	"github.com/opensds/multi-cloud/api/pkg/utils/constants"
 )

 //ObjectPut -
@@ -67,8 +67,24 @@ func (s *APIService) MultiPartUploadInit(request *restful.Request, response *res
 		response.WriteError(http.StatusInternalServerError, s3err.Error())
 		return
 	}
-	object.ObjectKey = objectKey
+
 	lastModified := time.Now().Unix()
+	record := s3.MultipartUploadRecord{ObjectKey: objectKey, Bucket: bucketName, Backend: object.Backend, UploadId: res.UploadId}
+	record.InitTime = lastModified
+	_, err := s.s3Client.AddUploadRecord(context.Background(), &record)
+	if err != nil {
+		client.AbortMultipartUpload(res, ctx)
+		response.WriteError(http.StatusInternalServerError, err)
+		return
+	}
+
+	// Currently, only tier1 is supported as the default.
+	tier := int32(utils.Tier1)
+	object.Tier = tier
+	// standard as default
+	object.StorageClass = constants.StorageClassOpenSDSStandard
+
+	object.ObjectKey = objectKey
 	objectInput := s3.GetObjectInput{Bucket: bucketName, Key: objectKey}
 	objectMD, _ := s.s3Client.GetObject(ctx, &objectInput)
 	if objectMD != nil {
@@ -80,6 +96,8 @@ func (s *APIService) MultiPartUploadInit(request *restful.Request, response *res
 		objectMD.Backend = object.Backend
 		objectMD.Size = int64(size)
 		objectMD.LastModified = lastModified
+		objectMD.Tier = object.Tier
+		objectMD.StorageClass = object.StorageClass
 		//insert metadata
 		_, err := s.s3Client.CreateObject(ctx, objectMD)
 		if err != nil {
diff --git a/api/pkg/s3/objectput.go b/api/pkg/s3/objectput.go
index 067050255..5725ca326 100644
--- a/api/pkg/s3/objectput.go
+++ b/api/pkg/s3/objectput.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -16,18 +16,17 @@ package s3

 import (
 	"net/http"
+	"strconv"
 	"strings"
 	"time"

 	"github.com/emicklei/go-restful"
 	"github.com/micro/go-log"
-	"github.com/opensds/multi-cloud/api/pkg/s3/datastore"
-
-	// "github.com/micro/go-micro/errors"
-	"strconv"
-
 	"github.com/opensds/multi-cloud/api/pkg/policy"
+	"github.com/opensds/multi-cloud/api/pkg/s3/datastore"
+	"github.com/opensds/multi-cloud/api/pkg/utils/constants"
 	. "github.com/opensds/multi-cloud/s3/pkg/exception"
+	"github.com/opensds/multi-cloud/s3/pkg/utils"
 	s3 "github.com/opensds/multi-cloud/s3/proto"
 	"golang.org/x/net/context"
 )
@@ -50,6 +49,10 @@ func (s *APIService) ObjectPut(request *restful.Request, response *restful.Respo
 	contentLenght := request.HeaderParameter("content-length")
 	backendName := request.HeaderParameter("x-amz-storage-class")
 	log.Logf("backendName is :%v\n", backendName)
+
+	// Currently, only tier1 is supported as the default.
+	tier := int32(utils.Tier1)
+
 	object := s3.Object{}
 	object.BucketName = bucketName
 	size, _ := strconv.ParseInt(contentLenght, 10, 64)
@@ -58,6 +61,10 @@ func (s *APIService) ObjectPut(request *restful.Request, response *restful.Respo
 	object.IsDeleteMarker = ""
 	object.InitFlag = ""
 	object.LastModified = time.Now().Unix()
+	object.Tier = tier
+	// standard as default
+	object.StorageClass = constants.StorageClassOpenSDSStandard
+
 	ctx := context.WithValue(request.Request.Context(), "operation", "upload")

 	log.Logf("Received request for create bucket: %s", bucketName)
@@ -86,6 +93,7 @@ func (s *APIService) ObjectPut(request *restful.Request, response *restful.Respo
 		response.WriteError(http.StatusInternalServerError, s3err.Error())
 		return
 	}
+
 	res, err := s.s3Client.CreateObject(ctx, &object)
 	if err != nil {
 		log.Logf("err is %v\n", err)
diff --git a/api/pkg/s3/routbucketdelete.go b/api/pkg/s3/routbucketdelete.go
new file mode 100644
index 000000000..aaf354472
--- /dev/null
+++ b/api/pkg/s3/routbucketdelete.go
@@ -0,0 +1,44 @@
+// Copyright 2019 The OpenSDS Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s3
+
+import (
+	"github.com/emicklei/go-restful"
+	"github.com/opensds/multi-cloud/api/pkg/policy"
+)
+
+func (s *APIService) RouteBucketDelete(request *restful.Request, response *restful.Response) {
+	if !policy.Authorize(request, response, "routbucket:delete") {
+		return
+	}
+	if IsQuery(request, "acl") {
+		//TODO
+	} else if IsQuery(request, "versioning") {
+		//TODO
+	} else if IsQuery(request, "website") {
+		//TODO
+	} else if IsQuery(request, "cors") {
+		//TODO
+	} else if IsQuery(request, "replication") {
+		//TODO
+	} else if IsQuery(request, "lifecycle") {
+		s.BucketLifecycleDelete(request, response)
+	} else {
+		s.BucketDelete(request, response)
+	}
+}
diff --git a/api/pkg/s3/routbucketget.go b/api/pkg/s3/routbucketget.go
new file mode 100644
index 000000000..3629612f3
--- /dev/null
+++ b/api/pkg/s3/routbucketget.go
@@ -0,0 +1,44 @@
+// Copyright 2019 The OpenSDS Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s3
+
+import (
+	"github.com/emicklei/go-restful"
+	"github.com/opensds/multi-cloud/api/pkg/policy"
+)
+
+func (s *APIService) RouteBucketGet(request *restful.Request, response *restful.Response) {
+	if !policy.Authorize(request, response, "routbucket:get") {
+		return
+	}
+	if IsQuery(request, "acl") {
+		//TODO
+	} else if IsQuery(request, "versioning") {
+		//TODO
+	} else if IsQuery(request, "website") {
+		//TODO
+	} else if IsQuery(request, "cors") {
+		//TODO
+	} else if IsQuery(request, "replication") {
+		//TODO
+	} else if IsQuery(request, "lifecycle") {
+		s.BucketLifecycleGet(request, response)
+	} else {
+		s.BucketGet(request, response)
+	}
+}
diff --git a/api/pkg/s3/routbucketput.go b/api/pkg/s3/routbucketput.go
index d772cf2d4..1e2565677 100644
--- a/api/pkg/s3/routbucketput.go
+++ b/api/pkg/s3/routbucketput.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -36,7 +36,7 @@ func (s *APIService) RouteBucketPut(request *restful.Request, response *restful.
 		//TODO
 	} else if IsQuery(request, "lifecycle") {
-		//TODO
+		s.BucketLifecyclePut(request, response)
 	} else {
 		s.BucketPut(request, response)
diff --git a/api/pkg/s3/routeobjectput.go b/api/pkg/s3/routeobjectput.go
index cc944c355..0e9df9a73 100644
--- a/api/pkg/s3/routeobjectput.go
+++ b/api/pkg/s3/routeobjectput.go
@@ -17,7 +17,6 @@ package s3
 import (
 	"github.com/emicklei/go-restful"
 	"github.com/opensds/multi-cloud/api/pkg/policy"
-	// "github.com/micro/go-micro/errors"
 )

 func (s *APIService) RouteObjectPut(request *restful.Request, response *restful.Response) {
diff --git a/api/pkg/s3/router.go b/api/pkg/s3/router.go
index 2fc8e78b2..b0ba735b3 100644
--- a/api/pkg/s3/router.go
+++ b/api/pkg/s3/router.go
@@ -26,8 +26,8 @@ func RegisterRouter(ws *restful.WebService) {
 	ws.Route(ws.GET("/storageClasses").To(handler.GetStorageClasses)).Doc("Return supported storage classes.")
 	ws.Route(ws.PUT("/{bucketName}").To(handler.RouteBucketPut)).Doc("Create bucket for the user")
 	//ws.Route(ws.HEAD("/s3/{bucketName}").To(handler.BucketHead)).Doc("Determine if bucket exists and if user has permission to access it")
-	ws.Route(ws.GET("/{bucketName}").To(handler.BucketGet)).Doc("Return list of objects in bucket")
-	ws.Route(ws.DELETE("/{bucketName}").To(handler.BucketDelete)).Doc("Delete bucket")
+	ws.Route(ws.GET("/{bucketName}").To(handler.RouteBucketGet)).Doc("Return list of objects in bucket")
+	ws.Route(ws.DELETE("/{bucketName}").To(handler.RouteBucketDelete)).Doc("Delete bucket")
 	ws.Route(ws.PUT("/{bucketName}/{objectKey:*}").To(handler.RouteObjectPut)).Doc("Upload object")
 	ws.Route(ws.DELETE("/{bucketName}/{objectKey:*}").To(handler.RouteObjectDelete)).Doc("Delete object")
 	ws.Route(ws.GET("/{bucketName}/{objectKey:*}").To(handler.ObjectGet)).Doc("Download object")
@@ -36,4 +36,9 @@ func RegisterRouter(ws *restful.WebService) {
 	ws.Route(ws.PUT("/{bucketName}/{objectKey:*}").To(handler.RouteObjectPut)).Doc("UploadPart")
 	ws.Route(ws.PUT("/{bucketName}/{objectKey:*}").To(handler.RouteObjectPut)).Doc("CompleteMultipartUpload")
 	ws.Route(ws.DELETE("/{bucketName}/{objectKey:*}").To(handler.RouteObjectDelete)).Doc("AbortMultipartUpload")
+
+	// Routes for PUT, GET and DELETE of the bucket lifecycle configuration.
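+	// The lifecycle dispatch itself happens in RouteBucketPut, RouteBucketGet and
+	// RouteBucketDelete, which inspect the query string via IsQuery.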
+	ws.Route(ws.PUT("/{bucketName}/?lifecycle").To(handler.RouteBucketPut)).Doc("Create lifecycle configuration for the bucket")
+	ws.Route(ws.GET("/{bucketName}/?lifecycle").To(handler.RouteBucketGet)).Doc("Get lifecycle configuration from the bucket")
+	ws.Route(ws.DELETE("/{bucketName}/?lifecycle").To(handler.RouteBucketDelete)).Doc("Delete lifecycle configuration from the bucket")
 }
diff --git a/api/pkg/utils/constants/constants.go b/api/pkg/utils/constants/constants.go
index c06dfd256..b3c7250ae 100644
--- a/api/pkg/utils/constants/constants.go
+++ b/api/pkg/utils/constants/constants.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2017 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -37,5 +37,22 @@
 )

 const (
-	StorageClassAWSStandard = "STANDARD"
+	StorageClassOpenSDSStandard = "STANDARD"
+	StorageClassAWSStandard     = "STANDARD"
+)
+
+const (
+	ActionNameExpiration = "expiration"
+	ActionNameTransition = "transition"
+)
+
+const (
+	ExpirationMinDays           = 1
+	TransitionMinDays           = 30
+	LifecycleTransitionDaysStep = 30 // The days an object should stay in the current tier before transitioning to the next one.
+	TransitionToArchiveMinDays  = 1
+)
+
+const (
+	Tier999 = 999
 )
diff --git a/dataflow/cmd/main.go b/dataflow/cmd/main.go
index a8ed7b22a..0ba498f1d 100644
--- a/dataflow/cmd/main.go
+++ b/dataflow/cmd/main.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -32,7 +32,9 @@ func main() {
 	service.Init()
 	pb.RegisterDataFlowHandler(service.Server(), handler.NewDataFlowService())
 	scheduler.LoadAllPlans()
+	scheduler.LoadLifecycleScheduler()
 	if err := service.Run(); err != nil {
 		log.Log(err)
 	}
 }
+
diff --git a/dataflow/pkg/db/db.go b/dataflow/pkg/db/db.go
index eb91073f0..eeca738e1 100644
--- a/dataflow/pkg/db/db.go
+++ b/dataflow/pkg/db/db.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -76,6 +76,8 @@
 	GetPlanByPolicy(ctx *c.Context, policyId string, limit int, offset int) ([]model.Plan, error)
 	LockSched(planId string) int
 	UnlockSched(planId string) int
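+	// Per-bucket lock used by the lifecycle scheduler; see handleBucketLifecycle
+	// in dataflow/pkg/scheduler/lifecycle/lifecycle_sched.go.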
+	LockBucketLifecycleSched(bucketName string) int
+	UnlockBucketLifecycleSched(bucketName string) int
 	//Job
 	CreateJob(ctx *c.Context, job *model.Job) (*model.Job, error)
 	GetJob(ctx *c.Context, id string) (*model.Job, error)
diff --git a/dataflow/pkg/db/drivers/mongo/mongo.go b/dataflow/pkg/db/drivers/mongo/mongo.go
index a531cf467..2e1c93195 100644
--- a/dataflow/pkg/db/drivers/mongo/mongo.go
+++ b/dataflow/pkg/db/drivers/mongo/mongo.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -204,6 +204,20 @@ func (ad *adapter) UnlockSched(planId string) int {
 	return unlock(ss, planId)
 }

+func (ad *adapter) LockBucketLifecycleSched(bucketName string) int {
+	ss := ad.s.Copy()
+	defer ss.Close()
+
+	return lock(ss, bucketName, 300) // One scheduling run is supposed to finish within 300 seconds.
+}
+
+func (ad *adapter) UnlockBucketLifecycleSched(bucketName string) int {
+	ss := ad.s.Copy()
+	defer ss.Close()
+
+	return unlock(ss, bucketName)
+}
+
 func (ad *adapter) CreatePolicy(ctx *Context, pol *Policy) (*Policy, error) {
 	pol.Tenant = ctx.TenantId
 	pol.Id = bson.NewObjectId()
diff --git a/dataflow/pkg/scheduler/lifecycle/lifecycle_sched.go b/dataflow/pkg/scheduler/lifecycle/lifecycle_sched.go
new file mode 100644
index 000000000..8d92683e5
--- /dev/null
+++ b/dataflow/pkg/scheduler/lifecycle/lifecycle_sched.go
@@ -0,0 +1,385 @@
+// Copyright 2019 The OpenSDS Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lifecycle
+
+import (
+	"encoding/json"
+	"fmt"
+	"sort"
+	"strconv"
+	"sync"
+
+	"github.com/micro/go-log"
+	"github.com/micro/go-micro/client"
+	"github.com/opensds/multi-cloud/dataflow/pkg/db"
+	"github.com/opensds/multi-cloud/dataflow/pkg/kafka"
+	. "github.com/opensds/multi-cloud/dataflow/pkg/model"
+	. "github.com/opensds/multi-cloud/dataflow/pkg/utils"
+	datamover "github.com/opensds/multi-cloud/datamover/proto"
+	osdss3 "github.com/opensds/multi-cloud/s3/proto"
+	s3 "github.com/opensds/multi-cloud/s3/proto"
+	"golang.org/x/net/context"
+)
+
+var topicLifecycle = "lifecycle"
+var s3client = osdss3.NewS3Service("s3", client.DefaultClient)
+
+type InterRules []*InternalLifecycleRule
+
+// TransitionMap is the set of allowed transitions; each key names a source tier
+// and a destination tier between which a transition may happen.
+var TransitionMap map[string]struct{}
+var mutex sync.Mutex
+
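+// loadStorageClassDefinition fills TransitionMap from the tier map provided by the
+// s3 service. Each key has the form "<sourceTier>:<destinationTier>"; e.g. (tier
+// numbers are illustrative) "1:99" would allow transitions from tier 1 to tier 99.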
+func loadStorageClassDefinition() error {
+	res, err := s3client.GetTierMap(context.Background(), &s3.BaseRequest{})
+	if err != nil || len(res.Transition) == 0 {
+		log.Log("get transition map failed")
+		return fmt.Errorf("get tier definition failed")
+	} else {
+		log.Logf("res.Transition:%v", res.Transition)
+		log.Logf("res.Tier2Name:%+v", res.Tier2Name)
+	}
+
+	TransitionMap = make(map[string]struct{})
+	for _, v := range res.Transition {
+		TransitionMap[v] = struct{}{}
+	}
+
+	return nil
+}
+
+// Get the lifecycle rules of each bucket from the database, and schedule according to those rules.
+func ScheduleLifecycle() {
+	log.Log("[ScheduleLifecycle] begin ...")
+	// Load the transition map.
+	{
+		mutex.Lock()
+		defer mutex.Unlock()
+		if len(TransitionMap) == 0 {
+			err := loadStorageClassDefinition()
+			if err != nil {
+				log.Logf("[ScheduleLifecycle]load storage classes failed: %v.\n", err)
+				return
+			}
+		}
+	}
+
+	// Get the bucket list.
+	listReq := s3.BaseRequest{Id: "test"}
+	listRsp, err := s3client.ListBuckets(context.Background(), &listReq)
+	if err != nil {
+		log.Logf("[ScheduleLifecycle]list buckets failed: %v.\n", err)
+		return
+	}
+
+	for _, v := range listRsp.Buckets {
+		// For each bucket, get the lifecycle rules and schedule each rule.
+		if v.LifecycleConfiguration == nil {
+			log.Logf("[ScheduleLifecycle]bucket[%s] has no lifecycle rule.\n", v.Name)
+			continue
+		}
+
+		log.Logf("[ScheduleLifecycle]bucket[%s] has lifecycle rule.\n", v.Name)
+
+		err := handleBucketLifecycle(v.Name, v.Backend, v.LifecycleConfiguration)
+		if err != nil {
+			log.Logf("[ScheduleLifecycle]handle bucket lifecycle for bucket[%s] failed, err:%v.\n", v.Name, err)
+			continue
+		}
+	}
+
+	log.Log("[ScheduleLifecycle] end ...")
+}
+
+// The bucket needs to be locked, in case the schedule period is too short and the
+// bucket is scheduled again while a previous run is still in progress. Conflicts
+// among rules also need to be considered.
+func handleBucketLifecycle(bucket string, defaultBackend string, rules []*osdss3.LifecycleRule) error {
+	// Translate the rules set by the user into internal rules which can be sorted.
+
+	// Lifecycle scheduling must be mutually exclusive among several schedulers, so get the lock first.
+	ret := db.DbAdapter.LockBucketLifecycleSched(bucket)
+	for i := 0; i < 3; i++ {
+		if ret == LockSuccess {
+			break
+		} else if ret == LockBusy {
+			return fmt.Errorf("lifecycle scheduling of bucket[%s] is in progress", bucket)
+		} else {
+			// Try to lock again, three times at most.
+			ret = db.DbAdapter.LockBucketLifecycleSched(bucket)
+		}
+	}
+	if ret != LockSuccess {
+		log.Logf("lock scheduling failed.\n")
+		return fmt.Errorf("internal error: lock failed")
+	}
+	// Make sure to unlock before returning.
+	defer db.DbAdapter.UnlockBucketLifecycleSched(bucket)
+
+	var inRules, abortRules InterRules
+	for _, rule := range rules {
+		if rule.Status == RuleStatusDisabled {
+			continue
+		}
+
+		// actions
+		for _, ac := range rule.Actions {
+			if ac.Backend == "" {
+				// If no backend is specified, use the default backend of the bucket.
+				ac.Backend = defaultBackend
+			}
+			var acType int
+			if ac.Name == ActionNameExpiration {
+				// Expiration
+				acType = ActionExpiration
+			} else if ac.Backend == defaultBackend {
+				acType = ActionIncloudTransition
+			} else {
+				acType = ActionCrosscloudTransition
+			}
+
+			v := InternalLifecycleRule{Bucket: bucket, Days: ac.GetDays(), ActionType: acType, Tier: ac.GetTier(), Backend: ac.GetBackend()}
+			if rule.GetFilter() != nil {
+				v.Filter = InternalLifecycleFilter{Prefix: rule.Filter.Prefix}
+			}
+			inRules = append(inRules, &v)
+		}
+
+		if rule.AbortIncompleteMultipartUpload.DaysAfterInitiation > 0 {
+			// Abort incomplete multipart uploads.
+			abortRule := InternalLifecycleRule{Bucket: bucket, Days: rule.AbortIncompleteMultipartUpload.DaysAfterInitiation, ActionType: AbortIncompleteMultipartUpload}
+			if rule.GetFilter() != nil {
+				abortRule.Filter = InternalLifecycleFilter{Prefix: rule.Filter.Prefix}
+			}
+			abortRules = append(abortRules, &abortRule)
+		}
+	}
+
+	// Sort the rules. In case different actions exist for an object at the same time,
+	// for example expiration after 30 days and transition to azure after 60 days, we
+	// need to make sure only one action is taken, and that needs the sorting to be stable.
+	sort.Stable(inRules)
+	// Begin: Log for debug
+	for _, v := range inRules {
+		log.Logf("action rule: %+v\n", *v)
+	}
+	// End: Log for debug
+	schedSortedActionsRules(&inRules)
+
+	sort.Stable(abortRules)
+	// Begin: Log for debug
+	for _, v := range abortRules {
+		log.Logf("abort rule: %+v\n", *v)
+	}
+	// End: Log for debug
+	schedSortedAbortRules(&abortRules)
+
+	return nil
+}
+
+func checkTransitionValidation(source int32, destination int32) bool {
+	key := fmt.Sprintf("%d:%d", source, destination)
+	if _, ok := TransitionMap[key]; !ok {
+		return false
+	}
+
+	return true
+}
+
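+// getObjects pages through the objects of the bucket that match the rule. The
+// filter sent to the s3 service is built from the rule; e.g. for prefix "logs/",
+// 30 days and tier 1 it would be (illustrative values):
+//   {"objkey": "^logs/", "lastmodified": "{\"gte\":\"30\"}", "tier": "1"}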
+func getObjects(r *InternalLifecycleRule, offset, limit int32) ([]*osdss3.Object, error) {
+	// Get objects by communicating with the s3 service.
+	filt := make(map[string]string)
+	if len(r.Filter.Prefix) > 0 {
+		filt[KObjKey] = "^" + r.Filter.Prefix
+	}
+
+	modifyFilt := fmt.Sprintf("{\"gte\":\"%d\"}", r.Days)
+	filt[KLastModified] = modifyFilt
+	if r.ActionType != ActionExpiration {
+		filt[KStorageTier] = strconv.Itoa(int(r.Tier))
+	}
+
+	s3req := osdss3.ListObjectsRequest{
+		Bucket: r.Bucket,
+		Filter: filt,
+		Offset: offset,
+		Limit:  limit,
+	}
+	ctx := context.Background()
+	log.Logf("ListObjectsRequest:%+v\n", s3req)
+	s3rsp, err := s3client.ListObjects(ctx, &s3req)
+	if err != nil {
+		log.Logf("list objects failed, req: %v.\n", s3req)
+		return nil, err
+	}
+
+	return s3rsp.ListObjects, nil
+}
+
+func schedSortedAbortRules(inRules *InterRules) {
+	dupCheck := map[string]struct{}{}
+	for _, r := range *inRules {
+		var offset, limit int32 = 0, 1000
+		for {
+			req := osdss3.ListMultipartUploadRequest{Bucket: r.Bucket, Prefix: r.Filter.Prefix, Days: r.Days, Limit: limit, Offset: offset}
+			s3rsp, err := s3client.ListUploadRecord(context.Background(), &req)
+			if err != nil {
+				log.Logf("schedule for rule[id=%s,bucket=%s] failed, err:%v\n", r.Id, r.Bucket, err)
+				break
+			}
+			records := s3rsp.Records
+			num := int32(len(records))
+			offset += num
+			log.Logf("schedSortedAbortRules:num=%d,offset=%d\n", num, offset)
+			for _, rc := range records {
+				if _, ok := dupCheck[rc.UploadId]; !ok {
+					req := datamover.LifecycleActionRequest{
+						ObjKey:        rc.ObjectKey,
+						BucketName:    rc.Bucket,
+						UploadId:      rc.UploadId,
+						TargetBackend: rc.Backend,
+						Action:        AbortIncompleteMultipartUpload,
+					}
+					// If sending fails, ignore it; the request will be re-sent in the next schedule period.
+					sendActionRequest(&req)
+
+					// Add the upload id to dupCheck so it will not be processed repeatedly in this round of scheduling.
+					dupCheck[rc.UploadId] = struct{}{}
+				} else {
+					log.Logf("upload[id=%s] is already handled in this schedule time.\n", rc.UploadId)
+				}
+			}
+			if num < limit {
+				break
+			}
+		}
+	}
+}
+
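+// schedSortedActionsRules walks the sorted rules and sends at most one lifecycle
+// action request per object; dupCheck ensures that when several rules match the
+// same object, only the first (highest-priority, see InterRules.Less) action wins.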
+func schedSortedActionsRules(inRules *InterRules) {
+	dupCheck := map[string]struct{}{}
+	for _, r := range *inRules {
+		var offset, limit int32 = 0, 1000
+		for {
+			objs, err := getObjects(r, offset, limit)
+			if err != nil {
+				break
+			}
+			num := int32(len(objs))
+			offset += num
+			// Check whether the object already exists in the dupCheck map.
+			for _, obj := range objs {
+				if obj.IsDeleteMarker == "1" {
+					log.Logf("deleteMarker of object[%s] is set, no lifecycle action is needed.\n", obj.ObjectKey)
+					continue
+				}
+				if _, ok := dupCheck[obj.ObjectKey]; !ok {
+					// Not existing means this object has not been processed in this round of scheduling.
+					if r.ActionType != ActionExpiration && obj.Backend == r.Backend && obj.Tier == r.Tier {
+						// For transition, if the target backend and storage class are the same as the
+						// source backend and storage class, then no transition is needed.
+						log.Logf("no need transition for object[%s], backend=%s, tier=%d\n", obj.ObjectKey, r.Backend, r.Tier)
+						// In case different actions exist for an object at the same time, for example transition
+						// to aws after 30 days and transition to azure after 30 days, we need to make sure only
+						// one action is taken.
+						dupCheck[obj.ObjectKey] = struct{}{}
+						continue
+					}
+
+					// Send the request.
+					var action int32
+					if r.ActionType == ActionExpiration {
+						action = int32(ActionExpiration)
+					} else if obj.Backend == r.Backend {
+						action = int32(ActionIncloudTransition)
+					} else {
+						action = int32(ActionCrosscloudTransition)
+					}
+
+					if r.ActionType != ActionExpiration && !checkTransitionValidation(obj.Tier, r.Tier) {
+						log.Logf("transition object[%s] from tier[%d] to tier[%d] is invalid.\n", obj.ObjectKey, obj.Tier, r.Tier)
+						// In case different actions exist for an object at the same time, we need to make
+						// sure only one action is taken.
+						dupCheck[obj.ObjectKey] = struct{}{}
+						continue
+					}
+					log.Logf("lifecycle action: object=[%s] type=[%d] source-tier=[%d] target-tier=[%d] source-backend=[%s] target-backend=[%s].\n",
+						obj.ObjectKey, r.ActionType, obj.Tier, r.Tier, obj.Backend, r.Backend)
+					acreq := datamover.LifecycleActionRequest{
+						ObjKey:        obj.ObjectKey,
+						BucketName:    obj.BucketName,
+						Action:        action,
+						SourceTier:    obj.Tier,
+						TargetTier:    r.Tier,
+						SourceBackend: obj.Backend,
+						TargetBackend: r.Backend,
+						ObjSize:       obj.Size,
+						LastModified:  obj.LastModified,
+					}
+
+					// If sending fails, ignore it; the request will be re-sent in the next schedule period.
+					sendActionRequest(&acreq)
+
+					// Add the object key to dupCheck so it will not be processed repeatedly in this round of scheduling.
+					dupCheck[obj.ObjectKey] = struct{}{}
+				} else {
+					log.Logf("object[%s] is already handled in this schedule time.\n", obj.ObjectKey)
+				}
+			}
+			if num < limit {
+				break
+			}
+		}
+	}
+}
+
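+// sendActionRequest publishes the lifecycle action to the "lifecycle" kafka topic
+// (topicLifecycle above), from which the datamover service is expected to consume.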
+func sendActionRequest(req *datamover.LifecycleActionRequest) error {
+	log.Logf("Send lifecycle request to datamover: %v\n", req)
+	data, err := json.Marshal(*req)
+	if err != nil {
+		log.Logf("marshal run job request failed, err:%v\n", err)
+		return err
+	}
+
+	return kafka.ProduceMsg(topicLifecycle, data)
+}
+
+func (r InterRules) Len() int {
+	return len(r)
+}
+
+/*
+	Less reports whether the element with index i should sort before the element with index j.
+	Expiration actions have higher priority than transition actions.
+	For the same action type, a bigger Days value has higher priority.
+*/
+func (r InterRules) Less(i, j int) bool {
+	if r[i].ActionType != r[j].ActionType {
+		return r[i].ActionType < r[j].ActionType
+	}
+	return r[i].Days > r[j].Days
+}
+
+func (r InterRules) Swap(i, j int) {
+	r[i], r[j] = r[j], r[i]
+}
diff --git a/dataflow/pkg/scheduler/scheduler.go b/dataflow/pkg/scheduler/scheduler.go
index 9c781121e..dce91a422 100644
--- a/dataflow/pkg/scheduler/scheduler.go
+++ b/dataflow/pkg/scheduler/scheduler.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -15,14 +15,20 @@
 package scheduler

 import (
+	"fmt"
+	"os"
+
 	"github.com/micro/go-log"
 	"github.com/opensds/multi-cloud/api/pkg/filters/context"
 	"github.com/opensds/multi-cloud/dataflow/pkg/db"
 	"github.com/opensds/multi-cloud/dataflow/pkg/model"
 	"github.com/opensds/multi-cloud/dataflow/pkg/plan"
+	"github.com/opensds/multi-cloud/dataflow/pkg/scheduler/lifecycle"
 	"github.com/opensds/multi-cloud/dataflow/pkg/scheduler/trigger"
+	"github.com/robfig/cron"
 )

+
 func LoadAllPlans() {
 	ctx := context.NewAdminContext()
@@ -56,3 +62,21 @@ func LoadAllPlans() {
 		}
 	}
 }
+
+// This scheduler periodically scans all buckets for lifecycle rules and schedules
+// according to those rules.
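+// LIFECYCLE_CRON_CONFIG holds a robfig/cron spec (with a leading seconds field);
+// e.g. (illustrative value) "0 */10 * * * *" runs ScheduleLifecycle every ten minutes.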
+func LoadLifecycleScheduler() error {
+	spec := os.Getenv("LIFECYCLE_CRON_CONFIG")
+	log.Logf("Value of LIFECYCLE_CRON_CONFIG is: %s\n", spec)
+
+	// TODO: check the validity of spec.
+	cn := cron.New()
+	//0 */10 * * * ?
+	if err := cn.AddFunc(spec, lifecycle.ScheduleLifecycle); err != nil {
+		log.Logf("add lifecycle scheduler to cron trigger failed: %v.\n", err)
+		return fmt.Errorf("add lifecycle scheduler to cron trigger failed: %v", err)
+	}
+	cn.Start()
+
+	log.Log("add lifecycle scheduler to cron trigger successfully.")
+	return nil
+}
diff --git a/dataflow/pkg/utils/utils.go b/dataflow/pkg/utils/utils.go
index 65db33880..b2f1d1986 100644
--- a/dataflow/pkg/utils/utils.go
+++ b/dataflow/pkg/utils/utils.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,8 +14,46 @@

 package utils

+const (
+	ActionExpiration               = 1
+	ActionIncloudTransition        = 2
+	ActionCrosscloudTransition     = 3
+	AbortIncompleteMultipartUpload = 4
+)
+
+const (
+	ActionNameExpiration = "expiration"
+)
+
+const (
+	KObjKey       = "objkey"
+	KLastModified = "lastmodified"
+	KStorageTier  = "tier"
+)
+
+const (
+	RuleStatusEnabled  = "enabled"
+	RuleStatusDisabled = "disabled"
+)
+
 type Database struct {
 	Credential string `conf:"credential,username:password@tcp(ip:port)/dbname"`
 	Driver     string `conf:"driver,mongodb"`
 	Endpoint   string `conf:"endpoint,localhost:27017"`
 }
+
+type InternalLifecycleFilter struct {
+	Prefix string
+	Tags   []string
+}
+
+type InternalLifecycleRule struct {
+	Id           string
+	Bucket       string
+	Filter       InternalLifecycleFilter
+	Days         int32
+	Tier         int32
+	ActionType   int // ActionExpiration(1), ActionIncloudTransition(2), ActionCrosscloudTransition(3), AbortIncompleteMultipartUpload(4)
+	DeleteMarker string
+	Backend      string
+}
diff --git a/datamover/pkg/amazon/s3/s3lifecycle.go b/datamover/pkg/amazon/s3/s3lifecycle.go
new file mode 100644
index 000000000..8401a2a1a
--- /dev/null
+++ b/datamover/pkg/amazon/s3/s3lifecycle.go
@@ -0,0 +1,90 @@
+// Copyright 2019 The OpenSDS Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package s3mover
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/micro/go-log"
+	. "github.com/opensds/multi-cloud/datamover/pkg/utils"
+)
+
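+// ChangeStorageClass performs a server-side CopyObject of the object onto itself
+// with a new StorageClass set, so no object data is downloaded or re-uploaded.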
+func (mover *S3Mover) ChangeStorageClass(objKey *string, newClass *string, loca *BackendInfo) error {
+	log.Logf("[s3lifecycle] Change storage class of object[%s] to %s.", *objKey, *newClass)
+	s3c := s3Cred{ak: loca.Access, sk: loca.Security}
+	creds := credentials.NewCredentials(&s3c)
+	sess, err := session.NewSession(&aws.Config{
+		Region:      aws.String(loca.Region),
+		Endpoint:    aws.String(loca.EndPoint),
+		Credentials: creds,
+	})
+	if err != nil {
+		log.Logf("[s3lifecycle] new session failed, err:%v\n", err)
+		return handleAWSS3Errors(err)
+	}
+
+	svc := s3.New(sess)
+	input := &s3.CopyObjectInput{
+		Bucket:     aws.String(loca.BucketName),
+		Key:        aws.String(*objKey),
+		CopySource: aws.String(loca.BucketName + "/" + *objKey),
+	}
+	input.StorageClass = aws.String(*newClass)
+	_, err = svc.CopyObject(input)
+	if err != nil {
+		log.Logf("[s3lifecycle] Change storage class of object[%s] to %s failed: %v.\n", *objKey, *newClass, err)
+		e := handleAWSS3Errors(err)
+		return e
+	}
+
+	// TODO: How to make sure the copy is completed? Wait to see whether the item got copied (example: svc.WaitUntilObjectExists)?
+
+	return nil
+}
+
+func (mover *S3Mover) DeleteIncompleteMultipartUpload(objKey, uploadId string, loc *LocationInfo) error {
+	log.Logf("[s3lifecycle] Abort multipart upload[objkey:%s] for uploadId#%s.\n", objKey, uploadId)
+	s3c := s3Cred{ak: loc.Access, sk: loc.Security}
+	creds := credentials.NewCredentials(&s3c)
+	sess, err := session.NewSession(&aws.Config{
+		Region:      aws.String(loc.Region),
+		Endpoint:    aws.String(loc.EndPoint),
+		Credentials: creds,
+	})
+	if err != nil {
+		log.Logf("[s3lifecycle] new session failed, err:%v\n", err)
+		return handleAWSS3Errors(err)
+	}
+
+	abortInput := &s3.AbortMultipartUploadInput{
+		Bucket:   aws.String(loc.BucketName),
+		Key:      aws.String(objKey),
+		UploadId: aws.String(uploadId),
+	}
+
+	svc := s3.New(sess)
+	_, err = svc.AbortMultipartUpload(abortInput)
+	e := handleAWSS3Errors(err)
+	if e == nil || e.Error() == DMERR_NoSuchUpload {
+		log.Logf("[s3lifecycle] abort multipart upload[objkey:%s, uploadid:%s] successfully.\n", objKey, uploadId)
+		return nil
+	} else {
+		log.Logf("[s3lifecycle] abort multipart upload[objkey:%s, uploadid:%s] failed, err:%v.\n", objKey, uploadId, err)
+	}
+
+	return e
+}
diff --git a/datamover/pkg/amazon/s3/s3mover.go b/datamover/pkg/amazon/s3/s3mover.go
index f133b80db..58dbeb289 100644
--- a/datamover/pkg/amazon/s3/s3mover.go
+++ b/datamover/pkg/amazon/s3/s3mover.go
@@ -1,4 +1,4 @@
-// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved.
+// Copyright 2019 The OpenSDS Authors.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -20,6 +20,7 @@ import (
 	"strconv"

 	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/s3"
@@ -51,6 +52,25 @@ func (myc *s3Cred) IsExpired() bool {
 	return false
 }

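+// handleAWSS3Errors maps service-specific awserr.Error codes to the datamover's
+// DMERR_* strings (e.g. "InvalidAccessKeyId" -> DMERR_NoPermission) so callers can
+// tell which failures are not worth retrying.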
+ return 0, e } } else { log.Logf("[s3mover]downlad object[bucket:%s,key:%s] succeed, bytes:%d\n", srcLoca.BucketName, objKey, numBytes) - return numBytes, err + return numBytes, nil } } log.Logf("[s3mover]downlad object[bucket:%s,key:%s], should not be here.\n", srcLoca.BucketName, objKey) - return 0, errors.New("internal error") + return 0, errors.New(DMERR_InternalError) } func (mover *S3Mover) MultiPartDownloadInit(srcLoca *LocationInfo) error { @@ -136,8 +162,8 @@ func (mover *S3Mover) MultiPartDownloadInit(srcLoca *LocationInfo) error { Credentials: creds, }) if err != nil { - log.Logf("[s3mover] New session for multipart download failed, err:%v\n", err) - return err + log.Logf("[s3mover] new session for multipart download failed, err:%v\n", err) + return handleAWSS3Errors(err) } mover.downloader = s3manager.NewDownloader(sess) @@ -160,22 +186,23 @@ func (mover *S3Mover) DownloadRange(objKey string, srcLoca *LocationInfo, buf [] for tries := 1; tries <= 3; tries++ { numBytes, err := mover.downloader.Download(writer, &getObjInput) if err != nil { - log.Logf("[s3mover] Download object[%s] range[%d - %d] faild %d times, err:%v\n", + log.Logf("[s3mover] download object[%s] range[%d - %d] failed %d times, err:%v\n", objKey, start, end, tries, err) - if tries == 3 { - return 0, err + e := handleAWSS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. + return 0, e } } else { - log.Logf("[s3mover] Download object[%s] range[%d - %d] succeed, bytes:%d\n", objKey, start, end, numBytes) - return numBytes, err + log.Logf("[s3mover] download object[%s] range[%d - %d] succeed, bytes:%d\n", objKey, start, end, numBytes) + return numBytes, nil } } - log.Logf("[s3mover] Download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) - return 0, errors.New("internal error") + log.Logf("[s3mover] download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) + return 0, errors.New(DMERR_InternalError) } -func (mover *S3Mover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) error { +func (mover *S3Mover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) (string, error) { s3c := s3Cred{ak: destLoca.Access, sk: destLoca.Security} creds := credentials.NewCredentials(&s3c) sess, err := session.NewSession(&aws.Config{ @@ -184,8 +211,8 @@ func (mover *S3Mover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) Credentials: creds, }) if err != nil { - log.Logf("[s3mover] New session failed, err:%v\n", err) - return err + log.Logf("[s3mover] new session failed, err:%v\n", err) + return "", handleAWSS3Errors(err) } mover.svc = s3.New(sess) @@ -193,23 +220,27 @@ func (mover *S3Mover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) Bucket: aws.String(destLoca.BucketName), Key: aws.String(objKey), } + if destLoca.ClassName != "" { + multiUpInput.StorageClass = aws.String(destLoca.ClassName) + } log.Logf("[s3mover] Try to init multipart upload[objkey:%s].\n", objKey) for tries := 1; tries <= 3; tries++ { resp, err := mover.svc.CreateMultipartUpload(multiUpInput) if err != nil { - log.Logf("[s3mover] Init multipart upload[objkey:%s] failed %d times.\n", objKey, tries) - if tries == 3 { - return err + log.Logf("[s3mover] init multipart upload[objkey:%s] failed %d times, err:%v.\n", objKey, tries, err) + e := handleAWSS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. 
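MultiPartUploadInit now hands the uploadId back to the caller so an incomplete upload can be recorded before any part is written; a sketch of the intended caller-side contract (the record mirrors the MultipartUploadRecord used in migration.go later in this diff):

	uploadId, err := mover.MultiPartUploadInit(objKey, destLoca)
	if err != nil {
		return err
	}
	// Persist {bucket, objKey, uploadId} right away, so that lifecycle
	// management can abort the upload if the migration dies midway.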
+ return "", e } } else { mover.multiUploadInitOut = resp - log.Logf("[s3mover] Init multipart upload[objkey:%s] successfully, UploadId:%s\n", objKey, *resp.UploadId) - return nil + log.Logf("[s3mover] init multipart upload[objkey:%s] successfully, UploadId:%s\n", objKey, *resp.UploadId) + return "", nil } } - log.Logf("[s3mover] Init multipart upload[objkey:%s], should not be here.\n", objKey) - return errors.New("internal error") + log.Logf("[s3mover] init multipart upload[objkey:%s], should not be here.\n", objKey) + return *mover.multiUploadInitOut.UploadId, errors.New(DMERR_InternalError) } func (mover *S3Mover) UploadPart(objKey string, destLoca *LocationInfo, upBytes int64, buf []byte, partNumber int64, offset int64) error { @@ -229,10 +260,11 @@ func (mover *S3Mover) UploadPart(objKey string, destLoca *LocationInfo, upBytes for tries := 1; tries <= 3; tries++ { upRes, err := mover.svc.UploadPart(upPartInput) if err != nil { - log.Logf("[s3mover] Upload range[objkey:%s, partnumber#%d, offset#%d] failed %d times, err:%v\n", + log.Logf("[s3mover] upload range[objkey:%s, partnumber#%d, offset#%d] failed %d times, err:%v\n", objKey, partNumber, offset, tries, err) - if tries == 3 { - return err + e := handleAWSS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. + return e } } else { part := s3.CompletedPart{ @@ -245,8 +277,8 @@ func (mover *S3Mover) UploadPart(objKey string, destLoca *LocationInfo, upBytes } } - log.Logf("[s3mover] Upload range[objkey:%s, partnumber#%d, offset#%d], should not be here.\n", objKey, partNumber, offset) - return errors.New("internal error") + log.Logf("[s3mover] upload range[objkey:%s, partnumber#%d, offset#%d], should not be here.\n", objKey, partNumber, offset) + return errors.New(DMERR_InternalError) } func (mover *S3Mover) AbortMultipartUpload(objKey string, destLoca *LocationInfo) error { @@ -260,20 +292,21 @@ func (mover *S3Mover) AbortMultipartUpload(objKey string, destLoca *LocationInfo for tries := 1; tries <= 3; tries++ { _, err := mover.svc.AbortMultipartUpload(abortInput) if err != nil { - log.Logf("[s3mover] Abort multipart upload[objkey:%s] for uploadId#%s failed %d times.\n", - objKey, *mover.multiUploadInitOut.UploadId, tries) - if tries == 3 { - return err + log.Logf("[s3mover] abort multipart upload[objkey:%s] for uploadId#%s failed %d times, err:%v.\n", + objKey, *mover.multiUploadInitOut.UploadId, tries, err) + e := handleAWSS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. 
+ return e } } else { - log.Logf("[s3mover] Abort multipart upload[objkey:%s] for uploadId#%s successfully.\n", + log.Logf("[s3mover] abort multipart upload[objkey:%s] for uploadId#%s successfully.\n", objKey, *mover.multiUploadInitOut.UploadId, tries) return nil } } - log.Logf("[s3mover] Abort multipart upload[objkey:%s] for uploadId#%s, should not be here.\n", + log.Logf("[s3mover] abort multipart upload[objkey:%s] for uploadId#%s, should not be here.\n", objKey, *mover.multiUploadInitOut.UploadId) - return errors.New("internal error") + return errors.New(DMERR_InternalError) } func (mover *S3Mover) CompleteMultipartUpload(objKey string, destLoca *LocationInfo) error { @@ -291,8 +324,9 @@ func (mover *S3Mover) CompleteMultipartUpload(objKey string, destLoca *LocationI rsp, err := mover.svc.CompleteMultipartUpload(completeInput) if err != nil { log.Logf("[s3mover] completeMultipartUpload [objkey:%s] failed %d times, err:%v\n", objKey, tries, err) - if tries == 3 { - return err + e := handleAWSS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. + return e } } else { log.Logf("[s3mover] completeMultipartUpload successfully [objkey:%s], rsp:%v\n", objKey, rsp) @@ -301,7 +335,7 @@ func (mover *S3Mover) CompleteMultipartUpload(objKey string, destLoca *LocationI } log.Logf("[s3mover] completeMultipartUpload [objkey:%s], should not be here.\n", objKey) - return errors.New("internal error") + return errors.New(DMERR_InternalError) } func (mover *S3Mover) DeleteObj(objKey string, loca *LocationInfo) error { @@ -313,7 +347,7 @@ func (mover *S3Mover) DeleteObj(objKey string, loca *LocationInfo) error { Credentials: creds, }) if err != nil { - log.Logf("[s3mover] New session failed, err:%v\n", err) + log.Logf("[s3mover] new session failed, err:%v\n", err) return err } @@ -322,10 +356,11 @@ func (mover *S3Mover) DeleteObj(objKey string, loca *LocationInfo) error { for tries := 1; tries <= 3; tries++ { _, err = svc.DeleteObject(&s3.DeleteObjectInput{Bucket: aws.String(loca.BucketName), Key: aws.String(objKey)}) if err != nil { - log.Logf("[s3mover] Delete object[key:%s] from bucket %s failed %d times, err:%v\n", + log.Logf("[s3mover] delete object[key:%s] from bucket %s failed %d times, err:%v\n", objKey, loca.BucketName, tries, err) - if tries == 3 { - return err + e := handleAWSS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. 
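The delete path just below confirms removal with svc.WaitUntilObjectNotExists, which runs on the SDK's default polling schedule; where tighter bounds are wanted, the WithContext variant accepts waiter options. A sketch, assuming "github.com/aws/aws-sdk-go/aws/request" and "time" were added to the imports:

	err = svc.WaitUntilObjectNotExistsWithContext(
		aws.BackgroundContext(),
		&s3.HeadObjectInput{
			Bucket: aws.String(loca.BucketName),
			Key:    aws.String(objKey),
		},
		// Poll every 2 seconds, at most 10 times, instead of the defaults.
		request.WithWaiterMaxAttempts(10),
		request.WithWaiterDelay(request.ConstantWaiterDelay(2*time.Second)),
	)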
+ return e } } else { err = svc.WaitUntilObjectNotExists(&s3.HeadObjectInput{ @@ -333,16 +368,16 @@ func (mover *S3Mover) DeleteObj(objKey string, loca *LocationInfo) error { Key: aws.String(objKey), }) if err != nil { - log.Logf("[s3mover] Error occurred while waiting for object[%s] to be deleted.\n", objKey) + log.Logf("[s3mover] error occurred while waiting for object[%s] to be deleted.\n", objKey) } else { - log.Logf("[s3mover] Delete object[key:%s] from bucket %s successfully.\n", objKey, loca.BucketName) + log.Logf("[s3mover] delete object[key:%s] from bucket %s successfully.\n", objKey, loca.BucketName) } return err } } log.Logf("[s3mover] Delete Object[%s], should not be here.\n", objKey) - return errors.New("internal error") + return errors.New(DMERR_InternalError) } func ListObjs(loca *LocationInfo, filt *pb.Filter) ([]*s3.Object, error) { @@ -354,8 +389,8 @@ func ListObjs(loca *LocationInfo, filt *pb.Filter) ([]*s3.Object, error) { Credentials: creds, }) if err != nil { - log.Logf("[s3mover] New session failed, err:%v\n", err) - return nil, err + log.Logf("[s3mover] new session failed, err:%v\n", err) + return nil, handleAWSS3Errors(err) } svc := s3.New(sess) @@ -363,9 +398,10 @@ func ListObjs(loca *LocationInfo, filt *pb.Filter) ([]*s3.Object, error) { if filt != nil { input.Prefix = &filt.Prefix } - output, e := svc.ListObjects(input) + output, err := svc.ListObjects(input) + e := handleAWSS3Errors(err) if e != nil { - log.Logf("[s3mover] List aws bucket failed, err:%v\n", e) + log.Logf("[s3mover] list aws bucket failed, err:%v\n", e) return nil, e } @@ -373,9 +409,10 @@ func ListObjs(loca *LocationInfo, filt *pb.Filter) ([]*s3.Object, error) { for *output.IsTruncated == true { input.Marker = output.NextMarker output, err = svc.ListObjects(input) - if err != nil { - log.Logf("[s3mover] List objects failed, err:%v\n", err) - return nil, err + e := handleAWSS3Errors(err) + if e != nil { + log.Logf("[s3mover] list objects failed, err:%v\n", e) + return nil, e } objs = append(objs, output.Contents...) } diff --git a/datamover/pkg/azure/blob/bloblifecycle.go b/datamover/pkg/azure/blob/bloblifecycle.go new file mode 100644 index 000000000..6a6a8513c --- /dev/null +++ b/datamover/pkg/azure/blob/bloblifecycle.go @@ -0,0 +1,65 @@ +// Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package blobmover + +import ( + "context" + "errors" + + "github.com/Azure/azure-storage-blob-go/azblob" + "github.com/go-log/log" + . 
"github.com/opensds/multi-cloud/datamover/pkg/utils" +) + +func (mover *BlobMover) setTier(objKey *string, newClass *string) error { + ctx := context.Background() + blobURL := mover.containerURL.NewBlockBlobURL(*objKey) + var res *azblob.BlobSetTierResponse + var err error + switch *newClass { + case string(azblob.AccessTierHot): + res, err = blobURL.SetTier(ctx, azblob.AccessTierHot, azblob.LeaseAccessConditions{}) + case string(azblob.AccessTierCool): + res, err = blobURL.SetTier(ctx, azblob.AccessTierCool, azblob.LeaseAccessConditions{}) + case string(azblob.AccessTierArchive): + res, err = blobURL.SetTier(ctx, azblob.AccessTierArchive, azblob.LeaseAccessConditions{}) + default: + log.Logf("[blobmover]set tier of object[%s] to %s failed, err: invalid storage class.\n", objKey, newClass) + return errors.New("Invalid storage class") + } + if err != nil { + log.Logf("[blobmover]set tier of object[%s] to %s failed, err:%v\n", objKey, newClass, err) + } else { + log.Logf("[blobmover]set tier of object[%s] to %s succeed, res:%v\n", objKey, newClass, res.Response()) + } + + return err +} + +func (mover *BlobMover) ChangeStorageClass(objKey *string, newClass *string, bkend *BackendInfo) error { + log.Logf("") + err := mover.Init(&bkend.EndPoint, &bkend.Access, &bkend.Security) + if err != nil { + return err + } + + return mover.setTier(objKey, newClass) +} + +func (mover *BlobMover) DeleteIncompleteMultipartUpload(objKey, uploadId string, destLoca *LocationInfo) error { + log.Log("Azure blob does not support to delete incomplete multipart upload.") + + return errors.New(DMERR_InternalError) +} diff --git a/datamover/pkg/azure/blob/blobmover.go b/datamover/pkg/azure/blob/blobmover.go index 72d534056..5a8823a41 100644 --- a/datamover/pkg/azure/blob/blobmover.go +++ b/datamover/pkg/azure/blob/blobmover.go @@ -1,3 +1,17 @@ +// Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package blobmover import ( @@ -30,12 +44,29 @@ type BlobMover struct { completeParts []string } +func handleAzureBlobErrors(err error) error { + if err != nil { + if serr, ok := err.(azblob.StorageError); ok { // This error is a Service-specific + code := string(serr.ServiceCode()) + switch code { // Compare serviceCode to ServiceCodeXxx constants + case string(azblob.StorageErrorCodeAuthenticationFailed): + log.Log("azure error: permission denied.") + return errors.New(DMERR_NoPermission) + default: + return err + } + } + } + + return nil +} + func (mover *BlobMover) Init(endpoint *string, acountName *string, accountKey *string) error { var err error mover.containerURL, err = mover.createContainerURL(endpoint, acountName, accountKey) if err != nil { - log.Logf("[blobmover] Init container URL faild:%v\n", err) - return err + log.Logf("[blobmover] init container URL faild:%v\n", err) + return handleAzureBlobErrors(err) } log.Log("[blobmover] Init succeed, container URL:", mover.containerURL.String()) @@ -46,8 +77,8 @@ func (mover *BlobMover) createContainerURL(endpoint *string, acountName *string, error) { credential, err := azblob.NewSharedKeyCredential(*acountName, *accountKey) if err != nil { - log.Logf("[blobmover] Create credential failed, err:%v\n", err) - return azblob.ContainerURL{}, err + log.Logf("[blobmover] create credential failed, err:%v\n", err) + return azblob.ContainerURL{}, handleAzureBlobErrors(err) } //create containerURL @@ -64,7 +95,7 @@ func (mover *BlobMover) createContainerURL(endpoint *string, acountName *string, func (mover *BlobMover) DownloadObj(objKey string, srcLoca *LocationInfo, buf []byte) (size int64, err error) { err = mover.Init(&srcLoca.EndPoint, &srcLoca.Access, &srcLoca.Security) if err != nil { - return 0, err + return 0, handleAzureBlobErrors(err) } log.Logf("[blobmover] Try to download, bucket:%s,obj:%s\n", srcLoca.BucketName, objKey) @@ -74,9 +105,10 @@ func (mover *BlobMover) DownloadObj(objKey string, srcLoca *LocationInfo, buf [] downloadResp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false) if err != nil { - log.Logf("[blobmover] Download object[%s] faild %d times, err:%v\n", objKey, tries, err) - if tries == 3 { - return 0, err + log.Logf("[blobmover] download object[%s] failed %d times, err:%v\n", objKey, tries, err) + e := handleAzureBlobErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. + return 0, e } } else { size = 0 @@ -101,8 +133,8 @@ func (mover *BlobMover) DownloadObj(objKey string, srcLoca *LocationInfo, buf [] } } - log.Logf("[blobmover] Download object[%s], should not be here.", objKey) - return 0, errors.New("internal error") + log.Logf("[blobmover] download object[%s], should not be here.", objKey) + return 0, errors.New(DMERR_InternalError) } func (mover *BlobMover) UploadObj(objKey string, destLoca *LocationInfo, buf []byte) error { @@ -118,23 +150,30 @@ func (mover *BlobMover) UploadObj(objKey string, destLoca *LocationInfo, buf []b uploadResp, err := blobURL.Upload(ctx, bytes.NewReader(buf), azblob.BlobHTTPHeaders{}, nil, azblob.BlobAccessConditions{}) if err != nil { - log.Logf("[blobmover] Upload object[%s] faild %d times, err:%v\n", objKey, tries, err) - if tries == 3 { - return err + log.Logf("[blobmover] upload object[%s] failed %d times, err:%v\n", objKey, tries, err) + e := handleAzureBlobErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. 
+				return e
 			}
 		} else if uploadResp.StatusCode() != HTTP_CREATED {
-			log.Logf("[blobmover] Upload object[%s] StatusCode:%d\n", objKey, uploadResp.StatusCode())
+			log.Logf("[blobmover] upload object[%s] StatusCode:%d\n", objKey, uploadResp.StatusCode())
 			if tries == 3 {
-				return errors.New("Upload failed")
+				return errors.New(DMERR_InternalError)
 			}
 		} else {
 			log.Logf("[blobmover] Upload object[%s] successfully.", objKey)
+			if destLoca.ClassName != "" {
+				err := mover.setTier(&objKey, &destLoca.ClassName)
+				if err != nil {
+					// TODO: decide whether a failed tier change should fail the whole
+					// upload; at least record it instead of dropping the error silently.
+					log.Logf("[blobmover] set tier of object[%s] to %s failed, err:%v\n", objKey, destLoca.ClassName, err)
+				}
+			}
 			return nil
 		}
 	}
 
-	log.Logf("[blobmover] Upload object[%s], should not be here.", objKey)
-	return errors.New("internal error")
+	log.Logf("[blobmover] upload object[%s], should not be here.", objKey)
+	return errors.New(DMERR_InternalError)
 }
 
 func (mover *BlobMover) DeleteObj(objKey string, loca *LocationInfo) error {
@@ -149,23 +188,24 @@
 	for tries := 1; tries <= 3; tries++ {
 		delRsp, err := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionInclude, azblob.BlobAccessConditions{})
 		if err != nil {
-			log.Logf("[blobmover] Delete object[%s] faild %d times, err:%v\n", objKey, tries, err)
-			if tries == 3 {
-				return err
+			log.Logf("[blobmover] delete object[%s] failed %d times, err:%v\n", objKey, tries, err)
+			e := handleAzureBlobErrors(err)
+			if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry.
+				return e
 			}
 		} else if delRsp.StatusCode() == HTTP_OK || delRsp.StatusCode() == HTTP_ACCEPTED {
-			log.Logf("[blobmover] Delete object[%s] successfully.", objKey)
+			log.Logf("[blobmover] delete object[%s] successfully.", objKey)
 			return nil
 		} else {
-			log.Logf("[blobmover] Delete object[%s] StatusCode:%d\n", objKey, delRsp.StatusCode())
-			if tries == 3 {
-				return errors.New("Delete failed")
+			log.Logf("[blobmover] delete object[%s] StatusCode:%d\n", objKey, delRsp.StatusCode())
+			if tries >= 3 {
+				return errors.New(DMERR_InternalError)
 			}
 		}
 	}
 
-	log.Logf("[blobmover] Delete object[%s], should not be here.", objKey)
-	return errors.New("internal error")
+	log.Logf("[blobmover] delete object[%s], should not be here.", objKey)
+	return errors.New(DMERR_InternalError)
 }
 
 func (mover *BlobMover) MultiPartDownloadInit(srcLoca *LocationInfo) error {
@@ -185,25 +225,26 @@
 	for tries := 1; tries <= 3; tries++ {
 		err = azblob.DownloadBlobToBuffer(ctx, blobURL, start, count, buf, azblob.DownloadFromBlobOptions{})
 		if err != nil {
-			log.Logf("[blobomver] Donwload object[%s] to buffer failed %d times, err:%v\n", objKey, tries, err)
-			if tries == 3 {
-				return 0, err
+			log.Logf("[blobmover] download object[%s] to buffer failed %d times, err:%v\n", objKey, tries, err)
+			e := handleAzureBlobErrors(err)
+			if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry.
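CompleteMultipartUpload further below rolls the blob back when the tier cannot be set; the TODO in UploadObj above could take the same shape. A sketch:

	if destLoca.ClassName != "" {
		if err := mover.setTier(&objKey, &destLoca.ClassName); err != nil {
			// A blob stuck in the wrong tier is treated as a failed upload;
			// delete it (best effort) and surface the error.
			mover.DeleteObj(objKey, destLoca)
			return err
		}
	}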
+ return 0, e } } else { - log.Logf("[blobmover] Download object[%s] range[%d - %d] successfully.\n", objKey, start, end) + log.Logf("[blobmover] download object[%s] range[%d - %d] successfully.\n", objKey, start, end) return count, nil } } - log.Logf("[blobmover] Download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) - return 0, errors.New("internal error") + log.Logf("[blobmover] download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) + return 0, errors.New(DMERR_InternalError) } -func (mover *BlobMover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) error { +func (mover *BlobMover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) (string, error) { log.Logf("[blobmover] Prepare to do part upload for object[%s], container:%s, blob:%s\n", objKey, destLoca.BucketName, objKey) - return mover.Init(&destLoca.EndPoint, &destLoca.Access, &destLoca.Security) + return "", mover.Init(&destLoca.EndPoint, &destLoca.Access, &destLoca.Security) } func (mover *BlobMover) Int64ToBase64(blockID int64) string { @@ -232,9 +273,10 @@ func (mover *BlobMover) UploadPart(objKey string, destLoca *LocationInfo, upByte for tries := 1; tries <= 3; tries++ { _, err := blobURL.StageBlock(ctx, base64ID, bytes.NewReader(buf), azblob.LeaseAccessConditions{}, nil) if err != nil { - log.Logf("[blobmover] Upload object[objkey:%s] part[%d] failed %d times. err:%v\n", objKey, partNumber, tries, err) - if tries == 3 { - return err + log.Logf("[blobmover] upload object[objkey:%s] part[%d] failed %d times. err:%v\n", objKey, partNumber, tries, err) + e := handleAzureBlobErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. + return e } } else { log.Logf("[blobmover] Upload range[objkey:%s, partnumber#%d, base64ID#%d] successfully.\n", @@ -244,9 +286,9 @@ func (mover *BlobMover) UploadPart(objKey string, destLoca *LocationInfo, upByte } } - log.Logf("[blobmover] Upload range[objkey:%s, partnumber#%d, base64ID#%d], should not be here.\n", + log.Logf("[blobmover] upload range[objkey:%s, partnumber#%d, base64ID#%d], should not be here.\n", objKey, partNumber, base64ID) - return errors.New("internal error") + return errors.New(DMERR_InternalError) } func (mover *BlobMover) AbortMultipartUpload(objKey string, destLoca *LocationInfo) error { @@ -263,26 +305,34 @@ func (mover *BlobMover) CompleteMultipartUpload(objKey string, destLoca *Locatio for tries := 1; tries <= 3; tries++ { _, err := blobURL.CommitBlockList(ctx, mover.completeParts, azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) if err != nil { - log.Logf("[blobmover] CompleteMultipartUpload of object[%s] failed:%v\n", objKey, err) - if tries == 3 { - return err + log.Logf("[blobmover] completeMultipartUpload of object[%s] failed:%v\n", objKey, err) + e := handleAzureBlobErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. 
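A note on Int64ToBase64 above: Azure requires block IDs to be base64 strings, and all IDs within one blob must decode to the same byte length, so the part counter has to be serialized into a fixed-width buffer before encoding. A plausible implementation of that encoding, shown in isolation (assumes encoding/binary, encoding/base64 and bytes):

	func int64ToBase64(blockID int64) string {
		buf := new(bytes.Buffer)
		// Fixed-width write: every ID decodes to exactly 8 bytes.
		binary.Write(buf, binary.LittleEndian, blockID)
		return base64.StdEncoding.EncodeToString(buf.Bytes())
	}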
+ return e } } else { - log.Logf("[blobmover] CompleteMultipartUpload of object[%s] successfully.\n", objKey) + log.Logf("[blobmover] completeMultipartUpload of object[%s] successfully.\n", objKey) + if destLoca.ClassName != "" { + err := mover.setTier(&objKey, &destLoca.ClassName) + if err != nil { + mover.DeleteObj(objKey, destLoca) + return err + } + } return nil } } - log.Logf("[blobmover] CompleteMultipartUpload of object[%s], should not be here.\n", objKey) - return errors.New("internal error") + log.Logf("[blobmover] completeMultipartUpload of object[%s], should not be here.\n", objKey) + return errors.New(DMERR_InternalError) } func ListObjs(loca *LocationInfo, filt *pb.Filter) ([]azblob.BlobItem, error) { log.Logf("[blobmover] List objects of container[%s]\n", loca.BucketName) credential, err := azblob.NewSharedKeyCredential(loca.Access, loca.Security) if err != nil { - log.Fatalf("[blobmover] Create credential failed for list objects, err:%v\n", err) - return nil, err + log.Fatalf("[blobmover] create credential failed for list objects, err:%v\n", err) + return nil, handleAzureBlobErrors(err) } //create containerURL @@ -302,8 +352,9 @@ func ListObjs(loca *LocationInfo, filt *pb.Filter) ([]azblob.BlobItem, error) { // Get a result segment starting with the blob indicated by the current Marker. listBlob, err := containerURL.ListBlobsFlatSegment(ctx, marker, option) if err != nil { - log.Logf("[blobmover] ListBlobsFlatSegment failed:%v\n", err) - return nil, err + log.Logf("[blobmover] listBlobsFlatSegment failed:%v\n", err) + e := handleAzureBlobErrors(err) + return nil, e } objs = append(objs, listBlob.Segment.BlobItems...) diff --git a/datamover/pkg/ceph/s3/s3lifecycle.go b/datamover/pkg/ceph/s3/s3lifecycle.go new file mode 100644 index 000000000..8bcc7f9c4 --- /dev/null +++ b/datamover/pkg/ceph/s3/s3lifecycle.go @@ -0,0 +1,46 @@ +// Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package cephs3mover + +import ( + "errors" + "github.com/go-log/log" + . "github.com/opensds/multi-cloud/datamover/pkg/utils" + . 
"github.com/webrtcn/s3client" +) + +func (mover *CephS3Mover) ChangeStorageClass(objKey *string, newClass *string, bkend *BackendInfo) error { + log.Logf("[cephs3lifecycle]: Failed to change storage class of object[key:%s, backend:%s] to %s, no transition support for ceph s3.\n", + objKey, bkend.BakendName, newClass) + + return errors.New(DMERR_UnSupportOperation) +} + +func (mover *CephS3Mover) DeleteIncompleteMultipartUpload(objKey, uploadId string, loc *LocationInfo) error { + log.Logf("[cephs3mover] Delete incomplete multipart upload[objkey=%s,uploadId=%s].\n", objKey, mover.multiUploadInitOut.UploadID) + + sess := NewClient(loc.EndPoint, loc.Access, loc.Security) + bucket := sess.NewBucket() + cephObject := bucket.NewObject(loc.BucketName) + uploader := cephObject.NewUploads(objKey) + err := uploader.RemoveUploads(uploadId) + if err != nil { + log.Logf("[cephs3lifecycle] abort multipart upload[objkey:%s, uploadid:%s] failed, err:%v.\n", objKey, uploadId, err) + } else { + log.Logf("[cephs3lifecycle] abort multipart upload[objkey:%s, uploadid:%s] successfully, err:%v.\n", objKey, uploadId) + } + + return err +} \ No newline at end of file diff --git a/datamover/pkg/ceph/s3/s3mover.go b/datamover/pkg/ceph/s3/s3mover.go index f4f14baae..13bf5a5c8 100644 --- a/datamover/pkg/ceph/s3/s3mover.go +++ b/datamover/pkg/ceph/s3/s3mover.go @@ -48,6 +48,20 @@ func md5Content(data []byte) string { return value } +func handleCephS3Errors(err error) error { + if err != nil { + switch err.Error() { + case "SignatureDoesNotMatch": + log.Log("ceph s3 error: permission denied.") + return errors.New(DMERR_NoPermission) + default: + return err + } + } + + return nil +} + func (mover *CephS3Mover) UploadObj(objKey string, destLoca *LocationInfo, buf []byte) error { log.Logf("[cephs3mover] UploadObj object, key:%s.", objKey) sess := NewClient(destLoca.EndPoint, destLoca.Access, destLoca.Security) @@ -60,18 +74,20 @@ func (mover *CephS3Mover) UploadObj(objKey string, destLoca *LocationInfo, buf [ for tries := 1; tries <= 3; tries++ { err := cephObject.Create(objKey, contentMD5, "", length, body, models.Private) if err != nil { - log.Logf("[cephs3mover] Upload object[%s] failed %d times, err:%v\n", objKey, tries, err) - if tries == 3 { - return err + log.Logf("[cephs3mover] upload object[bucket:%s,key:%s] failed %d times, err:%v\n", + destLoca.BucketName, objKey, tries, err) + e := handleCephS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. 
+ return e } } else { - log.Logf("[cephs3mover] Upload object[%s] successfully.", objKey) + log.Logf("[cephs3mover] Upload object[bucket:%s,key:%s] successfully.", destLoca.BucketName, objKey) return nil } } - log.Logf("[cephs3mover] Upload object, bucket:%s,obj:%s, should not be here.\n", destLoca.BucketName, objKey) - return errors.New("internal error") + log.Logf("[cephs3mover] upload object, bucket:%s,obj:%s, should not be here.\n", destLoca.BucketName, objKey) + return errors.New(DMERR_InternalError) } func (mover *CephS3Mover) DownloadObj(objKey string, srcLoca *LocationInfo, buf []byte) (size int64, err error) { @@ -83,18 +99,23 @@ func (mover *CephS3Mover) DownloadObj(objKey string, srcLoca *LocationInfo, buf log.Logf("[cephs3mover] Try to download, bucket:%s,obj:%s\n", srcLoca.BucketName, objKey) for tries := 1; tries <= 3; tries++ { getObject, err := cephObject.Get(objKey, nil) + if err != nil { + log.Logf("[cephs3mover]download object[bucket:%s,key:%s] failed: %v.\n", srcLoca.BucketName, objKey, err) + e := handleCephS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { //If no permission, then no need to retry. + return 0, e + } else { + continue + } + } //defer getObject.Body.Close() d, err := ioutil.ReadAll(getObject.Body) data := []byte(d) size = int64(len(data)) copy(buf, data) - if err != nil { log.Logf("[cephs3mover]download object[bucket:%s,key:%s] failed %d times, err:%v\n", srcLoca.BucketName, objKey, tries, err) - if tries == 3 { - return 0, err - } } else { numBytes = getObject.ContentLength log.Logf("[cephs3mover]download object[bucket:%s,key:%s] succeed, bytes:%d\n", srcLoca.BucketName, objKey, numBytes) @@ -103,7 +124,7 @@ func (mover *CephS3Mover) DownloadObj(objKey string, srcLoca *LocationInfo, buf } log.Logf("[cephs3mover]download object[bucket:%s,key:%s], should not be here.\n", srcLoca.BucketName, objKey) - return 0, errors.New("internal error") + return 0, errors.New(DMERR_InternalError) } func (mover *CephS3Mover) MultiPartDownloadInit(srcLoca *LocationInfo) error { @@ -130,49 +151,56 @@ func (mover *CephS3Mover) DownloadRange(objKey string, srcLoca *LocationInfo, bu log.Logf("[cephs3mover] Try to download object:%s, range:=%s\n", objKey, rg) for tries := 1; tries <= 3; tries++ { resp, err := cephObject.Get(objKey, &getObjectOption) + if err != nil { + log.Logf("[cephs3mover] download object[bucket:%s,key:%s] failed: %v.\n", + srcLoca.BucketName, objKey, err) + e := handleCephS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return 0, e + } else { + continue + } + } //defer resp.Body.Close() d, err := ioutil.ReadAll(resp.Body) data := []byte(d) size = int64(len(data)) copy(buf, data) if err != nil { - log.Logf("[cephs3mover] Download object[%s] range[%d - %d] faild %d times, err:%v\n", + log.Logf("[cephs3mover] download object[%s] range[%d - %d] faild %d times, err:%v\n", objKey, start, end, tries, err) - if tries == 3 { - return 0, err - } } else { - log.Logf("[cephs3mover] Download object[%s] range[%d - %d] succeed, bytes:%d\n", objKey, start, end, size) + log.Logf("[cephs3mover] download object[%s] range[%d - %d] succeed, bytes:%d\n", objKey, start, end, size) return size, err } } - log.Logf("[cephs3mover] Download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) - return 0, errors.New("internal error") + log.Logf("[cephs3mover] download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) + return 0, errors.New(DMERR_InternalError) } -func (mover *CephS3Mover) 
MultiPartUploadInit(objKey string, destLoca *LocationInfo) error { +func (mover *CephS3Mover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) (string, error) { sess := NewClient(destLoca.EndPoint, destLoca.Access, destLoca.Security) bucket := sess.NewBucket() cephObject := bucket.NewObject(destLoca.BucketName) mover.svc = cephObject.NewUploads(objKey) log.Logf("[cephs3mover] Try to init multipart upload[objkey:%s].\n", objKey) for tries := 1; tries <= 3; tries++ { - resp, err := mover.svc.Initiate(nil) if err != nil { - log.Logf("[cephs3mover] Init multipart upload[objkey:%s] failed %d times.\n", objKey, tries) - if tries == 3 { - return err + log.Logf("[cephs3mover] init multipart upload[objkey:%s] failed %d times, err:%v.\n", objKey, tries, err) + e := handleCephS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return "", e } } else { mover.multiUploadInitOut = &CreateMultipartUploadOutput{resp.UploadID} log.Logf("[cephs3mover] Init multipart upload[objkey:%s] successfully, UploadId:%s\n", objKey, resp.UploadID) - return nil + return resp.UploadID, nil } } - log.Logf("[cephs3mover] Init multipart upload[objkey:%s], should not be here.\n", objKey) - return errors.New("internal error") + log.Logf("[cephs3mover] init multipart upload[objkey:%s], should not be here.\n", objKey) + return "", errors.New(DMERR_InternalError) } @@ -188,20 +216,20 @@ func (mover *CephS3Mover) UploadPart(objKey string, destLoca *LocationInfo, upBy for tries := 1; tries <= 3; tries++ { upRes, err := mover.svc.UploadPart(int(partNumber), mover.multiUploadInitOut.UploadID, contentMD5, "", length, body) if err != nil { - log.Logf("[cephs3mover] Upload range[objkey:%s, partnumber#%d, offset#%d] failed %d times, err:%v\n", + log.Logf("[cephs3mover] upload range[objkey:%s, partnumber#%d, offset#%d] failed %d times, err:%v\n", objKey, partNumber, offset, tries, err) - if tries == 3 { - return err + e := handleCephS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return e } } else { - mover.completeParts = append(mover.completeParts, upRes) log.Logf("[cephs3mover] Upload range[objkey:%s, partnumber#%d,offset#%d] successfully.\n", objKey, partNumber, offset) return nil } } - log.Logf("[cephs3mover] Upload range[objkey:%s, partnumber#%d, offset#%d], should not be here.\n", objKey, partNumber, offset) - return errors.New("internal error") + log.Logf("[cephs3mover] upload range[objkey:%s, partnumber#%d, offset#%d], should not be here.\n", objKey, partNumber, offset) + return errors.New(DMERR_InternalError) } func (mover *CephS3Mover) AbortMultipartUpload(objKey string, destLoca *LocationInfo) error { @@ -212,10 +240,11 @@ func (mover *CephS3Mover) AbortMultipartUpload(objKey string, destLoca *Location for tries := 1; tries <= 3; tries++ { err := uploader.RemoveUploads(mover.multiUploadInitOut.UploadID) if err != nil { - log.Logf("[cephs3mover] Abort multipart upload[objkey:%s] for uploadId#%s failed %d times.\n", + log.Logf("[cephs3mover] abort multipart upload[objkey:%s] for uploadId#%s failed %d times.\n", objKey, mover.multiUploadInitOut.UploadID, tries) - if tries == 3 { - return err + e := handleCephS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return e } } else { log.Logf("[cephs3mover] Abort multipart upload[objkey:%s] for uploadId#%s successfully.\n", @@ -223,9 +252,9 @@ func (mover *CephS3Mover) AbortMultipartUpload(objKey string, destLoca *Location return nil } } - log.Logf("[cephs3mover] Abort multipart upload[objkey:%s] for uploadId#%s, should 
not be here.\n", + log.Logf("[cephs3mover] abort multipart upload[objkey:%s] for uploadId#%s, should not be here.\n", objKey, mover.multiUploadInitOut.UploadID) - return errors.New("internal error") + return errors.New(DMERR_InternalError) } func (mover *CephS3Mover) CompleteMultipartUpload(objKey string, destLoca *LocationInfo) error { @@ -242,8 +271,9 @@ func (mover *CephS3Mover) CompleteMultipartUpload(objKey string, destLoca *Locat rsp, err := mover.svc.Complete(mover.multiUploadInitOut.UploadID, completeParts) if err != nil { log.Logf("[cephs3mover] completeMultipartUpload [objkey:%s] failed %d times, err:%v\n", objKey, tries, err) - if tries == 3 { - return err + e := handleCephS3Errors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return e } } else { log.Logf("[cephs3mover] completeMultipartUpload successfully [objkey:%s], rsp:%v\n", objKey, rsp) @@ -260,10 +290,10 @@ func (mover *CephS3Mover) DeleteObj(objKey string, loca *LocationInfo) error { cephObject := bucket.NewObject(loca.BucketName) err := cephObject.Remove(objKey) - if err != nil { - log.Logf("[cephs3mover] Error occurred while waiting for object[%s] to be deleted.\n", objKey) - return err + log.Logf("[cephs3mover] error occurred while waiting for object[%s] to be deleted.\n", objKey) + e := handleCephS3Errors(err) + return e } log.Logf("[cephs3mover] Delete Object[%s] successfully.\n", objKey) @@ -271,26 +301,23 @@ func (mover *CephS3Mover) DeleteObj(objKey string, loca *LocationInfo) error { } func ListObjs(loca *LocationInfo, filt *pb.Filter) ([]models.GetBucketResponseContent, error) { - sess := NewClient(loca.EndPoint, loca.Access, loca.Security) bucket := sess.NewBucket() var output *models.GetBucketResponse var err error if filt != nil { output, err = bucket.Get(string(loca.BucketName), filt.Prefix, "", "", 1000) - } else { output, err = bucket.Get(string(loca.BucketName), "", "", "", 1000) } if err != nil { - log.Logf("[cephs3mover] List bucket failed, err:%v\n", err) - return nil, err + log.Logf("[cephs3mover] list bucket failed, err:%v\n", err) + e := handleCephS3Errors(err) + return nil, e } objs := output.Contents - size := len(objs) - var out []models.GetBucketResponseContent for i := 0; i < size; i++ { out = append(out, models.GetBucketResponseContent{ diff --git a/datamover/pkg/db/db.go b/datamover/pkg/db/db.go index 1d5ba9886..b1193fae1 100644 --- a/datamover/pkg/db/db.go +++ b/datamover/pkg/db/db.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved. +// Copyright 2019 The OpenSDS Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/datamover/pkg/db/drivers/mongo/mongo.go b/datamover/pkg/db/drivers/mongo/mongo.go index 0c48e37f7..22e6b75be 100644 --- a/datamover/pkg/db/drivers/mongo/mongo.go +++ b/datamover/pkg/db/drivers/mongo/mongo.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved. +// Copyright 2019 The OpenSDS Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/datamover/pkg/drivers/https/migration.go b/datamover/pkg/drivers/https/migration.go index a19b7cb4d..d203e4db1 100644 --- a/datamover/pkg/drivers/https/migration.go +++ b/datamover/pkg/drivers/https/migration.go @@ -1,3 +1,17 @@ +// Copyright 2019 The OpenSDS Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package migration import ( @@ -51,15 +65,15 @@ func HandleMsg(msgData []byte) error { var job pb.RunJobRequest err := json.Unmarshal(msgData, &job) if err != nil { - logger.Printf("Unmarshal failed, err:%v\n", err) + logger.Printf("unmarshal failed, err:%v\n", err) return err } //Check the status of job, and run it if needed status := db.DbAdapter.GetJobStatus(job.Id) if status != flowtype.JOB_STATUS_PENDING { - logger.Printf("Job[ID#%s] is not in %s status.\n", job.Id, flowtype.JOB_STATUS_PENDING) - return errors.New("Job already running.") + logger.Printf("job[id#%s] is not in %s status.\n", job.Id, flowtype.JOB_STATUS_PENDING) + return nil //No need to consume this message again } logger.Printf("HandleMsg:job=%+v\n", job) @@ -83,17 +97,17 @@ func doMove(ctx context.Context, objs []*osdss3.Object, capa chan int64, th chan func getOsdsLocation(ctx context.Context, virtBkname string, backendName string) (*LocationInfo, error) { if backendName == "" { - logger.Println("Get backend location failed, because backend name is null.") + logger.Println("get backend location failed, because backend name is null.") return nil, errors.New("failed") } bk, err := db.DbAdapter.GetBackendByName(backendName) if err != nil { - logger.Printf("Get backend information failed, err:%v\n", err) + logger.Printf("get backend information failed, err:%v\n", err) return nil, errors.New("failed") } else { - loca := &LocationInfo{bk.Type, bk.Region, bk.Endpoint, bk.BucketName, - virtBkname, bk.Access, bk.Security, backendName} + loca := &LocationInfo{StorType: bk.Type, Region: bk.Region, EndPoint: bk.Endpoint, BucketName: bk.BucketName, + VirBucket: virtBkname, Access: bk.Access, Security: bk.Security, BakendName: backendName} logger.Printf("Refresh backend[name:%s,id:%s] successfully.\n", backendName, bk.Id.String()) return loca, nil } @@ -107,7 +121,7 @@ func getConnLocation(ctx context.Context, conn *pb.Connector) (*LocationInfo, er reqbk := osdss3.Bucket{Name: virtBkname} rspbk, err := s3client.GetBucket(ctx, &reqbk) if err != nil { - logger.Printf("Get bucket[%s] information failed when refresh connector location, err:%v\n", virtBkname, err) + logger.Printf("get bucket[%s] information failed when refresh connector location, err:%v\n", virtBkname, err) return nil, errors.New("get bucket information failed") } return getOsdsLocation(ctx, virtBkname, rspbk.Backend) @@ -130,20 +144,20 @@ func getConnLocation(ctx context.Context, conn *pb.Connector) (*LocationInfo, er case "security": loca.Security = cfg[i].Value default: - logger.Printf("Uknow key[%s] for connector.\n", cfg[i].Key) + logger.Printf("unknow key[%s] for connector.\n", cfg[i].Key) } } return &loca, nil } default: { - logger.Printf("Unsupport type:%s.\n", conn.Type) + logger.Printf("unsupport type:%s.\n", conn.Type) return nil, errors.New("unsupport type") } } } -func moveObj(obj *osdss3.Object, srcLoca *LocationInfo, destLoca *LocationInfo) error { +func MoveObj(obj *osdss3.Object, srcLoca 
*LocationInfo, destLoca *LocationInfo) error { logger.Printf("*****Move object[%s] from #%s# to #%s#, size is %d.\n", obj.ObjectKey, srcLoca.BakendName, destLoca.BakendName, obj.Size) if obj.Size <= 0 { @@ -179,13 +193,13 @@ func moveObj(obj *osdss3.Object, srcLoca *LocationInfo, destLoca *LocationInfo) size, err = downloader.DownloadObj(downloadObjKey, srcLoca, buf) default: { - logger.Printf("Not support source backend type:%v\n", srcLoca.StorType) - err = errors.New("Not support source backend type.") + logger.Printf("not support source backend type:%v\n", srcLoca.StorType) + err = errors.New("not support source backend type") } } if err != nil { - logger.Printf("Download object[%s] failed.", obj.ObjectKey) + logger.Printf("download object[%s] failed.", obj.ObjectKey) return err } logger.Printf("Download object[%s] succeed, size=%d\n", obj.ObjectKey, size) @@ -216,13 +230,13 @@ func moveObj(obj *osdss3.Object, srcLoca *LocationInfo, destLoca *LocationInfo) uploader = &Gcps3mover.GcpS3Mover{} err = uploader.UploadObj(uploadObjKey, destLoca, buf) default: - logger.Printf("Not support destination backend type:%v\n", destLoca.StorType) - return errors.New("Not support destination backend type.") + logger.Printf("not support destination backend type:%v\n", destLoca.StorType) + return errors.New("not support destination backend type.") } if err != nil { - logger.Printf("Upload object[bucket:%s,key:%s] failed, err:%v.\n", destLoca.BucketName, uploadObjKey, err) + logger.Printf("upload object[bucket:%s,key:%s] failed, err:%v.\n", destLoca.BucketName, uploadObjKey, err) } else { - logger.Printf("Upload object[bucket:%s,key:%s] successfully.\n", destLoca.BucketName, uploadObjKey) + logger.Printf("upload object[bucket:%s,key:%s] successfully.\n", destLoca.BucketName, uploadObjKey) } return err @@ -256,43 +270,44 @@ func multiPartDownloadInit(srcLoca *LocationInfo) (mover MoveWorker, err error) return mover, err default: - logger.Printf("Unsupport storType[%s] to init multipart download.\n", srcLoca.StorType) + logger.Printf("unsupport storType[%s] to init multipart download.\n", srcLoca.StorType) } - return nil, errors.New("Unsupport storage type.") + return nil, errors.New("unsupport storage type.") } -func multiPartUploadInit(objKey string, destLoca *LocationInfo) (mover MoveWorker, err error) { +func multiPartUploadInit(objKey string, destLoca *LocationInfo) (mover MoveWorker, uploadId string, err error) { + uploadId = "" switch destLoca.StorType { case flowtype.STOR_TYPE_AWS_S3: - mover := &s3mover.S3Mover{} - err := mover.MultiPartUploadInit(objKey, destLoca) - return mover, err + mover = &s3mover.S3Mover{} + uploadId, err = mover.MultiPartUploadInit(objKey, destLoca) + return case flowtype.STOR_TYPE_IBM_COS: - mover := &ibmcosmover.IBMCOSMover{} - err := mover.MultiPartUploadInit(objKey, destLoca) - return mover, err + mover = &ibmcosmover.IBMCOSMover{} + uploadId, err = mover.MultiPartUploadInit(objKey, destLoca) + return case flowtype.STOR_TYPE_HW_OBS, flowtype.STOR_TYPE_HW_FUSIONSTORAGE, flowtype.STOR_TYPE_HW_FUSIONCLOUD: - mover := &obsmover.ObsMover{} - err := mover.MultiPartUploadInit(objKey, destLoca) - return mover, err + mover = &obsmover.ObsMover{} + uploadId, err = mover.MultiPartUploadInit(objKey, destLoca) + return case flowtype.STOR_TYPE_AZURE_BLOB: - mover := &blobmover.BlobMover{} - err := mover.MultiPartUploadInit(objKey, destLoca) - return mover, err + mover = &blobmover.BlobMover{} + uploadId, err = mover.MultiPartUploadInit(objKey, destLoca) + return case 
flowtype.STOR_TYPE_CEPH_S3: - mover := &cephs3mover.CephS3Mover{} - err := mover.MultiPartUploadInit(objKey, destLoca) - return mover, err + mover = &cephs3mover.CephS3Mover{} + uploadId, err = mover.MultiPartUploadInit(objKey, destLoca) + return case flowtype.STOR_TYPE_GCP_S3: - mover := &Gcps3mover.GcpS3Mover{} - err := mover.MultiPartUploadInit(objKey, destLoca) - return mover, err + mover = &Gcps3mover.GcpS3Mover{} + uploadId, err = mover.MultiPartUploadInit(objKey, destLoca) + return mover, uploadId, err default: - logger.Printf("Unsupport storType[%s] to download.\n", destLoca.StorType) + logger.Printf("unsupport storType[%s] to download.\n", destLoca.StorType) } - return nil, errors.New("Unsupport storage type.") + return nil, uploadId, errors.New("unsupport storage type") } func abortMultipartUpload(objKey string, destLoca *LocationInfo, mover MoveWorker) error { @@ -301,10 +316,10 @@ func abortMultipartUpload(objKey string, destLoca *LocationInfo, mover MoveWorke flowtype.STOR_TYPE_HW_FUSIONCLOUD, flowtype.STOR_TYPE_AZURE_BLOB, flowtype.STOR_TYPE_CEPH_S3, flowtype.STOR_TYPE_GCP_S3, flowtype.STOR_TYPE_IBM_COS: return mover.AbortMultipartUpload(objKey, destLoca) default: - logger.Printf("Unsupport storType[%s] to download.\n", destLoca.StorType) + logger.Printf("unsupport storType[%s] to download.\n", destLoca.StorType) } - return errors.New("Unsupport storage type.") + return errors.New("unsupport storage type") } func completeMultipartUpload(objKey string, destLoca *LocationInfo, mover MoveWorker) error { @@ -313,13 +328,38 @@ func completeMultipartUpload(objKey string, destLoca *LocationInfo, mover MoveWo flowtype.STOR_TYPE_HW_FUSIONCLOUD, flowtype.STOR_TYPE_AZURE_BLOB, flowtype.STOR_TYPE_CEPH_S3, flowtype.STOR_TYPE_GCP_S3, flowtype.STOR_TYPE_IBM_COS: return mover.CompleteMultipartUpload(objKey, destLoca) default: - logger.Printf("Unsupport storType[%s] to download.\n", destLoca.StorType) + logger.Printf("unsupport storType[%s] to download.\n", destLoca.StorType) + } + + return errors.New("unsupport storage type") +} + +func addMultipartUpload(objKey, virtBucket, backendName, uploadId string) { + // some cloud vendor, like azure, does not support user to delete uncomplete multipart upload data, and no uploadId provided, + // so we do not need to manage the uncomplete multipart upload data. + if len(uploadId) == 0 { + return + } + + record := osdss3.MultipartUploadRecord{ObjectKey: objKey, Bucket: virtBucket, Backend: backendName, UploadId: uploadId} + record.InitTime = time.Now().Unix() + + s3client.AddUploadRecord(context.Background(), &record) + // TODO: Need consider if add failed +} + +func deleteMultipartUpload(objKey, virtBucket, backendName, uploadId string) { + // some cloud vendor, like azure, does not support user to delete uncomplete multipart upload data, and no uploadId provided, + // so we do not need to manage the uncomplete multipart upload data. 
+ if len(uploadId) == 0 { + return } - return errors.New("Unsupport storage type.") + record := osdss3.MultipartUploadRecord{ObjectKey: objKey, Bucket: virtBucket, Backend: backendName, UploadId: uploadId} + s3client.DeleteUploadRecord(context.Background(), &record) } -func multipartMoveObj(obj *osdss3.Object, srcLoca *LocationInfo, destLoca *LocationInfo) error { +func MultipartMoveObj(obj *osdss3.Object, srcLoca *LocationInfo, destLoca *LocationInfo) error { partCount := int64(obj.Size / PART_SIZE) if obj.Size%PART_SIZE != 0 { partCount++ @@ -340,6 +380,7 @@ func multipartMoveObj(obj *osdss3.Object, srcLoca *LocationInfo, destLoca *Locat var i int64 var err error var uploadMover, downloadMover MoveWorker + var uploadId string currPartSize := PART_SIZE for i = 0; i < partCount; i++ { partNumber := i + 1 @@ -361,20 +402,22 @@ func multipartMoveObj(obj *osdss3.Object, srcLoca *LocationInfo, destLoca *Locat } readSize, err := downloadMover.DownloadRange(downloadObjKey, srcLoca, buf, start, end) if err != nil { - return errors.New("Download failed.") + return errors.New("download failed") } //fmt.Printf("Download part %d range[%d:%d] successfully.\n", partNumber, offset, end) if int64(readSize) != currPartSize { - logger.Printf("Internal error, currPartSize=%d, readSize=%d\n", currPartSize, readSize) - return errors.New("Internal error") + logger.Printf("internal error, currPartSize=%d, readSize=%d\n", currPartSize, readSize) + return errors.New(DMERR_InternalError) } //upload if partNumber == 1 { //init multipart upload - uploadMover, err = multiPartUploadInit(uploadObjKey, destLoca) + uploadMover, uploadId, err = multiPartUploadInit(uploadObjKey, destLoca) if err != nil { return err + } else { + addMultipartUpload(obj.ObjectKey, destLoca.VirBucket, destLoca.BakendName, uploadId) } } err1 := uploadMover.UploadPart(uploadObjKey, destLoca, currPartSize, buf, partNumber, offset) @@ -382,8 +425,10 @@ func multipartMoveObj(obj *osdss3.Object, srcLoca *LocationInfo, destLoca *Locat err := abortMultipartUpload(obj.ObjectKey, destLoca, uploadMover) if err != nil { logger.Printf("Abort s3 multipart upload failed, err:%v\n", err) + } else { + deleteMultipartUpload(obj.ObjectKey, destLoca.VirBucket, destLoca.BakendName, uploadId) } - return errors.New("S3 multipart upload failed.") + return errors.New("multipart upload failed") } //completeParts = append(completeParts, completePart) } @@ -393,8 +438,12 @@ func multipartMoveObj(obj *osdss3.Object, srcLoca *LocationInfo, destLoca *Locat logger.Println(err.Error()) err := abortMultipartUpload(obj.ObjectKey, destLoca, uploadMover) if err != nil { - logger.Printf("Abort s3 multipart upload failed, err:%v\n", err) + logger.Printf("abort s3 multipart upload failed, err:%v\n", err) + } else { + deleteMultipartUpload(obj.ObjectKey, destLoca.VirBucket, destLoca.BakendName, uploadId) } + } else { + deleteMultipartUpload(obj.ObjectKey, destLoca.VirBucket, destLoca.BakendName, uploadId) } return err @@ -426,8 +475,8 @@ func deleteObj(ctx context.Context, obj *osdss3.Object, loca *LocationInfo) erro mover := Gcps3mover.GcpS3Mover{} err = mover.DeleteObj(objKey, loca) default: - logger.Printf("Delete object[objkey:%s] from backend storage failed.\n", obj.ObjectKey) - err = errors.New("Unspport storage type.") + logger.Printf("delete object[objkey:%s] from backend storage failed.\n", obj.ObjectKey) + err = errors.New(DMERR_UnSupportBackendType) } if err != nil { @@ -439,7 +488,7 @@ func deleteObj(ctx context.Context, obj *osdss3.Object, loca *LocationInfo) erro 
delMetaReq := osdss3.DeleteObjectInput{Bucket: loca.VirBucket, Key: obj.ObjectKey} _, err = s3client.DeleteObject(ctx, &delMetaReq) if err != nil { - logger.Printf("Delete object metadata of obj[bucket:%s,objKey:%s] failed, err:%v\n", loca.VirBucket, + logger.Printf("delete object metadata of obj[bucket:%s,objKey:%s] failed, err:%v\n", loca.VirBucket, obj.ObjectKey, err) } else { logger.Printf("Delete object metadata of obj[bucket:%s,objKey:%s] successfully.\n", loca.VirBucket, @@ -497,9 +546,9 @@ func move(ctx context.Context, obj *osdss3.Object, capa chan int64, th chan int, } } if obj.Size <= PART_SIZE { - err = moveObj(obj, newSrcLoca, destLoca) + err = MoveObj(obj, newSrcLoca, destLoca) } else { - err = multipartMoveObj(obj, newSrcLoca, destLoca) + err = MultipartMoveObj(obj, newSrcLoca, destLoca) } if err != nil { @@ -515,10 +564,10 @@ func move(ctx context.Context, obj *osdss3.Object, capa chan int64, th chan int, obj.LastModified = time.Now().Unix() _, err := s3client.CreateObject(ctx, obj) if err != nil { - logger.Printf("Add object metadata of obj [objKey:%s] to bucket[name:%s] failed,err:%v.\n", obj.ObjectKey, + logger.Printf("add object metadata of obj [objKey:%s] to bucket[name:%s] failed, err:%v.\n", obj.ObjectKey, obj.BucketName, err) } else { - logger.Printf("Add object metadata of obj [objKey:%s] to bucket[name:%s] succeed.\n", obj.ObjectKey, + logger.Printf("add object metadata of obj [objKey:%s] to bucket[name:%s] succeed.\n", obj.ObjectKey, obj.BucketName) } } @@ -566,9 +615,9 @@ func getOsdsS3Objs(ctx context.Context, conn *pb.Connector, filt *pb.Filter, } if valid == true { srcObjs = append(srcObjs, objs.ListObjects[i]) - logger.Printf("Object[%s] match, will be migrated.\n", objs.ListObjects[i].ObjectKey) + logger.Printf("object[%s] match, will be migrated.\n", objs.ListObjects[i].ObjectKey) } else { - logger.Printf("Object[%s] does not match, will not be migrated.\n", objs.ListObjects[i].ObjectKey) + logger.Printf("object[%s] does not match, will not be migrated.\n", objs.ListObjects[i].ObjectKey) } } return srcObjs, nil @@ -685,11 +734,11 @@ func getSourceObjs(ctx context.Context, conn *pb.Connector, filt *pb.Filter, return getGcpS3Objs(ctx, conn, filt, defaultSrcLoca) default: { - logger.Printf("Unsupport storage type:%v\n", conn.Type) - return nil, errors.New("unsupport storage type") + logger.Printf("unsupport storage type:%v\n", conn.Type) + return nil, errors.New(DMERR_UnSupportBackendType) } } - return nil, errors.New("Get source objects failed") + return nil, errors.New(DMERR_InternalError) } func prepare4Run(ctx context.Context, j *flowtype.Job, in *pb.RunJobRequest) (srcLoca *LocationInfo, destLoca *LocationInfo, @@ -719,7 +768,7 @@ func prepare4Run(ctx context.Context, j *flowtype.Job, in *pb.RunJobRequest) (sr objs, err = getSourceObjs(ctx, in.SourceConn, in.GetFilt(), srcLoca) totalObjs := len(objs) if err != nil { - logger.Printf("List objects failed, err:%v, total objects:%d\n", err, totalObjs) + logger.Printf("list objects failed, err:%v, total objects:%d\n", err, totalObjs) //update database j.Status = flowtype.JOB_STATUS_FAILED j.EndTime = time.Now() @@ -831,7 +880,7 @@ func runjob(in *pb.RunJobRequest) error { j.PassedCount = int64(passedCount) if passedCount < totalObjs { errmsg := strconv.FormatInt(totalObjs, 10) + " objects, passed " + strconv.FormatInt(passedCount, 10) - logger.Printf("Run job failed: %s\n", errmsg) + logger.Printf("run job failed: %s\n", errmsg) ret = errors.New("failed") j.Status = flowtype.JOB_STATUS_FAILED } else { diff 
--git a/datamover/pkg/drivers/lifecycle/abortIncompleteMultipartUpload.go b/datamover/pkg/drivers/lifecycle/abortIncompleteMultipartUpload.go new file mode 100644 index 000000000..dc621dda3 --- /dev/null +++ b/datamover/pkg/drivers/lifecycle/abortIncompleteMultipartUpload.go @@ -0,0 +1,74 @@ +package lifecycle + +import ( + "context" + "fmt" + + "github.com/micro/go-log" + flowtype "github.com/opensds/multi-cloud/dataflow/pkg/model" + s3mover "github.com/opensds/multi-cloud/datamover/pkg/amazon/s3" + cephs3mover "github.com/opensds/multi-cloud/datamover/pkg/ceph/s3" + obsmover "github.com/opensds/multi-cloud/datamover/pkg/hw/obs" + ibmcosmover "github.com/opensds/multi-cloud/datamover/pkg/ibm/cos" + . "github.com/opensds/multi-cloud/datamover/pkg/utils" + datamover "github.com/opensds/multi-cloud/datamover/proto" + osdss3 "github.com/opensds/multi-cloud/s3/proto" +) + +func clearFromBackend(objKey, uploadId string, loca *LocationInfo) error { + if loca.VirBucket != "" { + objKey = loca.VirBucket + "/" + objKey + } + + var err error = nil + switch loca.StorType { + case flowtype.STOR_TYPE_AWS_S3: + mover := s3mover.S3Mover{} + err = mover.DeleteIncompleteMultipartUpload(objKey, uploadId, loca) + case flowtype.STOR_TYPE_IBM_COS: + mover := ibmcosmover.IBMCOSMover{} + err = mover.DeleteIncompleteMultipartUpload(objKey, uploadId, loca) + case flowtype.STOR_TYPE_HW_OBS, flowtype.STOR_TYPE_HW_FUSIONSTORAGE, flowtype.STOR_TYPE_HW_FUSIONCLOUD: + mover := obsmover.ObsMover{} + err = mover.DeleteIncompleteMultipartUpload(objKey, uploadId, loca) + case flowtype.STOR_TYPE_CEPH_S3: + mover := cephs3mover.CephS3Mover{} + err = mover.DeleteIncompleteMultipartUpload(objKey, uploadId, loca) + default: + err = fmt.Errorf("delete incomplete multipart upload is not support for storage type:%s", loca.StorType) + } + + if err != nil { + log.Logf("delete incomplete multipart upload[id=%s] from backend[type:%s,bucket:%s] failed.\n", + uploadId, loca.StorType, loca.BucketName) + } else { + log.Logf("delete incomplete multipart upload[id=%s] from backend[type:%s,bucket:%s] successfully.\n", + uploadId, loca.StorType, loca.BucketName) + } + + return err +} + +func doAbortUpload(acReq *datamover.LifecycleActionRequest) error { + log.Logf("abort incomplete upload: key=%s, uploadid=%s.\n", acReq.ObjKey, acReq.UploadId) + + // delete incomplete multipart upload data in each backend + bkend, err := getBackendInfo(&acReq.TargetBackend, false) + if err != nil { + log.Logf("abort incomplete upload[key=%s, uploadid=%s] failed because get location failed.\n", acReq.ObjKey, acReq.UploadId) + return err + } + + loc := &LocationInfo{StorType: bkend.StorType, Region: bkend.Region, EndPoint: bkend.EndPoint, BucketName: bkend.BucketName, + VirBucket: acReq.BucketName, Access: bkend.Access, Security: bkend.Security, BakendName: bkend.BakendName} + err = clearFromBackend(acReq.ObjKey, acReq.UploadId, loc) + + // Delete record from database, if delete failed, it will be deleted again in the next schedule period + if err == nil { + record := osdss3.MultipartUploadRecord{ObjectKey: acReq.ObjKey, Bucket: acReq.BucketName, Backend: acReq.TargetBackend, + UploadId: acReq.UploadId} + s3client.DeleteUploadRecord(context.Background(), &record) + } + + return nil +} diff --git a/datamover/pkg/drivers/lifecycle/crosscloud_transition.go b/datamover/pkg/drivers/lifecycle/crosscloud_transition.go new file mode 100644 index 000000000..74dd34fca --- /dev/null +++ b/datamover/pkg/drivers/lifecycle/crosscloud_transition.go @@ -0,0 +1,145 @@ +// 
Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lifecycle + +import ( + "context" + "errors" + "fmt" + "os" + "strconv" + "sync" + + "github.com/micro/go-log" + mover "github.com/opensds/multi-cloud/datamover/pkg/drivers/https" + . "github.com/opensds/multi-cloud/datamover/pkg/utils" + datamover "github.com/opensds/multi-cloud/datamover/proto" + osdss3 "github.com/opensds/multi-cloud/s3/proto" +) + +//The max object size that can be moved directly, default is 16M. +var PART_SIZE int64 = 16 * 1024 * 1024 + +// If transition for an object is in-progress, then the next transition message will be abandoned. +var InProgressObjs map[string]struct{} + +func copyObj(ctx context.Context, obj *osdss3.Object, src *BackendInfo, dest *BackendInfo, className *string) error { + // move object + part_size, err := strconv.ParseInt(os.Getenv("PARTSIZE"), 10, 64) + log.Logf("part_size=%d, err=%v.\n", part_size, err) + if err == nil { + // part_size must be more than 5M and less than 100M + if part_size >= 5 && part_size <= 100 { + PART_SIZE = part_size * 1024 * 1024 + log.Logf("Set PART_SIZE to be %d.\n", PART_SIZE) + } + } + + srcLoc := &LocationInfo{StorType: src.StorType, Region: src.Region, EndPoint: src.EndPoint, BucketName: src.BucketName, + Access: src.Access, Security: src.Security, BakendName: src.BakendName, VirBucket: obj.BucketName} + targetLoc := &LocationInfo{StorType: dest.StorType, Region: dest.Region, EndPoint: dest.EndPoint, BucketName: dest.BucketName, + Access: dest.Access, Security: dest.Security, BakendName: dest.BakendName, ClassName: *className, VirBucket: obj.BucketName} + + // add object to InProgressObjs + if InProgressObjs == nil { + var mutex sync.Mutex + mutex.Lock() + if InProgressObjs == nil { + InProgressObjs = make(map[string]struct{}) + } + mutex.Unlock() + } + if _, ok := InProgressObjs[obj.ObjectKey]; !ok { + InProgressObjs[obj.ObjectKey] = struct{}{} + } else { + log.Logf("the transition of object[%s] is in-progress\n", obj.ObjectKey) + return errors.New(DMERR_TransitionInprogress) + } + + if obj.Size <= PART_SIZE { + err = mover.MoveObj(obj, srcLoc, targetLoc) + } else { + err = mover.MultipartMoveObj(obj, srcLoc, targetLoc) + } + + // TODO: Need to confirm the integrity by comparing Etags. 
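A note on the in-progress guard a few lines up in `copyObj`: the `sync.Mutex` declared inside the function is a brand-new lock on every call, so it serializes nothing, and the map lookup and insert that follow run outside the lock anyway; concurrent `doAction` goroutines can therefore race on `InProgressObjs`. Relatedly, `copyObj` returns `errors.New(DMERR_TransitionInprogress)` (the string "transition in-progress"), while the caller in `doCrossCloudTransition` below compares against the literal "in-progress", so that branch can never match; comparing against the constant would fix it. A minimal race-safe sketch of the guard, with illustrative names (`inProgressMu`, `tryMarkInProgress`, `unmarkInProgress`) that are not part of this change:

```go
package lifecycle

import "sync"

var (
	inProgressMu   sync.Mutex
	inProgressObjs = make(map[string]struct{})
)

// tryMarkInProgress reports whether objKey was free and marks it busy;
// a single package-level mutex guards both the read and the write.
func tryMarkInProgress(objKey string) bool {
	inProgressMu.Lock()
	defer inProgressMu.Unlock()
	if _, busy := inProgressObjs[objKey]; busy {
		return false
	}
	inProgressObjs[objKey] = struct{}{}
	return true
}

// unmarkInProgress releases the guard once the transition finishes or fails.
func unmarkInProgress(objKey string) {
	inProgressMu.Lock()
	defer inProgressMu.Unlock()
	delete(inProgressObjs, objKey)
}
```

With helpers like these, `copyObj` would call `tryMarkInProgress` at entry and defer `unmarkInProgress`, instead of touching the map directly.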
+ + // remove object from InProgressObjs + delete(InProgressObjs, obj.ObjectKey) + + return err +} + +func doCrossCloudTransition(acReq *datamover.LifecycleActionRequest) error { + log.Logf("cross-cloud transition action: transition %s from %d of %s to %d of %s.\n", + acReq.ObjKey, acReq.SourceTier, acReq.SourceBackend, acReq.TargetTier, acReq.TargetBackend) + + src, err := getBackendInfo(&acReq.SourceBackend, false) + if err != nil { + log.Logf("cross-cloud transition of %s failed:%v\n", acReq.ObjKey, err) + return err + } + target, err := getBackendInfo(&acReq.TargetBackend, false) + if err != nil { + log.Logf("cross-cloud transition of %s failed:%v\n", acReq.ObjKey, err) + return err + } + + className, err := getStorageClassName(acReq.TargetTier, target.StorType) + if err != nil { + log.Logf("cross-cloud transition of %s failed because target tier is not supported.\n", acReq.ObjKey) + return err + } + + log.Logf("transition object[%s] from [%+v] to [%+v]\n", acReq.ObjKey, src, target) + obj := osdss3.Object{ObjectKey: acReq.ObjKey, Size: acReq.ObjSize, BucketName: acReq.BucketName} + err = copyObj(context.Background(), &obj, src, target, &className) + if err != nil && err.Error() == DMERR_NoPermission { + log.Logf("cross-cloud transition of %s failed:%v\n", acReq.ObjKey, err) + // In case credentials is changed. + src, _ = getBackendInfo(&acReq.SourceBackend, true) + target, _ = getBackendInfo(&acReq.TargetBackend, true) + err = copyObj(context.Background(), &obj, src, target, &className) + } + if err != nil && err.Error() == "in-progress" { + log.Logf("transition of object[%s] is in-progress\n", acReq.ObjKey) + return nil + } else if err != nil { + log.Logf("cross-cloud transition of %s failed:%v\n", acReq.ObjKey, err) + return err + } + + // update meta data. + setting := make(map[string]string) + setting[OBJMETA_TIER] = fmt.Sprintf("%d", acReq.TargetTier) + setting[OBJMETA_BACKEND] = acReq.TargetBackend + req := osdss3.UpdateObjMetaRequest{ObjKey: acReq.ObjKey, BucketName: acReq.BucketName, Setting: setting, LastModified: acReq.LastModified} + _, err = s3client.UpdateObjMeta(context.Background(), &req) + var loca *LocationInfo + if err != nil { + // if update metadata failed, then delete object from target storage backend. + loca = &LocationInfo{StorType: target.StorType, Region: target.Region, EndPoint: target.EndPoint, BucketName: target.BucketName, + Access: target.Access, Security: target.Security, BakendName: target.BakendName, VirBucket: obj.BucketName} + } else { + // if update metadata successfully, then delete object from source storage backend. + loca = &LocationInfo{StorType: src.StorType, Region: src.Region, EndPoint: src.EndPoint, BucketName: src.BucketName, + Access: src.Access, Security: src.Security, BakendName: src.BakendName, VirBucket: obj.BucketName} + } + + // delete object from the storage backend. + deleteObjFromBackend(acReq.ObjKey, loca) + + return nil +} diff --git a/datamover/pkg/drivers/lifecycle/expiration.go b/datamover/pkg/drivers/lifecycle/expiration.go new file mode 100644 index 000000000..7d3229047 --- /dev/null +++ b/datamover/pkg/drivers/lifecycle/expiration.go @@ -0,0 +1,78 @@ +// Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lifecycle + +import ( + "context" + "errors" + + "github.com/micro/go-log" + . "github.com/opensds/multi-cloud/datamover/pkg/utils" + datamover "github.com/opensds/multi-cloud/datamover/proto" + osdss3 "github.com/opensds/multi-cloud/s3/proto" +) + +func deleteObj(objKey string, lastmodifed int64, virtBucket string, bkend *BackendInfo) error { + log.Logf("object expiration: objKey=%s, virtBucket=%s, bkend:%+v\n", objKey, virtBucket, *bkend) + if virtBucket == "" { + log.Logf("expiration of object[%s] is failed: virtual bucket is null.\n", objKey) + return errors.New(DMERR_InternalError) + } + + loca := &LocationInfo{StorType: bkend.StorType, Region: bkend.Region, EndPoint: bkend.EndPoint, BucketName: bkend.BucketName, + Access: bkend.Access, Security: bkend.Security, BakendName: bkend.BakendName, VirBucket: virtBucket} + err := deleteObjFromBackend(objKey, loca) + if err != nil { + return err + } + + // delete metadata + delMetaReq := osdss3.DeleteObjectInput{Bucket: virtBucket, Key: objKey, Lastmodified: lastmodifed} + ctx := context.Background() + _, err = s3client.DeleteObject(ctx, &delMetaReq) + if err != nil { + // if it is deleted failed, it will be delete again in the next schedule round + log.Logf("delete object metadata of obj[bucket:%s,objKey:%s] failed, err:%v\n", + virtBucket, objKey, err) + return err + } else { + log.Logf("delete object metadata of obj[bucket:%s,objKey:%s] successfully.\n", + virtBucket, objKey) + } + + return err +} + +func doExpirationAction(acReq *datamover.LifecycleActionRequest) error { + log.Logf("delete action: delete %s.\n", acReq.ObjKey) + + loc, err := getBackendInfo(&acReq.SourceBackend, false) + if err != nil { + log.Logf("expiration of %s failed because get location failed\n", acReq.ObjKey) + return err + } + + err = deleteObj(acReq.ObjKey, acReq.LastModified, acReq.BucketName, loc) + if err != nil && err.Error() == DMERR_NoPermission { + // if permission denied, then flash backend information and try again + loc, err = getBackendInfo(&acReq.SourceBackend, true) + if err != nil { + return err + } + err = deleteObj(acReq.ObjKey, acReq.LastModified, acReq.BucketName, loc) + } + + return err +} diff --git a/datamover/pkg/drivers/lifecycle/incloud_transition.go b/datamover/pkg/drivers/lifecycle/incloud_transition.go new file mode 100644 index 000000000..d30a14bac --- /dev/null +++ b/datamover/pkg/drivers/lifecycle/incloud_transition.go @@ -0,0 +1,182 @@ +// Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
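One pattern worth calling out from `doExpirationAction` above: when a delete fails with `DMERR_NoPermission`, the backend record is reloaded from the database (the `force=true` path of `getBackendInfo`) and the operation is retried exactly once, to cover rotated credentials. A condensed sketch of that refresh-and-retry-once shape, with stand-in names (`withCredentialRetry`, `backendInfo`, `lookup`, `do`) that are illustrative only:

```go
package lifecycle

// backendInfo stands in for the real utils.BackendInfo; only the shape matters here.
type backendInfo struct{ Access, Security string }

// dmerrNoPermission mirrors the DMERR_NoPermission constant ("permissionDenied").
const dmerrNoPermission = "permissionDenied"

// withCredentialRetry runs do with cached backend info first; on a permission
// error it reloads the backend record (force=true bypasses the cache, as
// getBackendInfo does) and retries exactly once.
func withCredentialRetry(lookup func(force bool) (*backendInfo, error), do func(*backendInfo) error) error {
	bk, err := lookup(false)
	if err != nil {
		return err
	}
	if err = do(bk); err != nil && err.Error() == dmerrNoPermission {
		if bk, err = lookup(true); err != nil {
			return err
		}
		err = do(bk)
	}
	return err
}
```

The same shape reappears in `doInCloudTransition` and `doCrossCloudTransition` in the files around this one.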
+ +package lifecycle + +import ( + "context" + "errors" + "fmt" + + "github.com/micro/go-log" + backend "github.com/opensds/multi-cloud/backend/pkg/utils/constants" + flowtype "github.com/opensds/multi-cloud/dataflow/pkg/model" + "github.com/opensds/multi-cloud/datamover/pkg/amazon/s3" + "github.com/opensds/multi-cloud/datamover/pkg/azure/blob" + "github.com/opensds/multi-cloud/datamover/pkg/hw/obs" + "github.com/opensds/multi-cloud/datamover/pkg/ibm/cos" + . "github.com/opensds/multi-cloud/datamover/pkg/utils" + "github.com/opensds/multi-cloud/datamover/proto" + osdss3 "github.com/opensds/multi-cloud/s3/proto" + "sync" +) + +func changeStorageClass(objKey *string, newClass *string, virtBucket *string, bkend *BackendInfo) error { + log.Logf("Change storage class of object[%s] to %s.\n", *objKey, *newClass) + if *virtBucket == "" { + log.Logf("change storage class of object[%s] is failed: virtual bucket is null\n", objKey) + return errors.New(DMERR_InternalError) + } + + key := *objKey + if *virtBucket != "" { + key = *virtBucket + "/" + *objKey + } + + var err error + switch bkend.StorType { + case flowtype.STOR_TYPE_AWS_S3: + mover := s3mover.S3Mover{} + err = mover.ChangeStorageClass(&key, newClass, bkend) + case flowtype.STOR_TYPE_IBM_COS: + mover := ibmcosmover.IBMCOSMover{} + err = mover.ChangeStorageClass(&key, newClass, bkend) + case flowtype.STOR_TYPE_HW_OBS: + mover := obsmover.ObsMover{} + err = mover.ChangeStorageClass(&key, newClass, bkend) + case flowtype.STOR_TYPE_AZURE_BLOB: + mover := blobmover.BlobMover{} + err = mover.ChangeStorageClass(&key, newClass, bkend) + default: + log.Logf("change storage class of object[objkey:%s] failed: backend type is not support.\n", objKey) + err = errors.New(DMERR_UnSupportBackendType) + } + + if err == nil { + log.Logf("Change storage class of object[%s] to %s successfully.\n", *objKey, *newClass) + } + + return err +} + +func loadStorageClassDefinition() error { + res, _ := s3client.GetTierMap(context.Background(), &osdss3.BaseRequest{}) + if len(res.Tier2Name) == 0 { + log.Log("get tier definition failed") + return fmt.Errorf("get tier definition failed") + } + + log.Logf("Load storage class definition from s3 service successfully, res.Tier2Name:%+v\n", res.Tier2Name) + Int2ExtTierMap = make(map[string]*Int2String) + for k, v := range res.Tier2Name { + val := make(Int2String) + for k1, v1 := range v.Lst { + val[k1] = v1 + } + Int2ExtTierMap[k] = &val + } + + return nil +} + +func getStorageClassName(tier int32, storageType string) (string, error) { + log.Logf("Get storage class name of tier[%d].\n", tier) + var err error + var mutex sync.Mutex + mutex.Lock() + if len(Int2ExtTierMap) == 0 { + err = loadStorageClassDefinition() + } else { + err = nil + } + mutex.Unlock() + + if err != nil { + return "", err + } + + key := "" + switch storageType { + case flowtype.STOR_TYPE_AWS_S3: + key = backend.BackendTypeAws + case flowtype.STOR_TYPE_IBM_COS: + key = backend.BackendTypeIBMCos + case flowtype.STOR_TYPE_HW_OBS, flowtype.STOR_TYPE_HW_FUSIONSTORAGE, flowtype.STOR_TYPE_HW_FUSIONCLOUD: + key = backend.BackendTypeObs + case flowtype.STOR_TYPE_AZURE_BLOB: + key = backend.BackendTypeAzure + case flowtype.STOR_TYPE_CEPH_S3: + key = backend.BackendTypeCeph + case flowtype.STOR_TYPE_GCP_S3: + key = backend.BackendTypeGcp + default: + log.Log("map tier to storage class name failed: backend type is not support.") + return "", errors.New(DMERR_UnSupportBackendType) + } + + className := "" + log.Logf("key:%s\n", key) + v1, _ := Int2ExtTierMap[key] + 
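// Note: the blank-identifier lookup above drops the second ("ok") return value; if key is + // ever missing from Int2ExtTierMap, v1 will be nil and the dereference on the next line + // will panic. Checking "v1, ok := Int2ExtTierMap[key]" before indexing would make this safe. +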
v2, ok := (*v1)[tier] + if !ok { + err = fmt.Errorf("tier[%d] is not support for %s", tier, storageType) + } else { + className = v2 + } + + log.Logf("Storage class name of tier[%d] is %s.\n", tier, className) + return className, err +} + +func doInCloudTransition(acReq *datamover.LifecycleActionRequest) error { + log.Logf("in-cloud transition action: transition %s from %d to %d of %s.\n", + acReq.ObjKey, acReq.SourceTier, acReq.TargetTier, acReq.SourceBackend) + + loca, err := getBackendInfo(&acReq.SourceBackend, false) + if err != nil { + log.Logf("in-cloud transition of %s failed because get location failed.\n", acReq.ObjKey) + return err + } + + className, err := getStorageClassName(acReq.TargetTier, loca.StorType) + if err != nil { + log.Logf("in-cloud transition of %s failed because target tier is not supported.\n", acReq.ObjKey) + return err + } + err = changeStorageClass(&acReq.ObjKey, &className, &acReq.BucketName, loca) + if err != nil && err.Error() == DMERR_NoPermission { + loca, err = getBackendInfo(&acReq.SourceBackend, true) + if err != nil { + return err + } + err = changeStorageClass(&acReq.ObjKey, &className, &acReq.BucketName, loca) + } + + if err != nil { + log.Logf("in-cloud transition of %s failed: %v.\n", acReq.ObjKey, err) + return err + } + + // update meta data. + setting := make(map[string]string) + setting[OBJMETA_TIER] = fmt.Sprintf("%d", acReq.TargetTier) + req := osdss3.UpdateObjMetaRequest{ObjKey: acReq.ObjKey, BucketName: acReq.BucketName, Setting: setting, LastModified: acReq.LastModified} + _, err = s3client.UpdateObjMeta(context.Background(), &req) + if err != nil { + // If update failed, it will be redo again in the next round of scheduling + log.Logf("update tier of object[%s] to %d failed:%v.\n", acReq.ObjKey, acReq.TargetTier, err) + } + + return err +} diff --git a/datamover/pkg/drivers/lifecycle/lifecycle.go b/datamover/pkg/drivers/lifecycle/lifecycle.go new file mode 100644 index 000000000..4e7c6c4c2 --- /dev/null +++ b/datamover/pkg/drivers/lifecycle/lifecycle.go @@ -0,0 +1,148 @@ +// Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package lifecycle + +import ( + "encoding/json" + "errors" + "fmt" + "sync" + + "github.com/micro/go-log" + "github.com/micro/go-micro/client" + backend "github.com/opensds/multi-cloud/backend/proto" + flowtype "github.com/opensds/multi-cloud/dataflow/pkg/model" + "github.com/opensds/multi-cloud/dataflow/pkg/utils" + s3mover "github.com/opensds/multi-cloud/datamover/pkg/amazon/s3" + blobmover "github.com/opensds/multi-cloud/datamover/pkg/azure/blob" + cephs3mover "github.com/opensds/multi-cloud/datamover/pkg/ceph/s3" + "github.com/opensds/multi-cloud/datamover/pkg/db" + Gcps3mover "github.com/opensds/multi-cloud/datamover/pkg/gcp/s3" + obsmover "github.com/opensds/multi-cloud/datamover/pkg/hw/obs" + ibmcosmover "github.com/opensds/multi-cloud/datamover/pkg/ibm/cos" + . 
"github.com/opensds/multi-cloud/datamover/pkg/utils" + datamover "github.com/opensds/multi-cloud/datamover/proto" + osdss3 "github.com/opensds/multi-cloud/s3/proto" +) + +var bkendInfo map[string]*BackendInfo +var s3client osdss3.S3Service +var bkendclient backend.BackendService +var mutex sync.RWMutex + +type Int2String map[int32]string + +// map from cloud vendor name to it's map relation relationship between internal tier to it's storage class name. +var Int2ExtTierMap map[string]*Int2String + +func Init() { + log.Logf("Lifecycle datamover init.") + s3client = osdss3.NewS3Service("s3", client.DefaultClient) + bkendclient = backend.NewBackendService("backend", client.DefaultClient) + bkendInfo = make(map[string]*BackendInfo) +} + +func HandleMsg(msgData []byte) error { + var acReq datamover.LifecycleActionRequest + err := json.Unmarshal(msgData, &acReq) + if err != nil { + log.Logf("unmarshal lifecycle action request failed, err:%v\n", err) + return err + } + + go doAction(&acReq) + + return nil +} + +func doAction(acReq *datamover.LifecycleActionRequest) { + acType := int(acReq.Action) + switch acType { + case utils.ActionCrosscloudTransition: + doCrossCloudTransition(acReq) + case utils.ActionIncloudTransition: + doInCloudTransition(acReq) + case utils.ActionExpiration: + doExpirationAction(acReq) + case utils.AbortIncompleteMultipartUpload: + doAbortUpload(acReq) + default: + log.Logf("unsupported action type: %d.\n", acType) + } +} + +// force means get location information from database not from cache. +func getBackendInfo(backendName *string, force bool) (*BackendInfo, error) { + if !force { + loc, exist := bkendInfo[*backendName] + if exist { + return loc, nil + } + } + + if *backendName == "" { + log.Log("get backend information failed, backendName is null.\n") + return nil, errors.New(DMERR_InternalError) + } + + bk, err := db.DbAdapter.GetBackendByName(*backendName) + if err != nil { + log.Logf("get backend[%s] information failed, err:%v\n", backendName, err) + return nil, err + } else { + loca := &BackendInfo{bk.Type, bk.Region, bk.Endpoint, bk.BucketName, + bk.Access, bk.Security, *backendName} + log.Logf("refresh backend[name:%s, loca:%+v] successfully.\n", *backendName, *loca) + bkendInfo[*backendName] = loca + return loca, nil + } +} + +func deleteObjFromBackend(objKey string, loca *LocationInfo) error { + if loca.VirBucket != "" { + objKey = loca.VirBucket + "/" + objKey + } + var err error = nil + switch loca.StorType { + case flowtype.STOR_TYPE_AWS_S3: + mover := s3mover.S3Mover{} + err = mover.DeleteObj(objKey, loca) + case flowtype.STOR_TYPE_IBM_COS: + mover := ibmcosmover.IBMCOSMover{} + err = mover.DeleteObj(objKey, loca) + case flowtype.STOR_TYPE_HW_OBS, flowtype.STOR_TYPE_HW_FUSIONSTORAGE, flowtype.STOR_TYPE_HW_FUSIONCLOUD: + mover := obsmover.ObsMover{} + err = mover.DeleteObj(objKey, loca) + case flowtype.STOR_TYPE_AZURE_BLOB: + mover := blobmover.BlobMover{} + err = mover.DeleteObj(objKey, loca) + case flowtype.STOR_TYPE_CEPH_S3: + mover := cephs3mover.CephS3Mover{} + err = mover.DeleteObj(objKey, loca) + case flowtype.STOR_TYPE_GCP_S3: + mover := Gcps3mover.GcpS3Mover{} + err = mover.DeleteObj(objKey, loca) + default: + err = fmt.Errorf("unspport storage type:%s", loca.StorType) + } + + if err != nil { + log.Logf("delete object[%s] from backend[type:%s,bucket:%s] failed.\n", objKey, loca.StorType, loca.BucketName) + } else { + log.Logf("delete object[%s] from backend[type:%s,bucket:%s] successfully.\n", objKey, loca.StorType, loca.BucketName) + } + + return 
err +} diff --git a/datamover/pkg/gcp/s3/gcplifecycle.go b/datamover/pkg/gcp/s3/gcplifecycle.go new file mode 100644 index 000000000..11f65c3fb --- /dev/null +++ b/datamover/pkg/gcp/s3/gcplifecycle.go @@ -0,0 +1,33 @@ +// Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package Gcps3mover + +import ( + "errors" + + "github.com/micro/go-log" + . "github.com/opensds/multi-cloud/datamover/pkg/utils" +) + +func (mover *GcpS3Mover) ChangeStorageClass(objKey *string, newClass *string, bkend *BackendInfo) error { + log.Log("gcp s3 change storage class of %s to %s failed.", objKey, newClass) + return errors.New(DMERR_InternalError) +} + +func (mover *GcpS3Mover) DeleteIncompleteMultipartUpload(objKey, uploadId string, destLoca *LocationInfo) error { + log.Log("gcp s3 does not support to delete incomplete multipart upload.") + + return errors.New(DMERR_InternalError) +} diff --git a/datamover/pkg/gcp/s3/s3mover.go b/datamover/pkg/gcp/s3/s3mover.go index 78efd1325..54a2935d7 100644 --- a/datamover/pkg/gcp/s3/s3mover.go +++ b/datamover/pkg/gcp/s3/s3mover.go @@ -49,29 +49,29 @@ func md5Content(data []byte) string { } func (mover *GcpS3Mover) UploadObj(objKey string, destLoca *LocationInfo, buf []byte) error { - log.Logf("[gcps3mover] UploadObj object, key:%s.", objKey) + log.Logf("[gcps3mover] upload object, key:%s.\n", objKey) sess := NewClient(destLoca.EndPoint, destLoca.Access, destLoca.Security) bucket := sess.NewBucket() gcpObject := bucket.NewObject(destLoca.BucketName) contentMD5 := md5Content(buf) length := int64(len(buf)) body := ioutil.NopCloser(bytes.NewReader(buf)) - log.Logf("[gcps3mover] Try to upload, bucket:%s,obj:%s\n", destLoca.BucketName, objKey) + log.Logf("[gcps3mover] try to upload, bucket:%s,obj:%s\n", destLoca.BucketName, objKey) for tries := 1; tries <= 3; tries++ { err := gcpObject.Create(objKey, contentMD5, "", length, body, models.Private) if err != nil { - log.Logf("[gcps3mover] Upload object[%s] failed %d times, err:%v\n", objKey, tries, err) + log.Logf("[gcps3mover] upload object[%s] failed %d times, err:%v\n", objKey, tries, err) if tries == 3 { return err } } else { - log.Logf("[gcps3mover] Upload object[%s] successfully.", objKey) + log.Logf("[gcps3mover] upload object[%s] successfully.\n", objKey) return nil } } - log.Logf("[gcps3mover] Upload object, bucket:%s,obj:%s, should not be here.\n", destLoca.BucketName, objKey) - return errors.New("internal error") + log.Logf("[gcps3mover] upload object, bucket:%s,obj:%s, should not be here.\n", destLoca.BucketName, objKey) + return errors.New(DMERR_InternalError) } func (mover *GcpS3Mover) DownloadObj(objKey string, srcLoca *LocationInfo, buf []byte) (size int64, err error) { @@ -103,7 +103,7 @@ func (mover *GcpS3Mover) DownloadObj(objKey string, srcLoca *LocationInfo, buf [ } log.Logf("[gcps3mover]download object[bucket:%s,key:%s], should not be here.\n", srcLoca.BucketName, objKey) - return 0, errors.New("internal error") + return 0, errors.New(DMERR_InternalError) } func 
(mover *GcpS3Mover) MultiPartDownloadInit(srcLoca *LocationInfo) error { @@ -136,7 +136,7 @@ func (mover *GcpS3Mover) DownloadRange(objKey string, srcLoca *LocationInfo, buf size = int64(len(data)) copy(buf, data) if err != nil { - log.Logf("[gcps3mover] Download object[%s] range[%d - %d] faild %d times, err:%v\n", + log.Logf("[gcps3mover] download object[%s] range[%d - %d] failed %d times, err:%v\n", objKey, start, end, tries, err) if tries == 3 { return 0, err @@ -147,11 +147,11 @@ func (mover *GcpS3Mover) DownloadRange(objKey string, srcLoca *LocationInfo, buf } } - log.Logf("[gcps3mover] Download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) - return 0, errors.New("internal error") + log.Logf("[gcps3mover] download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) + return 0, errors.New(DMERR_InternalError) } -func (mover *GcpS3Mover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) error { +func (mover *GcpS3Mover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) (string, error) { sess := NewClient(destLoca.EndPoint, destLoca.Access, destLoca.Security) bucket := sess.NewBucket() gcpObject := bucket.NewObject(destLoca.BucketName) @@ -161,18 +161,18 @@ func (mover *GcpS3Mover) MultiPartUploadInit(objKey string, destLoca *LocationIn resp, err := mover.svc.Initiate(nil) if err != nil { - log.Logf("[gcps3mover] Init multipart upload[objkey:%s] failed %d times.\n", objKey, tries) + log.Logf("[gcps3mover] init multipart upload[objkey:%s] failed %d times.\n", objKey, tries) if tries == 3 { - return err + return "", err } } else { mover.multiUploadInitOut = &CreateMultipartUploadOutput{resp.UploadID} - log.Logf("[gcps3mover] Init multipart upload[objkey:%s] successfully, UploadId:%s\n", objKey, resp.UploadID) - return nil + log.Logf("[gcps3mover] init multipart upload[objkey:%s] successfully, UploadId:%s\n", objKey, resp.UploadID) + return resp.UploadID, nil } } - log.Logf("[gcps3mover] Init multipart upload[objkey:%s], should not be here.\n", objKey) - return errors.New("internal error") + log.Logf("[gcps3mover] init multipart upload[objkey:%s], should not be here.\n", objKey) + return "", errors.New(DMERR_InternalError) } @@ -188,7 +188,7 @@ func (mover *GcpS3Mover) UploadPart(objKey string, destLoca *LocationInfo, upByt for tries := 1; tries <= 3; tries++ { upRes, err := mover.svc.UploadPart(int(partNumber), mover.multiUploadInitOut.UploadID, contentMD5, "", length, body) if err != nil { - log.Logf("[gcps3mover] Upload range[objkey:%s, partnumber#%d, offset#%d] failed %d times, err:%v\n", + log.Logf("[gcps3mover] upload range[objkey:%s, partnumber#%d, offset#%d] failed %d times, err:%v\n", objKey, partNumber, offset, tries, err) if tries == 3 { return err @@ -200,8 +200,8 @@ func (mover *GcpS3Mover) UploadPart(objKey string, destLoca *LocationInfo, upByt return nil } } - log.Logf("[gcps3mover] Upload range[objkey:%s, partnumber#%d, offset#%d], should not be here.\n", objKey, partNumber, offset) - return errors.New("internal error") + log.Logf("[gcps3mover] upload range[objkey:%s, partnumber#%d, offset#%d], should not be here.\n", objKey, partNumber, offset) + return errors.New(DMERR_InternalError) } func (mover *GcpS3Mover) AbortMultipartUpload(objKey string, destLoca *LocationInfo) error { @@ -212,7 +212,7 @@ func (mover *GcpS3Mover) AbortMultipartUpload(objKey string, destLoca *LocationI for tries := 1; tries <= 3; tries++ { err := uploader.RemoveUploads(mover.multiUploadInitOut.UploadID) if err != nil { - 
log.Logf("[gcps3mover] Abort multipart upload[objkey:%s] for uploadId#%s failed %d times.\n", + log.Logf("[gcps3mover] abort multipart upload[objkey:%s] for uploadId#%s failed %d times.\n", objKey, mover.multiUploadInitOut.UploadID, tries) if tries == 3 { return err @@ -223,9 +223,9 @@ func (mover *GcpS3Mover) AbortMultipartUpload(objKey string, destLoca *LocationI return nil } } - log.Logf("[gcps3mover] Abort multipart upload[objkey:%s] for uploadId#%s, should not be here.\n", + log.Logf("[gcps3mover] abort multipart upload[objkey:%s] for uploadId#%s, should not be here.\n", objKey, mover.multiUploadInitOut.UploadID) - return errors.New("internal error") + return errors.New(DMERR_InternalError) } func (mover *GcpS3Mover) CompleteMultipartUpload(objKey string, destLoca *LocationInfo) error { @@ -251,7 +251,7 @@ func (mover *GcpS3Mover) CompleteMultipartUpload(objKey string, destLoca *Locati } } log.Logf("[gcps3mover] completeMultipartUpload [objkey:%s], should not be here.\n", objKey) - return errors.New("internal error") + return errors.New(DMERR_InternalError) } func (mover *GcpS3Mover) DeleteObj(objKey string, loca *LocationInfo) error { @@ -262,7 +262,7 @@ func (mover *GcpS3Mover) DeleteObj(objKey string, loca *LocationInfo) error { err := gcpObject.Remove(objKey) if err != nil { - log.Logf("[gcps3mover] Error occurred while waiting for object[%s] to be deleted.\n", objKey) + log.Logf("[gcps3mover] error occurred while waiting for object[%s] to be deleted.\n", objKey) return err } @@ -283,7 +283,7 @@ func ListObjs(loca *LocationInfo, filt *pb.Filter) ([]models.GetBucketResponseCo output, err = bucket.Get(string(loca.BucketName), "", "", "", 1000) } if err != nil { - log.Logf("[gcps3mover] List bucket failed, err:%v\n", err) + log.Logf("[gcps3mover] list bucket failed, err:%v\n", err) return nil, err } diff --git a/datamover/pkg/hw/obs/obslifecycle.go b/datamover/pkg/hw/obs/obslifecycle.go new file mode 100644 index 000000000..558fa300c --- /dev/null +++ b/datamover/pkg/hw/obs/obslifecycle.go @@ -0,0 +1,79 @@ +// Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package obsmover + +import ( + "errors" + + "github.com/micro/go-log" + "github.com/opensds/multi-cloud/api/pkg/utils/obs" + . 
"github.com/opensds/multi-cloud/datamover/pkg/utils" +) + +func (mover *ObsMover) ChangeStorageClass(objKey *string, newClass *string, bkend *BackendInfo) error { + obsClient, err := obs.New(bkend.Access, bkend.Security, bkend.EndPoint) + if err != nil { + log.Logf("[obslifecycle] new client failed when change storage class of obj[%s] to %s failed, err:%v\n", + objKey, newClass, err) + return err + } + + input := &obs.CopyObjectInput{} + input.Bucket = bkend.BucketName + input.Key = *objKey + input.CopySourceBucket = bkend.BucketName + input.CopySourceKey = *objKey + input.MetadataDirective = obs.CopyMetadata + switch *newClass { + case "STANDARD_IA": + input.StorageClass = obs.StorageClassWarm + case "GLACIER": + input.StorageClass = obs.StorageClassCold + default: + log.Logf("[obslifecycle] unspport storage class:%s", newClass) + return errors.New(DMERR_UnSupportStorageClass) + } + _, err = obsClient.CopyObject(input) + if err != nil { + log.Logf("[obslifecycle] change storage class of object[%s] to %s failed: %v\n", objKey, newClass, err) + e := handleHWObsErrors(err) + return e + } + + // TODO: How to make sure copy is complemented? Wait to see if the item got copied (example:svc.WaitUntilObjectExists)? + + return nil +} + +func (mover *ObsMover) DeleteIncompleteMultipartUpload(objKey, uploadId string, loc *LocationInfo) error { + obsClient, err := obs.New(loc.Access, loc.Security, loc.EndPoint) + if err != nil { + log.Logf("[obslifecycle] new client failed when delete incomplete multipart upload[objkey=%s,uploadid=%s] failed, err:%v\n", + objKey, uploadId, err) + return err + } + + input := &obs.AbortMultipartUploadInput{Bucket: loc.BucketName, Key: objKey, UploadId: uploadId} + _, err = obsClient.AbortMultipartUpload(input) + e := handleHWObsErrors(err) + if e == nil || e.Error() == DMERR_NoSuchUpload { + log.Logf("delete incomplete multipart upload[objkey=%s,uploadid=%s] successfully.\n", objKey, uploadId) + return nil + } else { + log.Logf("delete incomplete multipart upload[objkey=%s,uploadid=%s] failed, err:%v.\n", objKey, uploadId, err) + } + + return e +} diff --git a/datamover/pkg/hw/obs/obsmover.go b/datamover/pkg/hw/obs/obsmover.go index 763809e08..5f48ef498 100644 --- a/datamover/pkg/hw/obs/obsmover.go +++ b/datamover/pkg/hw/obs/obsmover.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved. +// Copyright 2019 The OpenSDS Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -31,6 +31,28 @@ type ObsMover struct { completeParts []obs.Part //for multipart upload } +func handleHWObsErrors(err error) error { + if err != nil { + if serr, ok := err.(obs.ObsError); ok { // This error is a Service-specific + code := serr.Code + switch code { // Compare serviceCode to ServiceCodeXxx constants + case "SignatureDoesNotMatch": + log.Log("hw-obs error: SignatureDoesNotMatch.") + return errors.New(DMERR_NoPermission) + case "NoSuchKey": + log.Log("hw-obs error: NoSuchKey.") + return errors.New(DMERR_NoSuchKey) + case "NoSuchUpload": + return errors.New(DMERR_NoSuchUpload) + default: + return err + } + } + } + + return nil +} + func (mover *ObsMover) DownloadObj(objKey string, srcLoca *LocationInfo, buf []byte) (size int64, err error) { obsClient, err := obs.New(srcLoca.Access, srcLoca.Security, srcLoca.EndPoint) if err != nil { @@ -45,14 +67,15 @@ func (mover *ObsMover) DownloadObj(objKey string, srcLoca *LocationInfo, buf []b for tries := 1; tries <= 3; tries++ { output, err := obsClient.GetObject(input) if err != nil { - log.Logf("[obsmover] Download object[%s] failed %d times, err:%v", objKey, tries, err) - if tries == 3 { - return 0, err + log.Logf("[obsmover] download object[%s] failed %d times, err:%v", objKey, tries, err) + e := handleHWObsErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission || e.Error() == DMERR_NoSuchKey { //If no permission, then no need to retry. + return 0, e } } else { size = 0 defer output.Body.Close() - log.Logf("[obsmover] Get objcet[%s], StorageClass:%s, ETag:%s, ContentType:%s, ContentLength:%d, LastModified:%s\n", + log.Logf("[obsmover] get objcet[%s], StorageClass:%s, ETag:%s, ContentType:%s, ContentLength:%d, LastModified:%s\n", objKey, output.StorageClass, output.ETag, output.ContentType, output.ContentLength, output.LastModified) var readErr error var readCount int = 0 @@ -72,20 +95,20 @@ func (mover *ObsMover) DownloadObj(objKey string, srcLoca *LocationInfo, buf []b if readErr == io.EOF { readErr = nil } - log.Logf("[obsmover] Download object[%s] successfully.", objKey) + log.Logf("[obsmover] download object[%s] successfully.", objKey) return size, readErr } } - log.Logf("[obsmover] Download object[%s], should not be here.", objKey) - return + log.Logf("[obsmover] download object[%s], should not be here.", objKey) + return 0, errors.New(DMERR_InternalError) } func (mover *ObsMover) UploadObj(objKey string, destLoca *LocationInfo, buf []byte) error { - log.Logf("[obsmover] Try to upload object[%s], buf.len=%d.", objKey, len(buf)) + log.Logf("[obsmover] try to upload object[%s], buf.len=%d.", objKey, len(buf)) obsClient, err := obs.New(destLoca.Access, destLoca.Security, destLoca.EndPoint) if err != nil { - log.Logf("[obsmover] Init obs failed for upload object[%s], err:%v.\n", objKey, err) + log.Logf("[obsmover] init obs failed for upload object[%s], err:%v.\n", objKey, err) return err } @@ -93,28 +116,42 @@ func (mover *ObsMover) UploadObj(objKey string, destLoca *LocationInfo, buf []by input.Bucket = destLoca.BucketName input.Key = objKey input.Body = bytes.NewReader(buf) + if destLoca.ClassName != "" { + switch destLoca.ClassName { + case string(obs.StorageClassStandard): + input.StorageClass = obs.StorageClassStandard + case string(obs.StorageClassWarm): + input.StorageClass = obs.StorageClassWarm + case string(obs.StorageClassCold): + input.StorageClass = obs.StorageClassCold + default: + log.Logf("[obsmover] upload object[%s] failed, err: invalid storage class[%s].\n", objKey, destLoca.ClassName) + return 
errors.New("invalid storage class") + } + } for tries := 1; tries <= 3; tries++ { output, err := obsClient.PutObject(input) if err != nil { - log.Logf("[obsmover] Put object[%s] failed %d times, err: %v\n", objKey, tries, err) - if tries == 3 { - return err + log.Logf("[obsmover] put object[%s] failed %d times, err: %v\n", objKey, tries, err) + e := handleHWObsErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return e } } else { - log.Logf("[obsmover] Put object[%s] successfully, RequestId:%s, ETag:%s\n", + log.Logf("[obsmover] put object[%s] successfully, RequestId:%s, ETag:%s\n", objKey, output.RequestId, output.ETag) return nil } } log.Logf("[obsmover] Put object[%s], should not be here.\n", objKey) - return nil + return errors.New(DMERR_InternalError) } func (mover *ObsMover) DeleteObj(objKey string, loca *LocationInfo) error { obsClient, err := obs.New(loca.Access, loca.Security, loca.EndPoint) if err != nil { - log.Logf("[obsmover] New client failed when delete obj[objKey:%s] in storage backend[type:hws], err:%v\n", + log.Logf("[obsmover] new client failed when delete obj[objKey:%s] in storage backend[type:hws], err:%v\n", objKey, err) return err } @@ -122,30 +159,31 @@ func (mover *ObsMover) DeleteObj(objKey string, loca *LocationInfo) error { input := &obs.DeleteObjectInput{} input.Bucket = loca.BucketName input.Key = objKey - log.Logf("[obsmover] Try to Delete object[objKey:%s].", objKey) + log.Logf("[obsmover] try to Delete object[objKey:%s].", objKey) for tries := 1; tries <= 3; tries++ { output, err := obsClient.DeleteObject(input) if err != nil { - log.Logf("[obsmover] Delete object[objKey:%s] in storage backend failed %d times, err:%v\n", + log.Logf("[obsmover] delete object[objKey:%s] in storage backend failed %d times, err:%v\n", objKey, tries, err) - if tries == 3 { - return err + e := handleHWObsErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return e } } else { - log.Logf("[obsmover] Delete object[objKey:%s] in storage backend successfully, RequestId:%s\n", + log.Logf("[obsmover] delete object[objKey:%s] in storage backend successfully, RequestId:%s\n", objKey, output.RequestId) return nil } } - log.Logf("[obsmover] Delete object[objKey:%s] in storage backend, should not be here.\n", objKey) - return errors.New("Internal error") + log.Logf("[obsmover] delete object[objKey:%s] in storage backend, should not be here.\n", objKey) + return errors.New(DMERR_InternalError) } func (mover *ObsMover) MultiPartDownloadInit(srcLoca *LocationInfo) error { var err error mover.obsClient, err = obs.New(srcLoca.Access, srcLoca.Security, srcLoca.EndPoint) if err != nil { - log.Logf("[obsmover] MultiPartDownloadInit failed:%v\n", err) + log.Logf("[obsmover] multiPart download init failed:%v\n", err) } return err } @@ -156,14 +194,15 @@ func (mover *ObsMover) DownloadRange(objKey string, srcLoca *LocationInfo, buf [ input.Key = objKey input.RangeStart = start input.RangeEnd = end - log.Logf("[obsmover] Try to download object[%s] range[%d - %d]...\n", objKey, start, end) + log.Logf("[obsmover] try to download object[%s] range[%d - %d]...\n", objKey, start, end) for tries := 1; tries <= 3; tries++ { output, err := mover.obsClient.GetObject(input) if err != nil { - log.Logf("[obsmover] Download object[%s] range[%d - %d] failed %d times.\n", + log.Logf("[obsmover] download object[%s] range[%d - %d] failed %d times.\n", objKey, start, end, tries) - if tries == 3 { - return 0, err + e := handleHWObsErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission 
{ + return 0, e } } else { defer output.Body.Close() @@ -180,49 +219,63 @@ func (mover *ObsMover) DownloadRange(objKey string, srcLoca *LocationInfo, buf [ } } if readErr != nil && readErr != io.EOF { - log.Logf("[obsmover] Body.read for object[%s] failed, err:%v\n", objKey, err) + log.Logf("[obsmover] body.read for object[%s] failed, err:%v\n", objKey, err) return 0, readErr } - log.Logf("[obsmover] Download object[%s] range[%d - %d] successfully, readCount=%d.\n", objKey, start, end, readCount) + log.Logf("[obsmover] download object[%s] range[%d - %d] successfully, readCount=%d.\n", objKey, start, end, readCount) return int64(readCount), nil } } - log.Logf("[obsmover] Download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) - return 0, errors.New("Internal error") + log.Logf("[obsmover] download object[%s] range[%d - %d], should not be here.\n", objKey, start, end) + return 0, errors.New(DMERR_InternalError) } -func (mover *ObsMover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) error { +func (mover *ObsMover) MultiPartUploadInit(objKey string, destLoca *LocationInfo) (string, error) { input := &obs.InitiateMultipartUploadInput{} input.Bucket = destLoca.BucketName input.Key = objKey + if destLoca.ClassName != "" { + switch destLoca.ClassName { + case string(obs.StorageClassStandard): + input.StorageClass = obs.StorageClassStandard + case string(obs.StorageClassWarm): + input.StorageClass = obs.StorageClassWarm + case string(obs.StorageClassCold): + input.StorageClass = obs.StorageClassCold + default: + log.Logf("[obsmover] upload object[%s] failed, err: invalid storage class[%s].\n", objKey, destLoca.ClassName) + return "", errors.New("invalid storage class") + } + } var err error = nil mover.obsClient, err = obs.New(destLoca.Access, destLoca.Security, destLoca.EndPoint) if err != nil { - log.Logf("[obsmover] Create obsclient failed in MultiPartUploadInit[obj:%s], err:%v\n", objKey, err) - return err + log.Logf("[obsmover] create obsclient failed in MultiPartUploadInit[obj:%s], err:%v\n", objKey, err) + return "", err } - log.Logf("[obsmover] Try to InitiateMultipartUpload [objkey:%s].\n", objKey) + log.Logf("[obsmover] try to InitiateMultipartUpload [objkey:%s].\n", objKey) for tries := 1; tries <= 3; tries++ { mover.multiUploadInitOut, err = mover.obsClient.InitiateMultipartUpload(input) if err != nil { - log.Logf("[obsmover] InitiateMultipartUpload [objkey:%s] failed %d times. err:%v\n", objKey, tries, err) - if tries == 3 { - return err + log.Logf("[obsmover] init multipart upload [objkey:%s] failed %d times. 
err:%v\n", objKey, tries, err) + e := handleHWObsErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return "", e } } else { - log.Logf("[obsmover] Initiate multipart upload [objkey:%s] successfully.\n", objKey) - return nil + log.Logf("[obsmover] initiate multipart upload [objkey:%s] successfully.\n", objKey) + return mover.multiUploadInitOut.UploadId, nil } } - log.Logf("[obsmover] Initiate multipart upload [objkey:%s], should not be here.\n", objKey) - return errors.New("Internal error") + log.Logf("[obsmover] initiate multipart upload [objkey:%s], should not be here.\n", objKey) + return "", errors.New(DMERR_InternalError) } func (mover *ObsMover) UploadPart(objKey string, destLoca *LocationInfo, upBytes int64, buf []byte, partNumber int64, offset int64) error { - log.Logf("[obsmover] Try to upload object[%s] range[partnumber#%d,offset#%d]...\n", objKey, partNumber, offset) + log.Logf("[obsmover] try to upload object[%s] range[partnumber#%d,offset#%d]...\n", objKey, partNumber, offset) uploadPartInput := &obs.UploadPartInput{} uploadPartInput.Bucket = destLoca.BucketName uploadPartInput.Key = objKey @@ -235,13 +288,14 @@ func (mover *ObsMover) UploadPart(objKey string, destLoca *LocationInfo, upBytes uploadPartInput.Body = bytes.NewReader(buf) uploadPartInputOutput, err := mover.obsClient.UploadPart(uploadPartInput) if err != nil { - log.Logf("[obsmover] Upload object[%s] range[partnumber#%d,offset#%d] failed %d times. err:%v\n", + log.Logf("[obsmover] upload object[%s] range[partnumber#%d,offset#%d] failed %d times. err:%v\n", objKey, partNumber, offset, tries, err) - if tries == 3 { - return err + e := handleHWObsErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return e } } else { - log.Logf("[obsmover] Upload object[%s] range[partnumber#%d,offset#%d] successfully, size:%d\n", + log.Logf("[obsmover] upload object[%s] range[partnumber#%d,offset#%d] successfully, size:%d\n", objKey, partNumber, offset, upBytes) mover.completeParts = append(mover.completeParts, obs.Part{ ETag: uploadPartInputOutput.ETag, @@ -250,9 +304,9 @@ func (mover *ObsMover) UploadPart(objKey string, destLoca *LocationInfo, upBytes } } - log.Logf("[obsmover] Upload object[%s] range[partnumber#%d,offset#%d], should not be here.\n", + log.Logf("[obsmover] upload object[%s] range[partnumber#%d,offset#%d], should not be here.\n", objKey, partNumber, offset) - return errors.New("internal error") + return errors.New(DMERR_InternalError) } func (mover *ObsMover) AbortMultipartUpload(objKey string, destLoca *LocationInfo) (err error) { @@ -260,25 +314,26 @@ func (mover *ObsMover) AbortMultipartUpload(objKey string, destLoca *LocationInf input.Bucket = destLoca.BucketName input.Key = objKey input.UploadId = mover.multiUploadInitOut.UploadId - log.Logf("[obsmover] Try to abort multipartupload, objkey:%s, uploadId:%s.\n", + log.Logf("[obsmover] try to abort multipartupload, objkey:%s, uploadId:%s.\n", objKey, mover.multiUploadInitOut.UploadId) for tries := 1; tries <= 3; tries++ { _, err = mover.obsClient.AbortMultipartUpload(input) if err != nil { - log.Logf("[obsmover] Abort multipartupload failed %d times, objkey:%s, uploadId:%s, err:%v\n", + log.Logf("[obsmover] abort multipartupload failed %d times, objkey:%s, uploadId:%s, err:%v\n", tries, objKey, mover.multiUploadInitOut.UploadId, err) - if tries == 3 { - return err + e := handleHWObsErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return e } } else { - log.Logf("[obsmover] Abort multipartupload successfully, 
objkey:%s, uploadId:%s.\n", + log.Logf("[obsmover] abort multipartupload successfully, objkey:%s, uploadId:%s.\n", objKey, mover.multiUploadInitOut.UploadId) return nil } } - log.Logf("[obsmover] Abort multipartupload objkey:%s, uploadId:%s, should not be here.\n", + log.Logf("[obsmover] abort multipartupload objkey:%s, uploadId:%s, should not be here.\n", objKey, mover.multiUploadInitOut.UploadId) - return errors.New("internal error") + return errors.New(DMERR_InternalError) } func (mover *ObsMover) CompleteMultipartUpload(objKey string, destLoca *LocationInfo) (err error) { @@ -287,22 +342,23 @@ func (mover *ObsMover) CompleteMultipartUpload(objKey string, destLoca *Location completeMultipartUploadInput.Key = objKey completeMultipartUploadInput.UploadId = mover.multiUploadInitOut.UploadId completeMultipartUploadInput.Parts = mover.completeParts - log.Logf("[obsmover] Try to CompleteMultipartUpload for object[%s].", objKey) + log.Logf("[obsmover] try to CompleteMultipartUpload for object[%s].", objKey) for tries := 1; tries <= 3; tries++ { _, err = mover.obsClient.CompleteMultipartUpload(completeMultipartUploadInput) if err != nil { - log.Logf("[obsmover] CompleteMultipartUpload for object[%s] failed %d times, err:%v\n", objKey, tries, err) - if tries == 3 { - return err + log.Logf("[obsmover] complete multipart upload for object[%s] failed %d times, err:%v\n", objKey, tries, err) + e := handleHWObsErrors(err) + if tries >= 3 || e.Error() == DMERR_NoPermission { + return e } } else { - log.Logf("[obsmover] CompleteMultipartUpload for object[%s] successfully", objKey) + log.Logf("[obsmover] complete multipart upload for object[%s] successfully\n", objKey) return nil } } - log.Logf("[obsmover] CompleteMultipartUpload for object[%s], should not be here", objKey) - return errors.New("internal error") + log.Logf("[obsmover] complete multipart upload for object[%s], should not be here\n", objKey) + return errors.New(DMERR_InternalError) } //TODO: Need to support list object page by page @@ -317,20 +373,20 @@ func ListObjs(loca *LocationInfo, filt *pb.Filter) ([]obs.Content, error) { } output, err := obsClient.ListObjects(input) if err != nil { - log.Logf("[obsmover] List objects failed, err:%v\n", err) - return nil, err + log.Logf("[obsmover] list objects failed, err:%v\n", err) + return nil, handleHWObsErrors(err) } objs := output.Contents for output.IsTruncated == true { input.Marker = output.NextMarker output, err = obsClient.ListObjects(input) if err != nil { - log.Logf("[obsmover] List objects failed, err:%v\n", err) - return nil, err + log.Logf("[obsmover] list objects failed, err:%v\n", err) + return nil, handleHWObsErrors(err) } objs = append(objs, output.Contents...) 
} - log.Logf("[obsmover] Number of objects in bucket[%s] is %d.\n", loca.BucketName, len(objs)) + log.Logf("[obsmover] number of objects in bucket[%s] is %d.\n", loca.BucketName, len(objs)) return objs, nil } diff --git a/datamover/pkg/kafka/consumer.go b/datamover/pkg/kafka/consumer.go index 551d3a3f0..f2173a7f4 100644 --- a/datamover/pkg/kafka/consumer.go +++ b/datamover/pkg/kafka/consumer.go @@ -9,6 +9,7 @@ import ( "github.com/Shopify/sarama" cluster "github.com/bsm/sarama-cluster" migration "github.com/opensds/multi-cloud/datamover/pkg/drivers/https" + "github.com/opensds/multi-cloud/datamover/pkg/drivers/lifecycle" ) var consumer *cluster.Consumer @@ -42,6 +43,7 @@ func Init(addrs []string, group string, topics []string) error { } migration.Init() + lifecycle.Init() //log.Logf("Init consumer finish, err:%v\n", err) logger.Println("Init consumer finish") @@ -78,11 +80,15 @@ func LoopConsume() { if ok { switch msg.Topic { case "migration": - //TODO: think about how many jobs can run concurrently - logger.Printf("Got an migration job:%s\n", msg.Value) + // TODO: think about how many jobs can run concurrently + logger.Printf("got an migration job:%s\n", msg.Value) err = migration.HandleMsg(msg.Value) + case "lifecycle": + // Do lifecycle actions. + logger.Printf("got an lifecycle action request:%s\n", msg.Value) + err = lifecycle.HandleMsg(msg.Value) default: - logger.Printf("Not support topic:%s\n", msg.Topic) + logger.Printf("not supported topic:%s\n", msg.Topic) } if err == nil { consumer.MarkOffset(msg, "") diff --git a/datamover/pkg/service.go b/datamover/pkg/service.go index bddb87b9d..2df34ec2b 100644 --- a/datamover/pkg/service.go +++ b/datamover/pkg/service.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved. +// Copyright 2019 The OpenSDS Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -36,20 +36,20 @@ func InitDatamoverService() error { for i := 0; i < len(config); i++ { addr := strings.Split(config[i], "//") if len(addr) != 2 { - log.Log("Invalid addr:", config[i]) + log.Log("invalid addr:", config[i]) } else { addrs = append(addrs, addr[1]) } } - topics := []string{"migration"} + topics := []string{"migration", "lifecycle"} err := kafka.Init(addrs, dataMoverGroup, topics) if err != nil { - log.Log("Init kafka consumer failed.") + log.Log("init kafka consumer failed.") return nil } go kafka.LoopConsume() datamoverID := os.Getenv("HOSTNAME") - log.Logf("Init datamover[ID#%s] finished.\n", datamoverID) + log.Logf("init datamover[ID#%s] finished.\n", datamoverID) return nil } diff --git a/datamover/pkg/utils/data_type.go b/datamover/pkg/utils/data_type.go index 600e57b25..e39d13ca1 100644 --- a/datamover/pkg/utils/data_type.go +++ b/datamover/pkg/utils/data_type.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018 Huawei Technologies Co., Ltd. All Rights Reserved. +// Copyright 2019 The OpenSDS Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -23,6 +23,17 @@ type LocationInfo struct { Access string Security string BakendName string + ClassName string +} + +type BackendInfo struct { + StorType string // aws-s3,azure-blob,hw-obs,ceph-s3 etc. 
+ Region string + EndPoint string + BucketName string // remote bucket name + Access string + Security string + BakendName string } type MoveWorker interface { @@ -31,8 +42,20 @@ type MoveWorker interface { DeleteObj(objKey string, loca *LocationInfo) error MultiPartDownloadInit(srcLoca *LocationInfo) error DownloadRange(objKey string, srcLoca *LocationInfo, buf []byte, start int64, end int64) (size int64, err error) - MultiPartUploadInit(objKey string, destLoca *LocationInfo) error + MultiPartUploadInit(objKey string, destLoca *LocationInfo) (uploadId string, err error) UploadPart(objKey string, destLoca *LocationInfo, upBytes int64, buf []byte, partNumber int64, offset int64) error AbortMultipartUpload(objKey string, destLoca *LocationInfo) error CompleteMultipartUpload(objKey string, destLoca *LocationInfo) error + ChangeStorageClass(objKey *string, newClass *string, bkend *BackendInfo) error +} + +const ( + OBJMETA_TIER = "tier" + OBJMETA_BACKEND = "backend" +) + +type GetMultipartUploadRequest struct { + Bucket string + Prefix string + Days int32 } diff --git a/datamover/pkg/utils/error.go b/datamover/pkg/utils/error.go new file mode 100644 index 000000000..4970420c7 --- /dev/null +++ b/datamover/pkg/utils/error.go @@ -0,0 +1,13 @@ +package utils + +const ( + DMERR_Success = "success" + DMERR_NoPermission = "permissionDenied" + DMERR_InternalError = "internalError" + DMERR_UnSupportBackendType = "unsupport backend type" + DMERR_UnSupportStorageClass = "unsupport storage class" + DMERR_UnSupportOperation = "unsupport operation" + DMERR_NoSuchKey = "noSuchkey" + DMERR_NoSuchUpload = "noSuchUpload" + DMERR_TransitionInprogress = "transition in-progress" +) diff --git a/datamover/proto/datamover.micro.go b/datamover/proto/datamover.micro.go index d56fcde58..e7179f8ce 100644 --- a/datamover/proto/datamover.micro.go +++ b/datamover/proto/datamover.micro.go @@ -13,6 +13,8 @@ It has these top-level messages: Connector RunJobRequest RunJobResponse + LifecycleActionRequest + LifecycleActionResonse */ package datamover @@ -21,10 +23,9 @@ import fmt "fmt" import math "math" import ( - context "context" - client "github.com/micro/go-micro/client" server "github.com/micro/go-micro/server" + context "context" ) // Reference imports to suppress errors if they are not otherwise used. @@ -47,6 +48,7 @@ var _ server.Option type DatamoverService interface { Runjob(ctx context.Context, in *RunJobRequest, opts ...client.CallOption) (*RunJobResponse, error) + DoLifecycleAction(ctx context.Context, in *LifecycleActionRequest, opts ...client.CallOption) (*LifecycleActionResonse, error) } type datamoverService struct { @@ -77,15 +79,27 @@ func (c *datamoverService) Runjob(ctx context.Context, in *RunJobRequest, opts . return out, nil } +func (c *datamoverService) DoLifecycleAction(ctx context.Context, in *LifecycleActionRequest, opts ...client.CallOption) (*LifecycleActionResonse, error) { + req := c.c.NewRequest(c.name, "Datamover.DoLifecycleAction", in) + out := new(LifecycleActionResonse) + err := c.c.Call(ctx, req, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // Server API for Datamover service type DatamoverHandler interface { Runjob(context.Context, *RunJobRequest, *RunJobResponse) error + DoLifecycleAction(context.Context, *LifecycleActionRequest, *LifecycleActionResonse) error } func RegisterDatamoverHandler(s server.Server, hdlr DatamoverHandler, opts ...server.HandlerOption) error { type datamover interface { Runjob(ctx context.Context, in *RunJobRequest, out *RunJobResponse) error + DoLifecycleAction(ctx context.Context, in *LifecycleActionRequest, out *LifecycleActionResonse) error } type Datamover struct { datamover @@ -101,3 +115,7 @@ type datamoverHandler struct { func (h *datamoverHandler) Runjob(ctx context.Context, in *RunJobRequest, out *RunJobResponse) error { return h.DatamoverHandler.Runjob(ctx, in, out) } + +func (h *datamoverHandler) DoLifecycleAction(ctx context.Context, in *LifecycleActionRequest, out *LifecycleActionResonse) error { + return h.DatamoverHandler.DoLifecycleAction(ctx, in, out) +} diff --git a/datamover/proto/datamover.pb.go b/datamover/proto/datamover.pb.go index ad8af18af..4565183f9 100644 --- a/datamover/proto/datamover.pb.go +++ b/datamover/proto/datamover.pb.go @@ -30,7 +30,7 @@ func (m *KV) Reset() { *m = KV{} } func (m *KV) String() string { return proto.CompactTextString(m) } func (*KV) ProtoMessage() {} func (*KV) Descriptor() ([]byte, []int) { - return fileDescriptor_datamover_e265cc4b3f9a1e9d, []int{0} + return fileDescriptor_datamover_a96d9e6bb0d61e1a, []int{0} } func (m *KV) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_KV.Unmarshal(m, b) @@ -76,7 +76,7 @@ func (m *Filter) Reset() { *m = Filter{} } func (m *Filter) String() string { return proto.CompactTextString(m) } func (*Filter) ProtoMessage() {} func (*Filter) Descriptor() ([]byte, []int) { - return fileDescriptor_datamover_e265cc4b3f9a1e9d, []int{1} + return fileDescriptor_datamover_a96d9e6bb0d61e1a, []int{1} } func (m *Filter) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Filter.Unmarshal(m, b) @@ -123,7 +123,7 @@ func (m *Connector) Reset() { *m = Connector{} } func (m *Connector) String() string { return proto.CompactTextString(m) } func (*Connector) ProtoMessage() {} func (*Connector) Descriptor() ([]byte, []int) { - return fileDescriptor_datamover_e265cc4b3f9a1e9d, []int{2} + return fileDescriptor_datamover_a96d9e6bb0d61e1a, []int{2} } func (m *Connector) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Connector.Unmarshal(m, b) @@ -179,7 +179,7 @@ func (m *RunJobRequest) Reset() { *m = RunJobRequest{} } func (m *RunJobRequest) String() string { return proto.CompactTextString(m) } func (*RunJobRequest) ProtoMessage() {} func (*RunJobRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_datamover_e265cc4b3f9a1e9d, []int{3} + return fileDescriptor_datamover_a96d9e6bb0d61e1a, []int{3} } func (m *RunJobRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RunJobRequest.Unmarshal(m, b) @@ -245,7 +245,7 @@ func (m *RunJobResponse) Reset() { *m = RunJobResponse{} } func (m *RunJobResponse) String() string { return proto.CompactTextString(m) } func (*RunJobResponse) ProtoMessage() {} func (*RunJobResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_datamover_e265cc4b3f9a1e9d, []int{4} + return fileDescriptor_datamover_a96d9e6bb0d61e1a, []int{4} } func (m *RunJobResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RunJobResponse.Unmarshal(m, b) @@ -272,37 +272,197 @@ func (m *RunJobResponse) GetErr() string { 
return "" } +type LifecycleActionRequest struct { + ObjKey string `protobuf:"bytes,1,opt,name=objKey,proto3" json:"objKey,omitempty"` + BucketName string `protobuf:"bytes,2,opt,name=bucketName,proto3" json:"bucketName,omitempty"` + Action int32 `protobuf:"varint,3,opt,name=action,proto3" json:"action,omitempty"` + SourceTier int32 `protobuf:"varint,4,opt,name=sourceTier,proto3" json:"sourceTier,omitempty"` + TargetTier int32 `protobuf:"varint,5,opt,name=targetTier,proto3" json:"targetTier,omitempty"` + SourceBackend string `protobuf:"bytes,6,opt,name=sourceBackend,proto3" json:"sourceBackend,omitempty"` + TargetBackend string `protobuf:"bytes,7,opt,name=targetBackend,proto3" json:"targetBackend,omitempty"` + ObjSize int64 `protobuf:"varint,8,opt,name=objSize,proto3" json:"objSize,omitempty"` + LastModified int64 `protobuf:"varint,9,opt,name=lastModified,proto3" json:"lastModified,omitempty"` + UploadId string `protobuf:"bytes,10,opt,name=uploadId,proto3" json:"uploadId,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LifecycleActionRequest) Reset() { *m = LifecycleActionRequest{} } +func (m *LifecycleActionRequest) String() string { return proto.CompactTextString(m) } +func (*LifecycleActionRequest) ProtoMessage() {} +func (*LifecycleActionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datamover_a96d9e6bb0d61e1a, []int{5} +} +func (m *LifecycleActionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LifecycleActionRequest.Unmarshal(m, b) +} +func (m *LifecycleActionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LifecycleActionRequest.Marshal(b, m, deterministic) +} +func (dst *LifecycleActionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LifecycleActionRequest.Merge(dst, src) +} +func (m *LifecycleActionRequest) XXX_Size() int { + return xxx_messageInfo_LifecycleActionRequest.Size(m) +} +func (m *LifecycleActionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LifecycleActionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LifecycleActionRequest proto.InternalMessageInfo + +func (m *LifecycleActionRequest) GetObjKey() string { + if m != nil { + return m.ObjKey + } + return "" +} + +func (m *LifecycleActionRequest) GetBucketName() string { + if m != nil { + return m.BucketName + } + return "" +} + +func (m *LifecycleActionRequest) GetAction() int32 { + if m != nil { + return m.Action + } + return 0 +} + +func (m *LifecycleActionRequest) GetSourceTier() int32 { + if m != nil { + return m.SourceTier + } + return 0 +} + +func (m *LifecycleActionRequest) GetTargetTier() int32 { + if m != nil { + return m.TargetTier + } + return 0 +} + +func (m *LifecycleActionRequest) GetSourceBackend() string { + if m != nil { + return m.SourceBackend + } + return "" +} + +func (m *LifecycleActionRequest) GetTargetBackend() string { + if m != nil { + return m.TargetBackend + } + return "" +} + +func (m *LifecycleActionRequest) GetObjSize() int64 { + if m != nil { + return m.ObjSize + } + return 0 +} + +func (m *LifecycleActionRequest) GetLastModified() int64 { + if m != nil { + return m.LastModified + } + return 0 +} + +func (m *LifecycleActionRequest) GetUploadId() string { + if m != nil { + return m.UploadId + } + return "" +} + +type LifecycleActionResonse struct { + Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *LifecycleActionResonse) Reset() { *m = LifecycleActionResonse{} } +func (m *LifecycleActionResonse) String() string { return proto.CompactTextString(m) } +func (*LifecycleActionResonse) ProtoMessage() {} +func (*LifecycleActionResonse) Descriptor() ([]byte, []int) { + return fileDescriptor_datamover_a96d9e6bb0d61e1a, []int{6} +} +func (m *LifecycleActionResonse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LifecycleActionResonse.Unmarshal(m, b) +} +func (m *LifecycleActionResonse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LifecycleActionResonse.Marshal(b, m, deterministic) +} +func (dst *LifecycleActionResonse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LifecycleActionResonse.Merge(dst, src) +} +func (m *LifecycleActionResonse) XXX_Size() int { + return xxx_messageInfo_LifecycleActionResonse.Size(m) +} +func (m *LifecycleActionResonse) XXX_DiscardUnknown() { + xxx_messageInfo_LifecycleActionResonse.DiscardUnknown(m) +} + +var xxx_messageInfo_LifecycleActionResonse proto.InternalMessageInfo + +func (m *LifecycleActionResonse) GetErr() string { + if m != nil { + return m.Err + } + return "" +} + func init() { proto.RegisterType((*KV)(nil), "KV") proto.RegisterType((*Filter)(nil), "Filter") proto.RegisterType((*Connector)(nil), "Connector") proto.RegisterType((*RunJobRequest)(nil), "RunJobRequest") proto.RegisterType((*RunJobResponse)(nil), "RunJobResponse") -} - -func init() { proto.RegisterFile("datamover.proto", fileDescriptor_datamover_e265cc4b3f9a1e9d) } - -var fileDescriptor_datamover_e265cc4b3f9a1e9d = []byte{ - // 323 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x51, 0xc1, 0x4e, 0xc2, 0x40, - 0x10, 0xb5, 0x2d, 0x54, 0x18, 0x14, 0xcc, 0x44, 0x4d, 0xa3, 0x89, 0x21, 0x6b, 0x62, 0x88, 0x9a, - 0x1e, 0xf0, 0xa0, 0x67, 0x49, 0x3c, 0x48, 0xe2, 0x61, 0x35, 0xdc, 0x0b, 0x1d, 0xc8, 0x0a, 0xec, - 0xd6, 0xed, 0x2e, 0x91, 0xff, 0xf2, 0x03, 0x4d, 0x97, 0x82, 0x34, 0xde, 0x66, 0xde, 0xcc, 0x7b, - 0x6f, 0x76, 0x1f, 0x74, 0xd2, 0xc4, 0x24, 0x4b, 0xb5, 0x22, 0x1d, 0x67, 0x5a, 0x19, 0xc5, 0xee, - 0xc1, 0x1f, 0x8e, 0xf0, 0x04, 0x82, 0x39, 0xad, 0x23, 0xaf, 0xeb, 0xf5, 0x9a, 0xbc, 0x28, 0xf1, - 0x14, 0xea, 0xab, 0x64, 0x61, 0x29, 0xf2, 0x1d, 0xb6, 0x69, 0xd8, 0x23, 0x84, 0x2f, 0x62, 0x61, - 0x48, 0xe3, 0x39, 0x84, 0x99, 0xa6, 0xa9, 0xf8, 0x2e, 0x49, 0x65, 0x87, 0x67, 0x10, 0x98, 0x64, - 0x16, 0xf9, 0xdd, 0xa0, 0xd7, 0xea, 0x07, 0xf1, 0x70, 0xc4, 0x8b, 0x9e, 0xa5, 0xd0, 0x1c, 0x28, - 0x29, 0x69, 0x62, 0x94, 0x46, 0x84, 0xda, 0xc7, 0x3a, 0xa3, 0x92, 0xe9, 0x6a, 0xbc, 0x02, 0x78, - 0xb6, 0x93, 0x39, 0x99, 0xb7, 0x64, 0xb9, 0x35, 0xdd, 0x43, 0xf0, 0x1a, 0xa0, 0x10, 0x18, 0x28, - 0x39, 0x15, 0xb3, 0x28, 0xf8, 0x93, 0xdf, 0x83, 0xd9, 0x8f, 0x07, 0xc7, 0xdc, 0xca, 0x57, 0x35, - 0xe6, 0xf4, 0x65, 0x29, 0x37, 0xd8, 0x06, 0x5f, 0xa4, 0xa5, 0x91, 0x2f, 0x52, 0xbc, 0x05, 0xc8, - 0x95, 0xd5, 0x13, 0x2a, 0x58, 0xce, 0xa6, 0xd5, 0x87, 0x78, 0x77, 0x1a, 0xdf, 0x9b, 0xe2, 0x0d, - 0x34, 0x52, 0xca, 0x8d, 0xdb, 0x0c, 0xfe, 0x6d, 0xee, 0x66, 0x78, 0x09, 0xb5, 0xa9, 0x58, 0x98, - 0xa8, 0xe6, 0x76, 0x0e, 0xe3, 0xcd, 0x0f, 0x71, 0x07, 0x22, 0x83, 0x23, 0x4d, 0xcb, 0x44, 0xc8, - 0x77, 0x27, 0x1c, 0xd5, 0xbb, 0x5e, 0xaf, 0xc1, 0x2b, 0x18, 0x63, 0xd0, 0xde, 0x5e, 0x9d, 0x67, - 0x4a, 0xe6, 0x54, 0xe4, 0x41, 0x5a, 0x6f, 0xf3, 0x20, 0xad, 0xfb, 0x4f, 0xd0, 0xdc, 0x45, 0x87, - 0x77, 0x10, 0x72, 0x2b, 0x3f, 0xd5, 0x18, 0xdb, 0x71, 0xe5, 0xbd, 0x17, 0x9d, 0xb8, 0xaa, 
0xc4, - 0x0e, 0xc6, 0xa1, 0x0b, 0xfa, 0xe1, 0x37, 0x00, 0x00, 0xff, 0xff, 0x07, 0x25, 0xac, 0xd3, 0xfb, - 0x01, 0x00, 0x00, + proto.RegisterType((*LifecycleActionRequest)(nil), "LifecycleActionRequest") + proto.RegisterType((*LifecycleActionResonse)(nil), "LifecycleActionResonse") +} + +func init() { proto.RegisterFile("datamover.proto", fileDescriptor_datamover_a96d9e6bb0d61e1a) } + +var fileDescriptor_datamover_a96d9e6bb0d61e1a = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0xcd, 0x8e, 0xd3, 0x30, + 0x10, 0x26, 0x49, 0x9b, 0xb6, 0xb3, 0x6c, 0x17, 0x2c, 0x28, 0x56, 0x91, 0x50, 0x15, 0x10, 0xaa, + 0x16, 0x94, 0x43, 0x39, 0x70, 0x66, 0x17, 0x81, 0xa0, 0xc0, 0xc1, 0xbb, 0xda, 0xbb, 0x93, 0x4c, + 0x2b, 0xb7, 0x69, 0x5c, 0x1c, 0x67, 0x45, 0xb9, 0xf1, 0x4e, 0x3c, 0x07, 0xcf, 0x84, 0xec, 0xfc, + 0xb4, 0xd9, 0xed, 0x2d, 0xdf, 0xcf, 0xcc, 0xd8, 0xdf, 0xc4, 0x70, 0x96, 0x70, 0xcd, 0x37, 0xf2, + 0x16, 0x55, 0xb8, 0x55, 0x52, 0xcb, 0xe0, 0x2d, 0xb8, 0xf3, 0x1b, 0xf2, 0x08, 0xbc, 0x35, 0xee, + 0xa8, 0x33, 0x71, 0xa6, 0x03, 0x66, 0x3e, 0xc9, 0x13, 0xe8, 0xde, 0xf2, 0xb4, 0x40, 0xea, 0x5a, + 0xae, 0x04, 0xc1, 0x7b, 0xf0, 0x3f, 0x89, 0x54, 0xa3, 0x22, 0x23, 0xf0, 0xb7, 0x0a, 0x17, 0xe2, + 0x57, 0x55, 0x54, 0x21, 0xf2, 0x14, 0x3c, 0xcd, 0x97, 0xd4, 0x9d, 0x78, 0xd3, 0x93, 0x99, 0x17, + 0xce, 0x6f, 0x98, 0xc1, 0x41, 0x02, 0x83, 0x4b, 0x99, 0x65, 0x18, 0x6b, 0xa9, 0x08, 0x81, 0xce, + 0xf5, 0x6e, 0x8b, 0x55, 0xa5, 0xfd, 0x26, 0x2f, 0x00, 0x2e, 0x8a, 0x78, 0x8d, 0xfa, 0x07, 0xdf, + 0xd4, 0x43, 0x0f, 0x18, 0xf2, 0x12, 0xc0, 0x34, 0xb8, 0x94, 0xd9, 0x42, 0x2c, 0xa9, 0xb7, 0x6f, + 0x7f, 0x40, 0x07, 0x7f, 0x1d, 0x38, 0x65, 0x45, 0xf6, 0x55, 0x46, 0x0c, 0x7f, 0x16, 0x98, 0x6b, + 0x32, 0x04, 0x57, 0x24, 0xd5, 0x20, 0x57, 0x24, 0xe4, 0x1c, 0x20, 0x97, 0x85, 0x8a, 0xd1, 0x54, + 0xd9, 0x31, 0x27, 0x33, 0x08, 0x9b, 0xa3, 0xb1, 0x03, 0x95, 0xbc, 0x86, 0x7e, 0x82, 0xb9, 0xb6, + 0x4e, 0xef, 0x9e, 0xb3, 0xd1, 0xc8, 0x73, 0xe8, 0x2c, 0x44, 0xaa, 0x69, 0xc7, 0x7a, 0x7a, 0x61, + 0x99, 0x10, 0xb3, 0x24, 0x09, 0xe0, 0xa1, 0xc2, 0x0d, 0x17, 0xd9, 0x95, 0x6d, 0x4c, 0xbb, 0x13, + 0x67, 0xda, 0x67, 0x2d, 0x2e, 0x08, 0x60, 0x58, 0x9f, 0x3a, 0xdf, 0xca, 0x2c, 0x47, 0xb3, 0x0f, + 0x54, 0xaa, 0xde, 0x07, 0x2a, 0x15, 0xfc, 0x73, 0x61, 0xf4, 0x4d, 0x2c, 0x30, 0xde, 0xc5, 0x29, + 0x7e, 0x88, 0xb5, 0x90, 0x59, 0x7d, 0xc7, 0x11, 0xf8, 0x32, 0x5a, 0xcd, 0x9b, 0xfd, 0x55, 0xc8, + 0x44, 0x1a, 0xdd, 0x8b, 0x74, 0xcf, 0x98, 0x3a, 0x6e, 0x1b, 0xd9, 0xdb, 0x75, 0x59, 0x85, 0x4c, + 0x5d, 0x99, 0xc2, 0xb5, 0x40, 0x65, 0x6f, 0xd5, 0x65, 0x07, 0x8c, 0xd1, 0x35, 0x57, 0x4b, 0xd4, + 0x56, 0xef, 0x96, 0xfa, 0x9e, 0x21, 0xaf, 0xe0, 0xb4, 0x74, 0x5f, 0xf0, 0x78, 0x8d, 0x59, 0x42, + 0x7d, 0x3b, 0xba, 0x4d, 0x1a, 0x57, 0x59, 0x53, 0xbb, 0x7a, 0xa5, 0xab, 0x45, 0x12, 0x0a, 0x3d, + 0x19, 0xad, 0xae, 0xc4, 0x6f, 0xa4, 0xfd, 0x89, 0x33, 0xf5, 0x58, 0x0d, 0x4d, 0xb0, 0x29, 0xcf, + 0xf5, 0x77, 0x99, 0x88, 0x85, 0xc0, 0x84, 0x0e, 0xac, 0xdc, 0xe2, 0xc8, 0x18, 0xfa, 0xc5, 0x36, + 0x95, 0x3c, 0xf9, 0x92, 0x50, 0xb0, 0xed, 0x1b, 0x1c, 0x9c, 0x1f, 0xc9, 0x33, 0x3f, 0x1e, 0xfe, + 0xec, 0x8f, 0x03, 0x83, 0xe6, 0xe1, 0x90, 0x37, 0xe0, 0xb3, 0x22, 0x5b, 0xc9, 0x88, 0x0c, 0xc3, + 0xd6, 0xdf, 0x36, 0x3e, 0x0b, 0xdb, 0x7b, 0x0c, 0x1e, 0x90, 0xcf, 0xf0, 0xf8, 0xa3, 0xbc, 0x33, + 0x88, 0x3c, 0x0b, 0x8f, 0xaf, 0x72, 0x7c, 0x44, 0xc8, 0xcb, 0x46, 0x91, 0x6f, 0xdf, 0xeb, 0xbb, + 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa4, 0xc8, 0x88, 0x2f, 0xc2, 0x03, 0x00, 0x00, } diff --git 
a/datamover/proto/datamover.proto b/datamover/proto/datamover.proto index 0a8bc73dd..12ba8a857 100644 --- a/datamover/proto/datamover.proto +++ b/datamover/proto/datamover.proto @@ -2,6 +2,7 @@ syntax = "proto3"; service datamover { rpc Runjob(RunJobRequest) returns (RunJobResponse) {} + rpc DoLifecycleAction(LifecycleActionRequest) returns (LifecycleActionResonse) {} } message KV { @@ -31,3 +32,20 @@ message RunJobRequest{ message RunJobResponse { string err = 1; } + +message LifecycleActionRequest { + string objKey = 1; // for transition and expiration + string bucketName = 2; + int32 action = 3; // 0-Expiration, 1-InCloudTransition, 2-CrossCloudTransition, 3-AbortMultipartUpload + int32 sourceTier = 4; // only for transition + int32 targetTier = 5; // only for transition + string sourceBackend = 6; // for transition and expiration + string targetBackend = 7; // for transition and abort incomplete multipart upload + int64 objSize = 8; // for transition + int64 lastModified = 9; // for transition and expiration + string uploadId = 10; // only for abort incomplete multipart upload +} + +message LifecycleActionResonse { + string err = 1; +} diff --git a/docker-compose.yml b/docker-compose.yml index 574de891b..53c0379ec 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -17,6 +17,7 @@ services: - KAFKA_ZOOKEEPER_CONNECT=zookeeper:2181 volumes: - /var/run/docker.sock:/var/run/docker.sock + - /etc/localtime:/etc/localtime depends_on: - zookeeper @@ -24,6 +25,7 @@ services: image: opensdsio/multi-cloud-api volumes: - /etc/ssl/certs:/etc/ssl/certs + - /etc/localtime:/etc/localtime ports: - 8089:8089 environment: @@ -41,6 +43,8 @@ services: environment: - MICRO_REGISTRY=mdns - DB_HOST=datastore:27017 + volumes: + - /etc/localtime:/etc/localtime s3: image: opensdsio/multi-cloud-s3 @@ -49,6 +53,8 @@ services: - DB_HOST=datastore:27017 # 0 means use user defined storage class, otherwise use default storage class. - USE_DEFAULT_STORAGE_CLASS=1 + volumes: + - /etc/localtime:/etc/localtime dataflow: image: opensdsio/multi-cloud-dataflow @@ -56,14 +62,19 @@ services: - MICRO_REGISTRY=mdns - DB_HOST=datastore:27017 - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092 + - LIFECYCLE_CRON_CONFIG=0 0 0 * * ? + volumes: + - /etc/localtime:/etc/localtime depends_on: - zookeeper - kafka + - s3 datamover: image: opensdsio/multi-cloud-datamover volumes: - /etc/ssl/certs:/etc/ssl/certs + - /etc/localtime:/etc/localtime environment: - MICRO_REGISTRY=mdns - DB_HOST=datastore:27017 diff --git a/s3/pkg/db/db.go b/s3/pkg/db/db.go index 06bade3b9..e03afc2e2 100644 --- a/s3/pkg/db/db.go +++ b/s3/pkg/db/db.go @@ -17,9 +17,9 @@ package db import ( "fmt" - . "github.com/opensds/multi-cloud/s3/pkg/utils" "github.com/opensds/multi-cloud/s3/pkg/db/drivers/mongo" . "github.com/opensds/multi-cloud/s3/pkg/exception" + .
"github.com/opensds/multi-cloud/s3/pkg/utils" pb "github.com/opensds/multi-cloud/s3/proto" ) @@ -67,5 +67,9 @@ type DBAdapter interface { DeleteObject(in *pb.DeleteObjectInput) S3Error GetObject(in *pb.GetObjectInput, out *pb.Object) S3Error ListObjects(in *pb.ListObjectsRequest, out *[]pb.Object) S3Error - UpdateObjMeta(objKey *string, bucketName *string, setting map[string]interface{}) S3Error + DeleteBucketLifecycle(in *pb.DeleteLifecycleInput) S3Error + UpdateObjMeta(objKey *string, bucketName *string, lastmod int64, setting map[string]interface{}) S3Error + AddMultipartUpload(record *pb.MultipartUploadRecord) S3Error + DeleteMultipartUpload(record *pb.MultipartUploadRecord) S3Error + ListUploadRecords(in *pb.ListMultipartUploadRequest, out *[]pb.MultipartUploadRecord) S3Error } diff --git a/s3/pkg/db/drivers/mongo/bucketlifecycledelete.go b/s3/pkg/db/drivers/mongo/bucketlifecycledelete.go new file mode 100644 index 000000000..1051d35bb --- /dev/null +++ b/s3/pkg/db/drivers/mongo/bucketlifecycledelete.go @@ -0,0 +1,40 @@ +// Copyright 2019 The OpenSDS Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mongo + +import ( + "github.com/globalsign/mgo/bson" + "github.com/micro/go-log" + . "github.com/opensds/multi-cloud/s3/pkg/exception" + pb "github.com/opensds/multi-cloud/s3/proto" +) + +func (ad *adapter) DeleteBucketLifecycle(in *pb.DeleteLifecycleInput) S3Error { + //Check if the connector exist or not + ss := ad.s.Copy() + defer ss.Close() + + //Delete it from database + c := ss.DB(DataBaseName).C(BucketMD) + log.Logf("bucketName is %v:", in.Bucket) + err := c.Update(bson.M{"name": in.Bucket}, bson.M{"$pull": bson.M{"lifecycleconfiguration": bson.M{"id": in.RuleID}}}) + if err != nil { + log.Logf("delete lifecycle for bucket : %s and lifecycle ruleID : %s failed,err:%v.\n", in.Bucket, in.RuleID, err) + return NoSuchBucket + } else { + log.Logf("delete bucket lifecycle with rule id %s from database successfully", in.RuleID) + return NoError + } +} diff --git a/s3/pkg/db/drivers/mongo/listobjects.go b/s3/pkg/db/drivers/mongo/listobjects.go index 059e44ee7..791ad9f93 100644 --- a/s3/pkg/db/drivers/mongo/listobjects.go +++ b/s3/pkg/db/drivers/mongo/listobjects.go @@ -23,6 +23,7 @@ import ( "github.com/micro/go-log" "github.com/opensds/multi-cloud/api/pkg/common" . 
"github.com/opensds/multi-cloud/s3/pkg/exception" + "github.com/opensds/multi-cloud/s3/pkg/utils" pb "github.com/opensds/multi-cloud/s3/proto" ) @@ -36,14 +37,13 @@ func (ad *adapter) ListObjects(in *pb.ListObjectsRequest, out *[]pb.Object) S3Er filter := []bson.M{} if in.Filter != nil { if in.Filter[common.KObjKey] != "" { - //str := "^" + in.Filter[common.KObjKey] filter = append(filter, bson.M{"objectkey": bson.M{"$regex": in.Filter[common.KObjKey]}}) } if in.Filter[common.KLastModified] != "" { var tmFilter map[string]string err := json.Unmarshal([]byte(in.Filter[common.KLastModified]), &tmFilter) if err != nil { - log.Logf("unmarshal lastmodified value faild:%s\n", err) + log.Logf("unmarshal lastmodified value failed:%s\n", err) return InvalidQueryParameter } for k, v := range tmFilter { @@ -72,19 +72,20 @@ func (ad *adapter) ListObjects(in *pb.ListObjectsRequest, out *[]pb.Object) S3Er log.Logf("invalid storage class:%s\n", in.Filter[common.KStorageTier]) return InvalidQueryParameter } - filter = append(filter, bson.M{"tier": bson.M{"$lt": tier}}) + filter = append(filter, bson.M{"tier": bson.M{"$lte": tier}}) } } - filter = append(filter, bson.M{"initflag": bson.M{"$ne": "0"}}) - filter = append(filter, bson.M{"isdeletemarker": bson.M{"$ne": "1"}}) + filter = append(filter, bson.M{utils.DBKEY_INITFLAG: bson.M{"$ne": "0"}}) + filter = append(filter, bson.M{utils.DBKEY_DELETEMARKER: bson.M{"$ne": "1"}}) log.Logf("filter:%+v\n", filter) var err error offset := int(in.Offset) limit := int(in.Limit) if limit == 0 { - limit = 1000 // as default + // as default + limit = 1000 } if len(filter) > 0 { err = c.Find(bson.M{"$and": filter}).Skip(offset).Limit(limit).All(out) diff --git a/s3/pkg/db/drivers/mongo/multipartuploadrecord.go b/s3/pkg/db/drivers/mongo/multipartuploadrecord.go new file mode 100644 index 000000000..83cd401e7 --- /dev/null +++ b/s3/pkg/db/drivers/mongo/multipartuploadrecord.go @@ -0,0 +1,66 @@ +package mongo + +import ( + "time" + + "github.com/globalsign/mgo" + "github.com/globalsign/mgo/bson" + "github.com/micro/go-log" + . "github.com/opensds/multi-cloud/s3/pkg/exception" + . 
"github.com/opensds/multi-cloud/s3/pkg/utils" + pb "github.com/opensds/multi-cloud/s3/proto" +) + +var CollMultipartUploadRecord = "multipartUploadRecords" + +func (ad *adapter) AddMultipartUpload(record *pb.MultipartUploadRecord) S3Error { + log.Logf("Add multipart upload: %+v\n", *record) + session := ad.s.Copy() + defer session.Close() + + collection := session.DB(DataBaseName).C(CollMultipartUploadRecord) + err := collection.Insert(record) + if err != nil { + log.Logf("add multipart upload record[uploadid=%s] to database failed: %v\n", record.UploadId, err) + return DBError + } + + log.Logf("add multipart upload record[uploadid=%s] successfully\n", record.UploadId) + return NoError +} + +func (ad *adapter) DeleteMultipartUpload(record *pb.MultipartUploadRecord) S3Error { + log.Logf("Delete multipart upload: %+v\n", *record) + session := ad.s.Copy() + defer session.Close() + + collection := session.DB(DataBaseName).C(CollMultipartUploadRecord) + // objectkey is unique in OpenSDS, uploadid is unique for a specific physical bucket + err := collection.Remove(bson.M{DBKEY_OBJECTKEY: record.ObjectKey, DBKEY_UPLOADID: record.UploadId}) + if err != nil && err != mgo.ErrNotFound { + log.Logf("delete multipart upload record[uploadid=%s] from database failed: %v\n", record.UploadId, err) + return DBError + } + + log.Logf("delete multipart upload record[uploadid=%s] from database sucessfully\n", record.UploadId) + return NoError +} + +func (ad *adapter) ListUploadRecords(in *pb.ListMultipartUploadRequest, out *[]pb.MultipartUploadRecord) S3Error { + ss := ad.s.Copy() + defer ss.Close() + + secs := time.Now().Unix() - int64(in.Days*24*60*60) + log.Logf("list upload records here: bucket=%s, prefix=%s, daysAfterInitiation=%d, limit=%d, offset=%d, secs=%d\n", + in.Bucket, in.Prefix, in.Days, in.Limit, in.Offset, secs) + + c := ss.DB(DataBaseName).C(CollMultipartUploadRecord) + filter := bson.M{"bucket": in.Bucket, "inittime": bson.M{"$lte": secs}, "objectkey": bson.M{"$regex": "^" + in.Prefix}} + err := c.Find(filter).Skip(int(in.Offset)).Limit(int(in.Limit)).All(out) + if err != nil && err != mgo.ErrNotFound { + log.Logf("list upload records failed:%v\n", err) + return DBError + } + + return NoError +} diff --git a/s3/pkg/db/drivers/mongo/objectput.go b/s3/pkg/db/drivers/mongo/objectput.go index ee8001ef8..fd2469c70 100644 --- a/s3/pkg/db/drivers/mongo/objectput.go +++ b/s3/pkg/db/drivers/mongo/objectput.go @@ -19,6 +19,7 @@ import ( "github.com/globalsign/mgo/bson" "github.com/micro/go-log" . "github.com/opensds/multi-cloud/s3/pkg/exception" + . 
"github.com/opensds/multi-cloud/s3/pkg/utils" pb "github.com/opensds/multi-cloud/s3/proto" ) @@ -27,7 +28,7 @@ func (ad *adapter) CreateObject(in *pb.Object) S3Error { defer ss.Close() out := pb.Object{} c := ss.DB(DataBaseName).C(in.BucketName) - err := c.Find(bson.M{"objectkey": in.ObjectKey}).One(out) + err := c.Find(bson.M{DBKEY_OBJECTKEY: in.ObjectKey}).One(out) if err == mgo.ErrNotFound { err := c.Insert(&in) if err != nil { @@ -45,34 +46,33 @@ func (ad *adapter) UpdateObject(in *pb.Object) S3Error { ss := ad.s.Copy() defer ss.Close() c := ss.DB(DataBaseName).C(in.BucketName) - err := c.Update(bson.M{"objectkey": in.ObjectKey}, in) + log.Logf("update object:%+v\n", *in) + err := c.Update(bson.M{DBKEY_OBJECTKEY: in.ObjectKey}, in) if err == mgo.ErrNotFound { - log.Log("Update object to database failed, err:%v\n", err) + log.Log("update object to database failed, err:%v\n", err) return NoSuchObject } else if err != nil { - log.Log("Update object to database failed, err:%v\n", err) + log.Log("update object to database failed, err:%v\n", err) return InternalError } return NoError } -func (ad *adapter) UpdateObjMeta(objKey *string, bucketName *string, setting map[string]interface{}) S3Error { +func (ad *adapter) UpdateObjMeta(objKey *string, bucketName *string, lastmod int64, setting map[string]interface{}) S3Error { ss := ad.s.Copy() defer ss.Close() c := ss.DB(DataBaseName).C(*bucketName) - selector := bson.M{"objectkey":objKey} - sets := []bson.M{} - for k, v := range setting { - sets = append(sets, bson.M{k:v}) - } - data := bson.M{"$set":sets} - err := c.Update(selector, data) + log.Logf("update object metadata: key=%s, bucket=%s, lastmodified=%d\n", *objKey, *bucketName, lastmod) + selector := bson.M{DBKEY_OBJECTKEY: *objKey, DBKEY_LASTMODIFIED: lastmod} + data := bson.M{"$set": setting} + err := c.Update(selector, data) if err != nil { - log.Logf("update object metadata failed:%v.\n", err) + log.Logf("update object[key=%s] metadata failed:%v.\n", *objKey, err) return DBError } return NoError } + diff --git a/s3/pkg/model/xmlstruct.go b/s3/pkg/model/xmlstruct.go index c32599ce6..7ce523365 100644 --- a/s3/pkg/model/xmlstruct.go +++ b/s3/pkg/model/xmlstruct.go @@ -80,6 +80,39 @@ type ListPartsOutput struct { Parts []Part `xml:"Part"` } +type LifecycleConfiguration struct { + Rule []Rule `xml:"Rule"` +} + +type Rule struct { + ID string `xml:"ID"` + Filter Filter `xml:"Filter"` + Status string `xml:"Status"` + Transition []Transition `xml:"Transition"` + Expiration []Expiration `xml:"Expiration"` + AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload"` +} + +type Filter struct { + Prefix string `xml:"Prefix"` +} + +type Transition struct { + Days int32 `xml:"Days"` + StorageClass string `xml:"StorageClass"` + Backend string `xml:"Backend"` +} + +type Expiration struct { + Days int32 `xml:"Days"` + //Delete marker will be used in later release + //ExpiredObjectDeleteMarker string `xml:"ExpiredObjectDeleteMArker"` +} + +type AbortIncompleteMultipartUpload struct { + DaysAfterInitiation int32 `xml:"DaysAfterInitiation"` +} + type StorageClass struct { Name string `xml:"Name"` Tier int32 `xml:"Tier"` @@ -88,4 +121,4 @@ type StorageClass struct { type ListStorageClasses struct { Xmlns string `xml:"xmlns,attr"` Classes []StorageClass `xml:"Class"` -} +} \ No newline at end of file diff --git a/s3/pkg/service.go b/s3/pkg/service.go index 7cda4da60..26bc151c0 100644 --- a/s3/pkg/service.go +++ b/s3/pkg/service.go @@ -16,10 +16,12 @@ package pkg 
import ( "context" + "errors" "fmt" "net/http" "os" "strconv" + "strings" "github.com/Azure/azure-storage-blob-go/azblob" "github.com/micro/go-log" @@ -33,10 +35,10 @@ import ( type Int2String map[int32]string type String2Int map[string]int32 -// map from cloud vendor name to it's map relation relationship between internal tier to it's storage class name. +// map from cloud vendor name to a map, which is used to map from internal tier to its storage class name. var Int2ExtTierMap map[string]*Int2String -// map from cloud vendor name to it's map relation relationship between it's storage class name to internal tier. +// map from cloud vendor name to a map, which is used to map from storage class name to internal tier. var Ext2IntTierMap map[string]*String2Int // map from a specific tier to an array of tiers, that means transition can happens from the specific tier to those tiers in the array. @@ -45,21 +47,21 @@ var SupportedClasses []pb.StorageClass type s3Service struct{} -func getTierFromName(className string) (int32, S3Error) { - v, ok := Ext2IntTierMap[OSTYPE_OPENSDS] +func getNameFromTier(tier int32) (string, error) { + v, ok := Int2ExtTierMap[OSTYPE_OPENSDS] if !ok { - log.Logf("get tier of storage class[%s] failed.\n", className) - return 0, InternalError + log.Logf("get opensds storage class of tier[%d] failed.\n", tier) + return "", errors.New("internal error") } - v2, ok := (*v)[className] + v2, ok := (*v)[tier] if !ok { - log.Logf("get tier of storage class[%s] failed.\n", className) - return 0, InternalError + log.Logf("get opensds storage class of tier[%d] failed.\n", tier) + return "", errors.New("internal error") } - log.Logf("Get tier of storage class[%s] successfully.\n", className) - return v2, NoError + log.Logf("opensds storage class of tier[%d] is %s.\n", tier, v2) + return v2, nil } func loadAWSDefault(i2e *map[string]*Int2String, e2i *map[string]*String2Int) { @@ -174,6 +176,7 @@ func loadDefaultStorageClass() error { SupportedClasses = append(SupportedClasses, pb.StorageClass{Name: string(AWS_STANDARD), Tier: int32(Tier1)}) SupportedClasses = append(SupportedClasses, pb.StorageClass{Name: string(AWS_STANDARD_IA), Tier: int32(Tier99)}) SupportedClasses = append(SupportedClasses, pb.StorageClass{Name: string(AWS_GLACIER), Tier: int32(Tier999)}) + log.Logf("Supported storage classes:%v\n", SupportedClasses) Int2ExtTierMap = make(map[string]*Int2String) @@ -199,9 +202,11 @@ func loadUserDefinedStorageClass() error { } func loadDefaultTransition() error { + // transition from a tier to the same tier is valid in the case of cross-cloud transition TransitionMap = make(map[int32][]int32) - TransitionMap[Tier99] = []int32{Tier1} - TransitionMap[Tier999] = []int32{Tier1, Tier99} + TransitionMap[Tier1] = []int32{Tier1} + TransitionMap[Tier99] = []int32{Tier1, Tier99} + TransitionMap[Tier999] = []int32{Tier1, Tier99, Tier999} log.Logf("loadDefaultTransition:%+v\n", TransitionMap) return nil @@ -218,7 +223,7 @@ func initStorageClass() { val, err := strconv.ParseInt(set, 10, 64) log.Logf("USE_DEFAULT_STORAGE_CLASS:set=%s, val=%d, err=%v.\n", set, val, err) if err != nil { - log.Logf("invalid USE_DEFAULT_STORAGE_CLASS:%s", set) + log.Logf("invalid USE_DEFAULT_STORAGE_CLASS:%s\n", set) panic("init s3service failed") } @@ -268,11 +273,11 @@ func (b *s3Service) CreateBucket(ctx context.Context, in *pb.Bucket, out *pb.Bas log.Log("CreateBucket is called in s3 service.") bucket := pb.Bucket{} err := db.DbAdapter.GetBucketByName(in.Name, &bucket) - //err := db.DbAdapter.CreateBucket(in) if
err.Code != ERR_OK && err.Code != http.StatusNotFound { return err.Error() } + if err.Code == http.StatusNotFound { log.Log(".CreateBucket is called in s3 service.") err1 := db.DbAdapter.CreateBucket(in) @@ -392,7 +397,7 @@ func (b *s3Service) DeleteObject(ctx context.Context, in *pb.DeleteObjectInput, return err.Error() } object.IsDeleteMarker = "1" - log.Log("UpdateObject is called in s3 service.") + log.Log("DeleteObject is called in s3 service.") err1 := db.DbAdapter.UpdateObject(&object) if err1.Code != ERR_OK { return err.Error() @@ -401,6 +406,35 @@ func (b *s3Service) DeleteObject(ctx context.Context, in *pb.DeleteObjectInput, return nil } +func (b *s3Service) DeleteBucketLifecycle(ctx context.Context, in *pb.DeleteLifecycleInput, out *pb.BaseResponse) error { + log.Log("DeleteBucketLifecycle is called in s3 service.") + getlifecycleinput := pb.DeleteLifecycleInput{Bucket: in.Bucket, RuleID: in.RuleID} + log.Logf("Delete bucket lifecycle input in s3 service: %v", getlifecycleinput) + err := db.DbAdapter.DeleteBucketLifecycle(&getlifecycleinput) + if err.Code != ERR_OK { + msg := "Delete bucket lifecycle failed for rule $1" + out.Msg = strings.Replace(msg, "$1", in.RuleID, 1) + return err.Error() + } + msg := "Delete bucket lifecycle succeeded for rule $1" + out.Msg = strings.Replace(msg, "$1", in.RuleID, 1) + return nil +} + +func (b *s3Service) UpdateBucket(ctx context.Context, in *pb.Bucket, out *pb.BaseResponse) error { + log.Log("UpdateBucket is called in s3 service.") + + in.Deleted = false + err := db.DbAdapter.UpdateBucket(in) + if err.Code != ERR_OK { + out.ErrorCode = fmt.Sprintf("%d", err.Code) + out.Msg = err.Description + return err.Error() + } + out.Msg = "Updated bucket successfully." + return nil +} + func NewS3Service() pb.S3Handler { host := os.Getenv("DB_HOST") dbstor := Database{Credential: "unkonwn", Driver: "mongodb", Endpoint: host} @@ -414,7 +448,7 @@ func NewS3Service() pb.S3Handler { func (b *s3Service) GetTierMap(ctx context.Context, in *pb.BaseRequest, out *pb.GetTierMapResponse) error { log.Log("GetTierMap ...") - //Get map from internal tier to external class name. + // Get map from internal tier to external class name. out.Tier2Name = make(map[string]*pb.Tier2ClassName) for k, v := range Int2ExtTierMap { var val pb.Tier2ClassName @@ -425,7 +459,7 @@ func (b *s3Service) GetTierMap(ctx context.Context, in *pb.BaseRequest, out *pb. out.Tier2Name[k] = &val } - //Get transition map. + // Get transition map. for k, v := range TransitionMap { for _, t := range v { trans := fmt.Sprintf("%d:%d", t, k) @@ -438,18 +472,18 @@ func (b *s3Service) GetTierMap(ctx context.Context, in *pb.BaseRequest, out *pb.
} func (b *s3Service) UpdateObjMeta(ctx context.Context, in *pb.UpdateObjMetaRequest, out *pb.BaseResponse) error { - log.Logf("Update meatadata, setting:%v\n", in.Setting) + log.Logf("Update metadata, objkey:%s, lastmodified:%d, setting:%v\n", in.ObjKey, in.LastModified, in.Setting) valid := make(map[string]struct{}) valid["tier"] = struct{}{} valid["backend"] = struct{}{} - ret, err := CheckReqObjMeta(in.Setting, valid) + set, err := CheckReqObjMeta(in.Setting, valid) if err.Code != ERR_OK { out.ErrorCode = fmt.Sprintf("%s", err.Code) out.Msg = err.Description return err.Error() } - err = db.DbAdapter.UpdateObjMeta(&in.ObjKey, &in.BucketName, ret) + err = db.DbAdapter.UpdateObjMeta(&in.ObjKey, &in.BucketName, in.LastModified, set) if err.Code != ERR_OK { out.ErrorCode = fmt.Sprintf("%s", err.Code) out.Msg = err.Description @@ -474,6 +508,15 @@ func CheckReqObjMeta(req map[string]string, valid map[string]struct{}) (map[stri return nil, BadRequest } ret[k] = v1 + + // update storage class accordingly + name, err := getNameFromTier(int32(v1)) + if err != nil { + return nil, InternalError + } else { + ret["storageclass"] = name + } } else { ret[k] = v } @@ -495,3 +538,36 @@ func (b *s3Service) GetBackendTypeByTier(ctx context.Context, in *pb.GetBackendT return nil } +func (b *s3Service) AddUploadRecord(ctx context.Context, record *pb.MultipartUploadRecord, out *pb.BaseResponse) error { + log.Logf("add multipart upload record") + err := db.DbAdapter.AddMultipartUpload(record) + if err.Code != ERR_OK { + return err.Error() + } + + return nil +} + +func (b *s3Service) DeleteUploadRecord(ctx context.Context, record *pb.MultipartUploadRecord, out *pb.BaseResponse) error { + log.Logf("delete multipart upload record") + err := db.DbAdapter.DeleteMultipartUpload(record) + if err.Code != ERR_OK { + return err.Error() + } + + return nil +} + +func (b *s3Service) ListUploadRecord(ctx context.Context, in *pb.ListMultipartUploadRequest, out *pb.ListMultipartUploadResponse) error { + log.Logf("list multipart upload records") + records := []pb.MultipartUploadRecord{} + err := db.DbAdapter.ListUploadRecords(in, &records) + if err.Code != ERR_OK { + return err.Error() + } + for i := 0; i < len(records); i++ { + out.Records = append(out.Records, &records[i]) + } + + return nil +} diff --git a/s3/pkg/utils/utils.go b/s3/pkg/utils/utils.go index 6915b62a4..eade22aae 100644 --- a/s3/pkg/utils/utils.go +++ b/s3/pkg/utils/utils.go @@ -23,15 +23,15 @@ type Database struct { // Tier1, Tier99 and Tier999 just like the tiers of hot, warm, cold. // In the future, we will provide the ability for users to add new storage tiers, if we use 1, 2 and 3, then no space for new storage tiers.
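// Illustration: how the tier constants below combine with the TransitionMap
// built by loadDefaultTransition in s3/pkg/service.go, where each key is a
// target tier and the values are the source tiers allowed to move to it.
// The helper name transitionAllowed is an assumption made for this sketch.
//
//   func transitionAllowed(source, target int32) bool {
//       for _, from := range TransitionMap[target] {
//           if from == source {
//               return true
//           }
//       }
//       return false
//   }
//
//   // With the defaults: Tier1 -> Tier99 is allowed, Tier99 -> Tier1 is not,
//   // and same-tier moves are allowed to support cross-cloud transition.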
const ( - Tier1 = 1 - Tier99 = 99 + Tier1 = 1 + Tier99 = 99 Tier999 = 999 ) const ( - AWS_STANDARD = "STANDARD" + AWS_STANDARD = "STANDARD" AWS_STANDARD_IA = "STANDARD_IA" - AWS_GLACIER = "Glacier" + AWS_GLACIER = "GLACIER" ) const ( @@ -40,18 +40,28 @@ const ( const ( GCS_MULTI_REGIONAL = "MULTI_REGIONAL" - GCS_REGIONAL = "REGIONAL" - GCS_NEARLINE = "NEARLINE" - GCS_COLDLINE = "COLDLINE" + GCS_REGIONAL = "REGIONAL" + GCS_NEARLINE = "NEARLINE" + GCS_COLDLINE = "COLDLINE" ) //Object Storage Type const ( - OSTYPE_OPENSDS = "OpenSDS" - OSTYPE_AWS = "aws-s3" - OSTYPE_Azure = "azure-blob" - OSTYPE_OBS = "hw-obs" - OSTYPE_GCS = "gcp-s3" - OSTYPE_CEPTH = "ceph-s3" + OSTYPE_OPENSDS = "OpenSDS" + OSTYPE_AWS = "aws-s3" + OSTYPE_Azure = "azure-blob" + OSTYPE_OBS = "hw-obs" + OSTYPE_GCS = "gcp-s3" + OSTYPE_CEPTH = "ceph-s3" OSTYPE_FUSIONSTORAGE = "fusionstorage-object" ) + +const ( + DBKEY_DELETEMARKER = "isdeletemarker" + DBKEY_INITFLAG = "initflag" + DBKEY_OBJECTKEY = "objectkey" + DBKEY_UPLOADID = "uploadid" + DBKEY_LASTMODIFIED = "lastmodified" + DBKEY_SUPPOSEDSTATUS = "supposedstatus" + DBKEY_LOCKOBJ_OBJKEY = "objkey" +) diff --git a/s3/proto/s3.micro.go b/s3/proto/s3.micro.go index 5f0042e04..43909236f 100644 --- a/s3/proto/s3.micro.go +++ b/s3/proto/s3.micro.go @@ -22,6 +22,7 @@ It has these top-level messages: Tag LifecycleFilter Action + AbortMultipartUpload LifecycleRule ReplicationInfo Bucket @@ -45,6 +46,10 @@ It has these top-level messages: GetStorageClassesResponse GetBackendTypeByTierRequest GetBackendTypeByTierResponse + DeleteLifecycleInput + MultipartUploadRecord + ListMultipartUploadRequest + ListMultipartUploadResponse */ package s3 @@ -90,6 +95,11 @@ type S3Service interface { UpdateObjMeta(ctx context.Context, in *UpdateObjMetaRequest, opts ...client.CallOption) (*BaseResponse, error) GetStorageClasses(ctx context.Context, in *BaseRequest, opts ...client.CallOption) (*GetStorageClassesResponse, error) GetBackendTypeByTier(ctx context.Context, in *GetBackendTypeByTierRequest, opts ...client.CallOption) (*GetBackendTypeByTierResponse, error) + DeleteBucketLifecycle(ctx context.Context, in *DeleteLifecycleInput, opts ...client.CallOption) (*BaseResponse, error) + UpdateBucket(ctx context.Context, in *Bucket, opts ...client.CallOption) (*BaseResponse, error) + AddUploadRecord(ctx context.Context, in *MultipartUploadRecord, opts ...client.CallOption) (*BaseResponse, error) + DeleteUploadRecord(ctx context.Context, in *MultipartUploadRecord, opts ...client.CallOption) (*BaseResponse, error) + ListUploadRecord(ctx context.Context, in *ListMultipartUploadRequest, opts ...client.CallOption) (*ListMultipartUploadResponse, error) } type s3Service struct { @@ -240,6 +250,56 @@ func (c *s3Service) GetBackendTypeByTier(ctx context.Context, in *GetBackendType return out, nil } +func (c *s3Service) DeleteBucketLifecycle(ctx context.Context, in *DeleteLifecycleInput, opts ...client.CallOption) (*BaseResponse, error) { + req := c.c.NewRequest(c.name, "S3.DeleteBucketLifecycle", in) + out := new(BaseResponse) + err := c.c.Call(ctx, req, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *s3Service) UpdateBucket(ctx context.Context, in *Bucket, opts ...client.CallOption) (*BaseResponse, error) { + req := c.c.NewRequest(c.name, "S3.UpdateBucket", in) + out := new(BaseResponse) + err := c.c.Call(ctx, req, out, opts...) 
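// Illustration: invoking the new lifecycle API through this generated client.
// The NewS3Service constructor name follows the usual protoc-gen-micro
// pattern and, like the bucket and rule ID values, is an assumption.
//
//   sc := NewS3Service("s3", client.DefaultClient)
//   rsp, err := sc.DeleteBucketLifecycle(context.Background(),
//       &DeleteLifecycleInput{Bucket: "b1", RuleID: "rule-1"})
//   if err != nil {
//       // transport error
//   } else {
//       fmt.Println(rsp.Msg) // e.g. "Delete bucket lifecycle succeeded for rule rule-1"
//   }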
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *s3Service) AddUploadRecord(ctx context.Context, in *MultipartUploadRecord, opts ...client.CallOption) (*BaseResponse, error) { + req := c.c.NewRequest(c.name, "S3.AddUploadRecord", in) + out := new(BaseResponse) + err := c.c.Call(ctx, req, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *s3Service) DeleteUploadRecord(ctx context.Context, in *MultipartUploadRecord, opts ...client.CallOption) (*BaseResponse, error) { + req := c.c.NewRequest(c.name, "S3.DeleteUploadRecord", in) + out := new(BaseResponse) + err := c.c.Call(ctx, req, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *s3Service) ListUploadRecord(ctx context.Context, in *ListMultipartUploadRequest, opts ...client.CallOption) (*ListMultipartUploadResponse, error) { + req := c.c.NewRequest(c.name, "S3.ListUploadRecord", in) + out := new(ListMultipartUploadResponse) + err := c.c.Call(ctx, req, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // Server API for S3 service type S3Handler interface { @@ -256,6 +316,11 @@ type S3Handler interface { UpdateObjMeta(context.Context, *UpdateObjMetaRequest, *BaseResponse) error GetStorageClasses(context.Context, *BaseRequest, *GetStorageClassesResponse) error GetBackendTypeByTier(context.Context, *GetBackendTypeByTierRequest, *GetBackendTypeByTierResponse) error + DeleteBucketLifecycle(context.Context, *DeleteLifecycleInput, *BaseResponse) error + UpdateBucket(context.Context, *Bucket, *BaseResponse) error + AddUploadRecord(context.Context, *MultipartUploadRecord, *BaseResponse) error + DeleteUploadRecord(context.Context, *MultipartUploadRecord, *BaseResponse) error + ListUploadRecord(context.Context, *ListMultipartUploadRequest, *ListMultipartUploadResponse) error } func RegisterS3Handler(s server.Server, hdlr S3Handler, opts ...server.HandlerOption) error { @@ -273,6 +338,11 @@ func RegisterS3Handler(s server.Server, hdlr S3Handler, opts ...server.HandlerOp UpdateObjMeta(ctx context.Context, in *UpdateObjMetaRequest, out *BaseResponse) error GetStorageClasses(ctx context.Context, in *BaseRequest, out *GetStorageClassesResponse) error GetBackendTypeByTier(ctx context.Context, in *GetBackendTypeByTierRequest, out *GetBackendTypeByTierResponse) error + DeleteBucketLifecycle(ctx context.Context, in *DeleteLifecycleInput, out *BaseResponse) error + UpdateBucket(ctx context.Context, in *Bucket, out *BaseResponse) error + AddUploadRecord(ctx context.Context, in *MultipartUploadRecord, out *BaseResponse) error + DeleteUploadRecord(ctx context.Context, in *MultipartUploadRecord, out *BaseResponse) error + ListUploadRecord(ctx context.Context, in *ListMultipartUploadRequest, out *ListMultipartUploadResponse) error } type S3 struct { s3 @@ -336,3 +406,23 @@ func (h *s3Handler) GetStorageClasses(ctx context.Context, in *BaseRequest, out func (h *s3Handler) GetBackendTypeByTier(ctx context.Context, in *GetBackendTypeByTierRequest, out *GetBackendTypeByTierResponse) error { return h.S3Handler.GetBackendTypeByTier(ctx, in, out) } + +func (h *s3Handler) DeleteBucketLifecycle(ctx context.Context, in *DeleteLifecycleInput, out *BaseResponse) error { + return h.S3Handler.DeleteBucketLifecycle(ctx, in, out) +} + +func (h *s3Handler) UpdateBucket(ctx context.Context, in *Bucket, out *BaseResponse) error { + return h.S3Handler.UpdateBucket(ctx, in, out) +} + +func (h *s3Handler) AddUploadRecord(ctx context.Context, in *MultipartUploadRecord, 
out *BaseResponse) error { + return h.S3Handler.AddUploadRecord(ctx, in, out) +} + +func (h *s3Handler) DeleteUploadRecord(ctx context.Context, in *MultipartUploadRecord, out *BaseResponse) error { + return h.S3Handler.DeleteUploadRecord(ctx, in, out) +} + +func (h *s3Handler) ListUploadRecord(ctx context.Context, in *ListMultipartUploadRequest, out *ListMultipartUploadResponse) error { + return h.S3Handler.ListUploadRecord(ctx, in, out) +} diff --git a/s3/proto/s3.pb.go b/s3/proto/s3.pb.go index 303c435eb..77b05fa32 100644 --- a/s3/proto/s3.pb.go +++ b/s3/proto/s3.pb.go @@ -32,7 +32,7 @@ func (m *ServerSideEncryption) Reset() { *m = ServerSideEncryption{} } func (m *ServerSideEncryption) String() string { return proto.CompactTextString(m) } func (*ServerSideEncryption) ProtoMessage() {} func (*ServerSideEncryption) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{0} + return fileDescriptor_s3_38670a1bdb8348d8, []int{0} } func (m *ServerSideEncryption) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ServerSideEncryption.Unmarshal(m, b) @@ -92,7 +92,7 @@ func (m *VersioningConfiguration) Reset() { *m = VersioningConfiguration func (m *VersioningConfiguration) String() string { return proto.CompactTextString(m) } func (*VersioningConfiguration) ProtoMessage() {} func (*VersioningConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{1} + return fileDescriptor_s3_38670a1bdb8348d8, []int{1} } func (m *VersioningConfiguration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_VersioningConfiguration.Unmarshal(m, b) @@ -138,7 +138,7 @@ func (m *RedirectAllRequestsTo) Reset() { *m = RedirectAllRequestsTo{} } func (m *RedirectAllRequestsTo) String() string { return proto.CompactTextString(m) } func (*RedirectAllRequestsTo) ProtoMessage() {} func (*RedirectAllRequestsTo) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{2} + return fileDescriptor_s3_38670a1bdb8348d8, []int{2} } func (m *RedirectAllRequestsTo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RedirectAllRequestsTo.Unmarshal(m, b) @@ -187,7 +187,7 @@ func (m *Redirect) Reset() { *m = Redirect{} } func (m *Redirect) String() string { return proto.CompactTextString(m) } func (*Redirect) ProtoMessage() {} func (*Redirect) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{3} + return fileDescriptor_s3_38670a1bdb8348d8, []int{3} } func (m *Redirect) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Redirect.Unmarshal(m, b) @@ -254,7 +254,7 @@ func (m *Condition) Reset() { *m = Condition{} } func (m *Condition) String() string { return proto.CompactTextString(m) } func (*Condition) ProtoMessage() {} func (*Condition) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{4} + return fileDescriptor_s3_38670a1bdb8348d8, []int{4} } func (m *Condition) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Condition.Unmarshal(m, b) @@ -300,7 +300,7 @@ func (m *RoutingRules) Reset() { *m = RoutingRules{} } func (m *RoutingRules) String() string { return proto.CompactTextString(m) } func (*RoutingRules) ProtoMessage() {} func (*RoutingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{5} + return fileDescriptor_s3_38670a1bdb8348d8, []int{5} } func (m *RoutingRules) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_RoutingRules.Unmarshal(m, b) @@ -348,7 +348,7 @@ func (m *WebsiteConfiguration) Reset() { *m = 
WebsiteConfiguration{} } func (m *WebsiteConfiguration) String() string { return proto.CompactTextString(m) } func (*WebsiteConfiguration) ProtoMessage() {} func (*WebsiteConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{6} + return fileDescriptor_s3_38670a1bdb8348d8, []int{6} } func (m *WebsiteConfiguration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_WebsiteConfiguration.Unmarshal(m, b) @@ -412,7 +412,7 @@ func (m *CORSConfiguration) Reset() { *m = CORSConfiguration{} } func (m *CORSConfiguration) String() string { return proto.CompactTextString(m) } func (*CORSConfiguration) ProtoMessage() {} func (*CORSConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{7} + return fileDescriptor_s3_38670a1bdb8348d8, []int{7} } func (m *CORSConfiguration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_CORSConfiguration.Unmarshal(m, b) @@ -486,7 +486,7 @@ func (m *Destination) Reset() { *m = Destination{} } func (m *Destination) String() string { return proto.CompactTextString(m) } func (*Destination) ProtoMessage() {} func (*Destination) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{8} + return fileDescriptor_s3_38670a1bdb8348d8, []int{8} } func (m *Destination) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Destination.Unmarshal(m, b) @@ -534,7 +534,7 @@ func (m *ReplicationRole) Reset() { *m = ReplicationRole{} } func (m *ReplicationRole) String() string { return proto.CompactTextString(m) } func (*ReplicationRole) ProtoMessage() {} func (*ReplicationRole) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{9} + return fileDescriptor_s3_38670a1bdb8348d8, []int{9} } func (m *ReplicationRole) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReplicationRole.Unmarshal(m, b) @@ -594,7 +594,7 @@ func (m *ReplicationConfiguration) Reset() { *m = ReplicationConfigurati func (m *ReplicationConfiguration) String() string { return proto.CompactTextString(m) } func (*ReplicationConfiguration) ProtoMessage() {} func (*ReplicationConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{10} + return fileDescriptor_s3_38670a1bdb8348d8, []int{10} } func (m *ReplicationConfiguration) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReplicationConfiguration.Unmarshal(m, b) @@ -640,7 +640,7 @@ func (m *Tag) Reset() { *m = Tag{} } func (m *Tag) String() string { return proto.CompactTextString(m) } func (*Tag) ProtoMessage() {} func (*Tag) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{11} + return fileDescriptor_s3_38670a1bdb8348d8, []int{11} } func (m *Tag) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Tag.Unmarshal(m, b) @@ -675,8 +675,8 @@ func (m *Tag) GetVal() string { } type LifecycleFilter struct { - Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` - Tags []*Tag `protobuf:"bytes,2,rep,name=tags,proto3" json:"tags,omitempty"` + // Object prefix for lifecycle filter + Prefix string `protobuf:"bytes,1,opt,name=Prefix,proto3" json:"Prefix,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -686,7 +686,7 @@ func (m *LifecycleFilter) Reset() { *m = LifecycleFilter{} } func (m *LifecycleFilter) String() string { return proto.CompactTextString(m) } func (*LifecycleFilter) ProtoMessage() {} func (*LifecycleFilter) Descriptor() ([]byte, []int) { - 
return fileDescriptor_s3_1244b13629ced826, []int{12} + return fileDescriptor_s3_38670a1bdb8348d8, []int{12} } func (m *LifecycleFilter) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LifecycleFilter.Unmarshal(m, b) @@ -713,19 +713,17 @@ func (m *LifecycleFilter) GetPrefix() string { return "" } -func (m *LifecycleFilter) GetTags() []*Tag { - if m != nil { - return m.Tags - } - return nil -} - type Action struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Days int32 `protobuf:"varint,2,opt,name=days,proto3" json:"days,omitempty"` - DeleteMarker string `protobuf:"bytes,4,opt,name=deleteMarker,proto3" json:"deleteMarker,omitempty"` - Tier int32 `protobuf:"varint,5,opt,name=tier,proto3" json:"tier,omitempty"` - Backend string `protobuf:"bytes,6,opt,name=backend,proto3" json:"backend,omitempty"` + // Name of the action transition/expiration + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Days after creation of object + Days int32 `protobuf:"varint,2,opt,name=days,proto3" json:"days,omitempty"` + // Delete marker in case of expiration for versioned bucket + DeleteMarker string `protobuf:"bytes,3,opt,name=deleteMarker,proto3" json:"deleteMarker,omitempty"` + // Storage class tier of the object where object is to be transitioned + Tier int32 `protobuf:"varint,4,opt,name=tier,proto3" json:"tier,omitempty"` + // Destination backend of the object/bucket for Cross-cloud transition + Backend string `protobuf:"bytes,5,opt,name=backend,proto3" json:"backend,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -735,7 +733,7 @@ func (m *Action) Reset() { *m = Action{} } func (m *Action) String() string { return proto.CompactTextString(m) } func (*Action) ProtoMessage() {} func (*Action) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{13} + return fileDescriptor_s3_38670a1bdb8348d8, []int{13} } func (m *Action) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Action.Unmarshal(m, b) @@ -790,21 +788,61 @@ func (m *Action) GetBackend() string { return "" } +type AbortMultipartUpload struct { + // Days after which the abort operation will be performed on incomplete upload + DaysAfterInitiation int32 `protobuf:"varint,1,opt,name=DaysAfterInitiation,proto3" json:"DaysAfterInitiation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AbortMultipartUpload) Reset() { *m = AbortMultipartUpload{} } +func (m *AbortMultipartUpload) String() string { return proto.CompactTextString(m) } +func (*AbortMultipartUpload) ProtoMessage() {} +func (*AbortMultipartUpload) Descriptor() ([]byte, []int) { + return fileDescriptor_s3_38670a1bdb8348d8, []int{14} +} +func (m *AbortMultipartUpload) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AbortMultipartUpload.Unmarshal(m, b) +} +func (m *AbortMultipartUpload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AbortMultipartUpload.Marshal(b, m, deterministic) +} +func (dst *AbortMultipartUpload) XXX_Merge(src proto.Message) { + xxx_messageInfo_AbortMultipartUpload.Merge(dst, src) +} +func (m *AbortMultipartUpload) XXX_Size() int { + return xxx_messageInfo_AbortMultipartUpload.Size(m) +} +func (m *AbortMultipartUpload) XXX_DiscardUnknown() { + xxx_messageInfo_AbortMultipartUpload.DiscardUnknown(m) +} + +var xxx_messageInfo_AbortMultipartUpload 
proto.InternalMessageInfo + +func (m *AbortMultipartUpload) GetDaysAfterInitiation() int32 { + if m != nil { + return m.DaysAfterInitiation + } + return 0 +} + type LifecycleRule struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` - Filter *LifecycleFilter `protobuf:"bytes,3,opt,name=filter,proto3" json:"filter,omitempty"` - Actions []*Action `protobuf:"bytes,4,rep,name=actions,proto3" json:"actions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Status string `protobuf:"bytes,2,opt,name=Status,proto3" json:"Status,omitempty"` + Filter *LifecycleFilter `protobuf:"bytes,3,opt,name=Filter,proto3" json:"Filter,omitempty"` + Actions []*Action `protobuf:"bytes,4,rep,name=actions,proto3" json:"actions,omitempty"` + AbortIncompleteMultipartUpload *AbortMultipartUpload `protobuf:"bytes,5,opt,name=AbortIncompleteMultipartUpload,proto3" json:"AbortIncompleteMultipartUpload,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *LifecycleRule) Reset() { *m = LifecycleRule{} } func (m *LifecycleRule) String() string { return proto.CompactTextString(m) } func (*LifecycleRule) ProtoMessage() {} func (*LifecycleRule) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{14} + return fileDescriptor_s3_38670a1bdb8348d8, []int{15} } func (m *LifecycleRule) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_LifecycleRule.Unmarshal(m, b) @@ -824,9 +862,9 @@ func (m *LifecycleRule) XXX_DiscardUnknown() { var xxx_messageInfo_LifecycleRule proto.InternalMessageInfo -func (m *LifecycleRule) GetId() string { +func (m *LifecycleRule) GetID() string { if m != nil { - return m.Id + return m.ID } return "" } @@ -852,6 +890,13 @@ func (m *LifecycleRule) GetActions() []*Action { return nil } +func (m *LifecycleRule) GetAbortIncompleteMultipartUpload() *AbortMultipartUpload { + if m != nil { + return m.AbortIncompleteMultipartUpload + } + return nil +} + type ReplicationInfo struct { Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` Backend string `protobuf:"bytes,2,opt,name=backend,proto3" json:"backend,omitempty"` @@ -865,7 +910,7 @@ func (m *ReplicationInfo) Reset() { *m = ReplicationInfo{} } func (m *ReplicationInfo) String() string { return proto.CompactTextString(m) } func (*ReplicationInfo) ProtoMessage() {} func (*ReplicationInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{15} + return fileDescriptor_s3_38670a1bdb8348d8, []int{16} } func (m *ReplicationInfo) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ReplicationInfo.Unmarshal(m, b) @@ -931,7 +976,7 @@ func (m *Bucket) Reset() { *m = Bucket{} } func (m *Bucket) String() string { return proto.CompactTextString(m) } func (*Bucket) ProtoMessage() {} func (*Bucket) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{16} + return fileDescriptor_s3_38670a1bdb8348d8, []int{17} } func (m *Bucket) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Bucket.Unmarshal(m, b) @@ -1071,7 +1116,7 @@ func (m *Partion) Reset() { *m = Partion{} } func (m *Partion) String() string { return proto.CompactTextString(m) } func (*Partion) ProtoMessage() {} func (*Partion) Descriptor() ([]byte, []int) { - 
return fileDescriptor_s3_1244b13629ced826, []int{17} + return fileDescriptor_s3_38670a1bdb8348d8, []int{18} } func (m *Partion) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Partion.Unmarshal(m, b) @@ -1141,7 +1186,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{18} + return fileDescriptor_s3_38670a1bdb8348d8, []int{19} } func (m *Version) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Version.Unmarshal(m, b) @@ -1233,7 +1278,7 @@ func (m *Object) Reset() { *m = Object{} } func (m *Object) String() string { return proto.CompactTextString(m) } func (*Object) ProtoMessage() {} func (*Object) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{19} + return fileDescriptor_s3_38670a1bdb8348d8, []int{20} } func (m *Object) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Object.Unmarshal(m, b) @@ -1453,7 +1498,7 @@ func (m *ListBucketsResponse) Reset() { *m = ListBucketsResponse{} } func (m *ListBucketsResponse) String() string { return proto.CompactTextString(m) } func (*ListBucketsResponse) ProtoMessage() {} func (*ListBucketsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{20} + return fileDescriptor_s3_38670a1bdb8348d8, []int{21} } func (m *ListBucketsResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListBucketsResponse.Unmarshal(m, b) @@ -1492,7 +1537,7 @@ func (m *BaseResponse) Reset() { *m = BaseResponse{} } func (m *BaseResponse) String() string { return proto.CompactTextString(m) } func (*BaseResponse) ProtoMessage() {} func (*BaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{21} + return fileDescriptor_s3_38670a1bdb8348d8, []int{22} } func (m *BaseResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BaseResponse.Unmarshal(m, b) @@ -1537,7 +1582,7 @@ func (m *BaseRequest) Reset() { *m = BaseRequest{} } func (m *BaseRequest) String() string { return proto.CompactTextString(m) } func (*BaseRequest) ProtoMessage() {} func (*BaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{22} + return fileDescriptor_s3_38670a1bdb8348d8, []int{23} } func (m *BaseRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_BaseRequest.Unmarshal(m, b) @@ -1581,7 +1626,7 @@ func (m *ListObjectsRequest) Reset() { *m = ListObjectsRequest{} } func (m *ListObjectsRequest) String() string { return proto.CompactTextString(m) } func (*ListObjectsRequest) ProtoMessage() {} func (*ListObjectsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{23} + return fileDescriptor_s3_38670a1bdb8348d8, []int{24} } func (m *ListObjectsRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListObjectsRequest.Unmarshal(m, b) @@ -1654,7 +1699,7 @@ func (m *ListObjectResponse) Reset() { *m = ListObjectResponse{} } func (m *ListObjectResponse) String() string { return proto.CompactTextString(m) } func (*ListObjectResponse) ProtoMessage() {} func (*ListObjectResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{24} + return fileDescriptor_s3_38670a1bdb8348d8, []int{25} } func (m *ListObjectResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListObjectResponse.Unmarshal(m, b) @@ -1685,6 +1730,7 @@ type DeleteObjectInput 
struct { Bucket string `protobuf:"bytes,1,opt,name=Bucket,proto3" json:"Bucket,omitempty"` Key string `protobuf:"bytes,2,opt,name=Key,proto3" json:"Key,omitempty"` Versionid string `protobuf:"bytes,3,opt,name=Versionid,proto3" json:"Versionid,omitempty"` + Lastmodified int64 `protobuf:"varint,4,opt,name=Lastmodified,proto3" json:"Lastmodified,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1694,7 +1740,7 @@ func (m *DeleteObjectInput) Reset() { *m = DeleteObjectInput{} } func (m *DeleteObjectInput) String() string { return proto.CompactTextString(m) } func (*DeleteObjectInput) ProtoMessage() {} func (*DeleteObjectInput) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{25} + return fileDescriptor_s3_38670a1bdb8348d8, []int{26} } func (m *DeleteObjectInput) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_DeleteObjectInput.Unmarshal(m, b) @@ -1735,6 +1781,13 @@ func (m *DeleteObjectInput) GetVersionid() string { return "" } +func (m *DeleteObjectInput) GetLastmodified() int64 { + if m != nil { + return m.Lastmodified + } + return 0 +} + type GetObjectInput struct { Bucket string `protobuf:"bytes,1,opt,name=Bucket,proto3" json:"Bucket,omitempty"` Key string `protobuf:"bytes,2,opt,name=Key,proto3" json:"Key,omitempty"` @@ -1748,7 +1801,7 @@ func (m *GetObjectInput) Reset() { *m = GetObjectInput{} } func (m *GetObjectInput) String() string { return proto.CompactTextString(m) } func (*GetObjectInput) ProtoMessage() {} func (*GetObjectInput) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{26} + return fileDescriptor_s3_38670a1bdb8348d8, []int{27} } func (m *GetObjectInput) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetObjectInput.Unmarshal(m, b) @@ -1802,7 +1855,7 @@ func (m *MultipartUpload) Reset() { *m = MultipartUpload{} } func (m *MultipartUpload) String() string { return proto.CompactTextString(m) } func (*MultipartUpload) ProtoMessage() {} func (*MultipartUpload) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{27} + return fileDescriptor_s3_38670a1bdb8348d8, []int{28} } func (m *MultipartUpload) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_MultipartUpload.Unmarshal(m, b) @@ -1858,7 +1911,7 @@ func (m *ListParts) Reset() { *m = ListParts{} } func (m *ListParts) String() string { return proto.CompactTextString(m) } func (*ListParts) ProtoMessage() {} func (*ListParts) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{28} + return fileDescriptor_s3_38670a1bdb8348d8, []int{29} } func (m *ListParts) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_ListParts.Unmarshal(m, b) @@ -1924,7 +1977,7 @@ func (m *TList) Reset() { *m = TList{} } func (m *TList) String() string { return proto.CompactTextString(m) } func (*TList) ProtoMessage() {} func (*TList) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{29} + return fileDescriptor_s3_38670a1bdb8348d8, []int{30} } func (m *TList) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_TList.Unmarshal(m, b) @@ -1962,7 +2015,7 @@ func (m *Tier2ClassName) Reset() { *m = Tier2ClassName{} } func (m *Tier2ClassName) String() string { return proto.CompactTextString(m) } func (*Tier2ClassName) ProtoMessage() {} func (*Tier2ClassName) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{30} + return fileDescriptor_s3_38670a1bdb8348d8, []int{31} } func (m 
*Tier2ClassName) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_Tier2ClassName.Unmarshal(m, b) @@ -2001,7 +2054,7 @@ func (m *GetTierMapResponse) Reset() { *m = GetTierMapResponse{} } func (m *GetTierMapResponse) String() string { return proto.CompactTextString(m) } func (*GetTierMapResponse) ProtoMessage() {} func (*GetTierMapResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{31} + return fileDescriptor_s3_38670a1bdb8348d8, []int{32} } func (m *GetTierMapResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetTierMapResponse.Unmarshal(m, b) @@ -2038,7 +2091,8 @@ func (m *GetTierMapResponse) GetTier2Name() map[string]*Tier2ClassName { type UpdateObjMetaRequest struct { ObjKey string `protobuf:"bytes,1,opt,name=ObjKey,proto3" json:"ObjKey,omitempty"` BucketName string `protobuf:"bytes,2,opt,name=BucketName,proto3" json:"BucketName,omitempty"` - Setting map[string]string `protobuf:"bytes,3,rep,name=Setting,proto3" json:"Setting,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + LastModified int64 `protobuf:"varint,3,opt,name=LastModified,proto3" json:"LastModified,omitempty"` + Setting map[string]string `protobuf:"bytes,4,rep,name=Setting,proto3" json:"Setting,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2048,7 +2102,7 @@ func (m *UpdateObjMetaRequest) Reset() { *m = UpdateObjMetaRequest{} } func (m *UpdateObjMetaRequest) String() string { return proto.CompactTextString(m) } func (*UpdateObjMetaRequest) ProtoMessage() {} func (*UpdateObjMetaRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{32} + return fileDescriptor_s3_38670a1bdb8348d8, []int{33} } func (m *UpdateObjMetaRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_UpdateObjMetaRequest.Unmarshal(m, b) @@ -2082,6 +2136,13 @@ func (m *UpdateObjMetaRequest) GetBucketName() string { return "" } +func (m *UpdateObjMetaRequest) GetLastModified() int64 { + if m != nil { + return m.LastModified + } + return 0 +} + func (m *UpdateObjMetaRequest) GetSetting() map[string]string { if m != nil { return m.Setting @@ -2101,7 +2162,7 @@ func (m *StorageClass) Reset() { *m = StorageClass{} } func (m *StorageClass) String() string { return proto.CompactTextString(m) } func (*StorageClass) ProtoMessage() {} func (*StorageClass) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{33} + return fileDescriptor_s3_38670a1bdb8348d8, []int{34} } func (m *StorageClass) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_StorageClass.Unmarshal(m, b) @@ -2146,7 +2207,7 @@ func (m *GetStorageClassesResponse) Reset() { *m = GetStorageClassesResp func (m *GetStorageClassesResponse) String() string { return proto.CompactTextString(m) } func (*GetStorageClassesResponse) ProtoMessage() {} func (*GetStorageClassesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{34} + return fileDescriptor_s3_38670a1bdb8348d8, []int{35} } func (m *GetStorageClassesResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetStorageClassesResponse.Unmarshal(m, b) @@ -2184,7 +2245,7 @@ func (m *GetBackendTypeByTierRequest) Reset() { *m = GetBackendTypeByTie func (m *GetBackendTypeByTierRequest) String() string { return proto.CompactTextString(m) } func (*GetBackendTypeByTierRequest) 
ProtoMessage() {} func (*GetBackendTypeByTierRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{35} + return fileDescriptor_s3_38670a1bdb8348d8, []int{36} } func (m *GetBackendTypeByTierRequest) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetBackendTypeByTierRequest.Unmarshal(m, b) @@ -2222,7 +2283,7 @@ func (m *GetBackendTypeByTierResponse) Reset() { *m = GetBackendTypeByTi func (m *GetBackendTypeByTierResponse) String() string { return proto.CompactTextString(m) } func (*GetBackendTypeByTierResponse) ProtoMessage() {} func (*GetBackendTypeByTierResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_s3_1244b13629ced826, []int{36} + return fileDescriptor_s3_38670a1bdb8348d8, []int{37} } func (m *GetBackendTypeByTierResponse) XXX_Unmarshal(b []byte) error { return xxx_messageInfo_GetBackendTypeByTierResponse.Unmarshal(m, b) @@ -2249,6 +2310,230 @@ func (m *GetBackendTypeByTierResponse) GetTypes() []string { return nil } +type DeleteLifecycleInput struct { + Bucket string `protobuf:"bytes,1,opt,name=Bucket,proto3" json:"Bucket,omitempty"` + RuleID string `protobuf:"bytes,2,opt,name=ruleID,proto3" json:"ruleID,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteLifecycleInput) Reset() { *m = DeleteLifecycleInput{} } +func (m *DeleteLifecycleInput) String() string { return proto.CompactTextString(m) } +func (*DeleteLifecycleInput) ProtoMessage() {} +func (*DeleteLifecycleInput) Descriptor() ([]byte, []int) { + return fileDescriptor_s3_38670a1bdb8348d8, []int{38} +} +func (m *DeleteLifecycleInput) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteLifecycleInput.Unmarshal(m, b) +} +func (m *DeleteLifecycleInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteLifecycleInput.Marshal(b, m, deterministic) +} +func (dst *DeleteLifecycleInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteLifecycleInput.Merge(dst, src) +} +func (m *DeleteLifecycleInput) XXX_Size() int { + return xxx_messageInfo_DeleteLifecycleInput.Size(m) +} +func (m *DeleteLifecycleInput) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteLifecycleInput.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteLifecycleInput proto.InternalMessageInfo + +func (m *DeleteLifecycleInput) GetBucket() string { + if m != nil { + return m.Bucket + } + return "" +} + +func (m *DeleteLifecycleInput) GetRuleID() string { + if m != nil { + return m.RuleID + } + return "" +} + +type MultipartUploadRecord struct { + ObjectKey string `protobuf:"bytes,1,opt,name=ObjectKey,proto3" json:"ObjectKey,omitempty"` + Bucket string `protobuf:"bytes,2,opt,name=Bucket,proto3" json:"Bucket,omitempty"` + Backend string `protobuf:"bytes,3,opt,name=Backend,proto3" json:"Backend,omitempty"` + UploadId string `protobuf:"bytes,4,opt,name=UploadId,proto3" json:"UploadId,omitempty"` + InitTime int64 `protobuf:"varint,5,opt,name=InitTime,proto3" json:"InitTime,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MultipartUploadRecord) Reset() { *m = MultipartUploadRecord{} } +func (m *MultipartUploadRecord) String() string { return proto.CompactTextString(m) } +func (*MultipartUploadRecord) ProtoMessage() {} +func (*MultipartUploadRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_s3_38670a1bdb8348d8, []int{39} +} +func (m *MultipartUploadRecord) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_MultipartUploadRecord.Unmarshal(m, b) +} +func (m *MultipartUploadRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MultipartUploadRecord.Marshal(b, m, deterministic) +} +func (dst *MultipartUploadRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_MultipartUploadRecord.Merge(dst, src) +} +func (m *MultipartUploadRecord) XXX_Size() int { + return xxx_messageInfo_MultipartUploadRecord.Size(m) +} +func (m *MultipartUploadRecord) XXX_DiscardUnknown() { + xxx_messageInfo_MultipartUploadRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_MultipartUploadRecord proto.InternalMessageInfo + +func (m *MultipartUploadRecord) GetObjectKey() string { + if m != nil { + return m.ObjectKey + } + return "" +} + +func (m *MultipartUploadRecord) GetBucket() string { + if m != nil { + return m.Bucket + } + return "" +} + +func (m *MultipartUploadRecord) GetBackend() string { + if m != nil { + return m.Backend + } + return "" +} + +func (m *MultipartUploadRecord) GetUploadId() string { + if m != nil { + return m.UploadId + } + return "" +} + +func (m *MultipartUploadRecord) GetInitTime() int64 { + if m != nil { + return m.InitTime + } + return 0 +} + +type ListMultipartUploadRequest struct { + Bucket string `protobuf:"bytes,1,opt,name=Bucket,proto3" json:"Bucket,omitempty"` + Prefix string `protobuf:"bytes,2,opt,name=Prefix,proto3" json:"Prefix,omitempty"` + Days int32 `protobuf:"varint,3,opt,name=Days,proto3" json:"Days,omitempty"` + Limit int32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` + Offset int32 `protobuf:"varint,5,opt,name=offset,proto3" json:"offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMultipartUploadRequest) Reset() { *m = ListMultipartUploadRequest{} } +func (m *ListMultipartUploadRequest) String() string { return proto.CompactTextString(m) } +func (*ListMultipartUploadRequest) ProtoMessage() {} +func (*ListMultipartUploadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_s3_38670a1bdb8348d8, []int{40} +} +func (m *ListMultipartUploadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMultipartUploadRequest.Unmarshal(m, b) +} +func (m *ListMultipartUploadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMultipartUploadRequest.Marshal(b, m, deterministic) +} +func (dst *ListMultipartUploadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMultipartUploadRequest.Merge(dst, src) +} +func (m *ListMultipartUploadRequest) XXX_Size() int { + return xxx_messageInfo_ListMultipartUploadRequest.Size(m) +} +func (m *ListMultipartUploadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListMultipartUploadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMultipartUploadRequest proto.InternalMessageInfo + +func (m *ListMultipartUploadRequest) GetBucket() string { + if m != nil { + return m.Bucket + } + return "" +} + +func (m *ListMultipartUploadRequest) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *ListMultipartUploadRequest) GetDays() int32 { + if m != nil { + return m.Days + } + return 0 +} + +func (m *ListMultipartUploadRequest) GetLimit() int32 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *ListMultipartUploadRequest) GetOffset() int32 { + if m != nil { + return m.Offset + } + return 0 +} + +type ListMultipartUploadResponse struct { + Records 
[]*MultipartUploadRecord `protobuf:"bytes,1,rep,name=records,proto3" json:"records,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMultipartUploadResponse) Reset() { *m = ListMultipartUploadResponse{} } +func (m *ListMultipartUploadResponse) String() string { return proto.CompactTextString(m) } +func (*ListMultipartUploadResponse) ProtoMessage() {} +func (*ListMultipartUploadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_s3_38670a1bdb8348d8, []int{41} +} +func (m *ListMultipartUploadResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMultipartUploadResponse.Unmarshal(m, b) +} +func (m *ListMultipartUploadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMultipartUploadResponse.Marshal(b, m, deterministic) +} +func (dst *ListMultipartUploadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMultipartUploadResponse.Merge(dst, src) +} +func (m *ListMultipartUploadResponse) XXX_Size() int { + return xxx_messageInfo_ListMultipartUploadResponse.Size(m) +} +func (m *ListMultipartUploadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListMultipartUploadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMultipartUploadResponse proto.InternalMessageInfo + +func (m *ListMultipartUploadResponse) GetRecords() []*MultipartUploadRecord { + if m != nil { + return m.Records + } + return nil +} + func init() { proto.RegisterType((*ServerSideEncryption)(nil), "ServerSideEncryption") proto.RegisterType((*VersioningConfiguration)(nil), "VersioningConfiguration") @@ -2264,6 +2549,7 @@ func init() { proto.RegisterType((*Tag)(nil), "Tag") proto.RegisterType((*LifecycleFilter)(nil), "LifecycleFilter") proto.RegisterType((*Action)(nil), "Action") + proto.RegisterType((*AbortMultipartUpload)(nil), "AbortMultipartUpload") proto.RegisterType((*LifecycleRule)(nil), "LifecycleRule") proto.RegisterType((*ReplicationInfo)(nil), "ReplicationInfo") proto.RegisterType((*Bucket)(nil), "Bucket") @@ -2291,147 +2577,168 @@ func init() { proto.RegisterType((*GetStorageClassesResponse)(nil), "GetStorageClassesResponse") proto.RegisterType((*GetBackendTypeByTierRequest)(nil), "GetBackendTypeByTierRequest") proto.RegisterType((*GetBackendTypeByTierResponse)(nil), "GetBackendTypeByTierResponse") -} - -func init() { proto.RegisterFile("s3.proto", fileDescriptor_s3_1244b13629ced826) } - -var fileDescriptor_s3_1244b13629ced826 = []byte{ - // 2180 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x4b, 0x6f, 0x1c, 0xc7, - 0xf1, 0xe7, 0x3e, 0xb9, 0x5b, 0xbb, 0x7c, 0xa8, 0xb5, 0xa2, 0x46, 0x2b, 0xda, 0xd6, 0x7f, 0x20, - 0xcb, 0xb4, 0x0f, 0x03, 0x88, 0xfc, 0xc7, 0x96, 0x85, 0x20, 0xb6, 0xf8, 0x90, 0x42, 0x88, 0x2b, - 0x0a, 0x4d, 0x4a, 0x0a, 0x90, 0x43, 0xd0, 0x9c, 0x69, 0x2e, 0xc7, 0x1c, 0xce, 0xac, 0xa7, 0x7b, - 0x25, 0x6e, 0x8e, 0x4e, 0x80, 0x1c, 0x73, 0xc9, 0x25, 0xa7, 0x7c, 0x10, 0x9f, 0x03, 0x04, 0xf9, - 0x1a, 0x39, 0xe5, 0x5b, 0x04, 0xfd, 0x9a, 0xe9, 0xd9, 0x99, 0x95, 0x20, 0x20, 0x39, 0x6d, 0xd7, - 0xaf, 0xab, 0xbb, 0xab, 0xba, 0xab, 0x7e, 0x55, 0xb3, 0xd0, 0x61, 0x3b, 0xde, 0x24, 0x4d, 0x78, - 0xe2, 0xfe, 0xa5, 0x06, 0x83, 0x13, 0x9a, 0xbe, 0xa5, 0xe9, 0x49, 0x18, 0xd0, 0x83, 0xd8, 0x4f, - 0x67, 0x13, 0x1e, 0x26, 0x31, 0xba, 0x07, 0x3d, 0x39, 0x3e, 0xf1, 0x2f, 0xe8, 0x15, 0x75, 0x6a, - 0xf7, 0x6a, 0x5b, 0x5d, 0x6c, 0x43, 0x68, 0x13, 0xba, 0x24, 0x1a, 0x27, 0x69, 0xc8, 0x2f, 
0xae, - 0x9c, 0xba, 0x9c, 0xcf, 0x01, 0xb1, 0xfe, 0x8a, 0x30, 0x4e, 0xd3, 0xe7, 0x74, 0x76, 0x18, 0x38, - 0x0d, 0xb5, 0xde, 0x82, 0xd0, 0x10, 0x3a, 0x57, 0x24, 0x0e, 0x78, 0x92, 0xce, 0x9c, 0xe6, 0xbd, - 0xda, 0x56, 0x07, 0x67, 0xb2, 0x7b, 0x0c, 0xb7, 0x5f, 0xd3, 0x94, 0x85, 0x49, 0x1c, 0xc6, 0xe3, - 0xbd, 0x24, 0x3e, 0x0f, 0xc7, 0xd3, 0x94, 0x48, 0xc3, 0x36, 0xa0, 0x7d, 0xc2, 0x09, 0x9f, 0x32, - 0x6d, 0x93, 0x96, 0x84, 0x39, 0xa3, 0x73, 0xb2, 0x4f, 0x23, 0xca, 0xa9, 0x31, 0x27, 0x03, 0xdc, - 0x63, 0xb8, 0x85, 0x69, 0x10, 0xa6, 0xd4, 0xe7, 0x4f, 0xa2, 0x08, 0xd3, 0x1f, 0xa7, 0x94, 0x71, - 0x76, 0x9a, 0x08, 0x2b, 0x2e, 0x12, 0xc6, 0x5f, 0x90, 0xcc, 0xc9, 0x4c, 0x16, 0x73, 0xf2, 0x96, - 0xfc, 0x24, 0x72, 0xea, 0xf7, 0x1a, 0x62, 0xce, 0xc8, 0xee, 0x3f, 0x6b, 0xd0, 0x31, 0x3b, 0x16, - 0x14, 0xf5, 0x26, 0x46, 0x2e, 0x1c, 0x50, 0x9f, 0x3b, 0x60, 0x1b, 0x06, 0x29, 0x9d, 0x44, 0xc4, - 0xa7, 0xcf, 0xe9, 0xec, 0x65, 0x4a, 0xcf, 0xc3, 0xeb, 0x37, 0x21, 0xbf, 0xd0, 0xb7, 0x55, 0x39, - 0x87, 0x1e, 0xc0, 0x6a, 0x8e, 0x4b, 0xed, 0xa6, 0xd4, 0x9e, 0x43, 0xd1, 0x57, 0xb0, 0x7e, 0xc1, - 0xf9, 0xc4, 0xd8, 0xb8, 0x97, 0x04, 0xd4, 0x69, 0x49, 0xcd, 0x12, 0xee, 0xbe, 0x83, 0xee, 0x5e, - 0x12, 0x07, 0xa1, 0xbc, 0xe0, 0x2d, 0x58, 0xbb, 0x34, 0x27, 0x1e, 0xfc, 0x38, 0x25, 0x91, 0xb9, - 0xe9, 0x79, 0x18, 0x7d, 0x0f, 0x77, 0xc5, 0x56, 0x07, 0x69, 0x9a, 0xa4, 0x62, 0x1f, 0x4c, 0xf9, - 0x34, 0x8d, 0x69, 0xa0, 0x57, 0x29, 0x6f, 0xdf, 0xa7, 0xe2, 0xfe, 0x0e, 0xfa, 0x38, 0x99, 0xf2, - 0x30, 0x1e, 0xe3, 0x69, 0x44, 0x19, 0xfa, 0x1c, 0x3a, 0xa9, 0x36, 0x4c, 0x1e, 0xda, 0xdb, 0xee, - 0x7a, 0xc6, 0x52, 0x9c, 0x4d, 0xa1, 0x2d, 0xe8, 0xfa, 0xc6, 0x5e, 0x79, 0x4c, 0x6f, 0x1b, 0xbc, - 0xcc, 0x03, 0x9c, 0x4f, 0xba, 0xff, 0xaa, 0xc1, 0xe0, 0x0d, 0x3d, 0x63, 0x21, 0xa7, 0xc5, 0x30, - 0xba, 0x0f, 0x2b, 0x61, 0x1c, 0xd0, 0xeb, 0xfd, 0xc4, 0x9f, 0x5e, 0xd1, 0x98, 0x6b, 0x1f, 0x8b, - 0xa0, 0xd0, 0xa2, 0xc2, 0xf4, 0x4c, 0x4b, 0xf9, 0x54, 0x04, 0xd1, 0x11, 0xdc, 0x4a, 0xab, 0x82, - 0x4b, 0xbe, 0x63, 0x6f, 0x7b, 0xc3, 0xab, 0x0c, 0x3d, 0x5c, 0xbd, 0x08, 0x3d, 0x84, 0x7e, 0x6a, - 0xdd, 0x89, 0xd3, 0xbc, 0xd7, 0xd8, 0xea, 0x6d, 0xaf, 0x78, 0xf6, 0x45, 0xe1, 0x82, 0x8a, 0xfb, - 0xef, 0x1a, 0xdc, 0xd8, 0x3b, 0xc6, 0x27, 0x45, 0x17, 0x57, 0xa1, 0x1e, 0x06, 0xda, 0xaf, 0x7a, - 0x18, 0x88, 0xc8, 0x21, 0x51, 0x94, 0xbc, 0xa3, 0xc1, 0x88, 0xf2, 0x8b, 0x24, 0x30, 0x2f, 0x34, - 0x87, 0x5a, 0x7a, 0xc7, 0x69, 0x38, 0x0e, 0x63, 0xa6, 0xe3, 0x71, 0x0e, 0xb5, 0xf4, 0x7e, 0x4d, - 0x49, 0x40, 0x53, 0x66, 0x22, 0xb1, 0x88, 0x8a, 0x4b, 0xbc, 0x22, 0xd7, 0x4f, 0xc6, 0xf4, 0x84, - 0x8a, 0x87, 0x61, 0x3a, 0x0c, 0x8b, 0xa0, 0xd8, 0x8d, 0x5e, 0x4f, 0x12, 0x96, 0xef, 0xd6, 0x56, - 0xbb, 0x15, 0x51, 0xf7, 0x10, 0x7a, 0xfb, 0x94, 0xf1, 0x30, 0xce, 0xe8, 0xe0, 0x6c, 0xea, 0x5f, - 0x52, 0xf3, 0x80, 0x5a, 0x42, 0x2e, 0xf4, 0x19, 0x4f, 0x52, 0x32, 0xa6, 0x7b, 0x11, 0x61, 0xc6, - 0xd5, 0x02, 0xe6, 0xfe, 0xa1, 0x06, 0x6b, 0x98, 0x4e, 0xa2, 0xd0, 0x97, 0x7b, 0xe1, 0x24, 0xa2, - 0xa5, 0x4b, 0xdb, 0x80, 0xf6, 0x44, 0xc6, 0xbc, 0xde, 0x41, 0x4b, 0xc8, 0x81, 0x65, 0x1a, 0x93, - 0xb3, 0x88, 0x2a, 0x6e, 0xeb, 0x60, 0x23, 0x22, 0x0f, 0x7a, 0x41, 0x6e, 0xa0, 0xbc, 0x93, 0xde, - 0x76, 0xdf, 0xb3, 0x8c, 0xc6, 0xb6, 0x82, 0xfb, 0x1a, 0x1c, 0xcb, 0x88, 0xe2, 0x13, 0x22, 0x68, - 0xa6, 0x49, 0x64, 0x98, 0x49, 0x8e, 0xd1, 0x03, 0x68, 0xa5, 0x32, 0x30, 0xea, 0x32, 0x30, 0xd6, - 0xbd, 0x39, 0x17, 0xb0, 0x9a, 0x76, 0xbf, 0x84, 0xc6, 0x29, 0x19, 0xa3, 0x75, 0x68, 0x5c, 0xd2, - 0x99, 0xde, 0x41, 0x0c, 0x05, 0xf2, 0x96, 0x44, 0xda, 0x1f, 0x31, 0x74, 0xf7, 0x60, 0xed, 0x28, - 0x3c, 0xa7, 0xfe, 
0xcc, 0x8f, 0xe8, 0xd3, 0x30, 0xe2, 0x34, 0xb5, 0xfc, 0xae, 0xcd, 0xf9, 0xdd, - 0xe4, 0x64, 0x6c, 0x0e, 0x6f, 0x7a, 0xa7, 0x64, 0x8c, 0x25, 0xe2, 0xfe, 0x54, 0x83, 0xf6, 0x13, - 0xdf, 0x98, 0x1d, 0xe7, 0x84, 0x2a, 0xc7, 0x02, 0x0b, 0xc8, 0x4c, 0x3d, 0x44, 0x0b, 0xcb, 0xb1, - 0x78, 0xa4, 0x40, 0xf2, 0xf3, 0x88, 0xa4, 0x97, 0x34, 0xd5, 0xf1, 0x53, 0xc0, 0xc4, 0x3a, 0x1e, - 0xd2, 0x54, 0x06, 0x4d, 0x0b, 0xcb, 0xb1, 0xb8, 0xfc, 0x33, 0xe2, 0x5f, 0xd2, 0x38, 0xd0, 0x41, - 0x62, 0x44, 0xf7, 0x8f, 0x35, 0x58, 0xc9, 0x5c, 0x11, 0xc9, 0x51, 0xf5, 0xa0, 0x4c, 0xd5, 0x0f, - 0xfd, 0xa0, 0x4a, 0x42, 0x5b, 0xd0, 0x3e, 0x97, 0xae, 0xeb, 0xac, 0x5d, 0xf7, 0xe6, 0xae, 0x04, - 0xeb, 0x79, 0xf4, 0x7f, 0xb0, 0x4c, 0xa4, 0x9f, 0x26, 0x37, 0x97, 0x3d, 0xe5, 0x37, 0x36, 0xb8, - 0x3b, 0x2e, 0x04, 0xd6, 0x61, 0x7c, 0x9e, 0x58, 0xe7, 0xd6, 0x0a, 0xe7, 0x5a, 0xbe, 0xd4, 0x0b, - 0xbe, 0x94, 0x42, 0xb8, 0x51, 0x11, 0xc2, 0x3f, 0xb7, 0xa0, 0xbd, 0xab, 0x22, 0xbe, 0xea, 0xd2, - 0x07, 0xd0, 0x4a, 0xde, 0xc5, 0x34, 0xd5, 0x5b, 0x2b, 0x41, 0x94, 0x06, 0x39, 0xd8, 0x0f, 0xd9, - 0x24, 0x22, 0x33, 0x59, 0x9a, 0xd4, 0xe6, 0x25, 0x5c, 0x04, 0x0b, 0xf1, 0x23, 0xfd, 0x32, 0x62, - 0x28, 0xcc, 0xf2, 0x53, 0x2a, 0x1d, 0xdb, 0x27, 0x5c, 0x15, 0x95, 0x06, 0x2e, 0x60, 0xa2, 0x18, - 0xf3, 0x94, 0xc4, 0x2c, 0x14, 0x9c, 0xd9, 0x96, 0xf9, 0x91, 0x03, 0xc2, 0x65, 0xf5, 0xc4, 0x81, - 0xb3, 0xac, 0x72, 0x47, 0x8b, 0xe8, 0x10, 0x06, 0xac, 0xa2, 0x1b, 0x71, 0x3a, 0xf2, 0x49, 0x6e, - 0x79, 0x55, 0xad, 0x0a, 0xae, 0x5c, 0x82, 0x30, 0xdc, 0x7e, 0x5b, 0xdd, 0x42, 0x38, 0x5d, 0xb9, - 0x9b, 0xe3, 0x2d, 0x68, 0x31, 0xf0, 0xa2, 0x85, 0xf6, 0x5b, 0x41, 0xf1, 0xad, 0x0e, 0x61, 0xf0, - 0xae, 0xa2, 0xcc, 0x38, 0x3d, 0x6d, 0x78, 0x55, 0x0d, 0xc2, 0x95, 0x4b, 0xd0, 0x03, 0x68, 0xfa, - 0x49, 0xca, 0x9c, 0xbe, 0x5c, 0x8a, 0xbc, 0x12, 0xb1, 0x63, 0x39, 0x8f, 0x5e, 0x81, 0x93, 0x2e, - 0xe0, 0x0d, 0x67, 0x45, 0xae, 0xbd, 0xe3, 0x2d, 0x22, 0x16, 0xbc, 0x70, 0x29, 0x7a, 0x0a, 0x1b, - 0x91, 0x09, 0xfc, 0xe2, 0xa6, 0xab, 0x32, 0xd8, 0x57, 0xbd, 0x42, 0x7e, 0xe1, 0x05, 0xda, 0x22, - 0x4c, 0x14, 0x15, 0xbf, 0x4c, 0xa2, 0xd0, 0x9f, 0x39, 0x6b, 0x2a, 0x7a, 0x6d, 0xcc, 0xfd, 0x53, - 0x0d, 0x96, 0x5f, 0x92, 0x54, 0xea, 0x97, 0x79, 0xea, 0x53, 0x80, 0x09, 0x49, 0xf9, 0x8b, 0xe9, - 0xd5, 0x59, 0x16, 0xc1, 0x16, 0x22, 0x02, 0x9e, 0x85, 0xbf, 0x57, 0xa1, 0xdb, 0xc0, 0x72, 0x2c, - 0x30, 0xca, 0xc9, 0x58, 0xc7, 0xab, 0x1c, 0x0b, 0x4b, 0x22, 0xc2, 0xf8, 0x28, 0x09, 0xc2, 0xf3, - 0x90, 0x06, 0x26, 0x60, 0x6d, 0xcc, 0xfd, 0x73, 0x0d, 0x96, 0x75, 0x38, 0x88, 0xe0, 0x35, 0x01, - 0x60, 0x88, 0x23, 0x07, 0xb2, 0x13, 0xea, 0xd6, 0x09, 0x43, 0xe8, 0x84, 0xec, 0x88, 0x70, 0xca, - 0xb8, 0x4e, 0xa4, 0x4c, 0x2e, 0x9d, 0xde, 0x2c, 0x9f, 0x9e, 0x79, 0xd2, 0xca, 0x3d, 0x71, 0x7f, - 0x5e, 0x86, 0xf6, 0xf1, 0xd9, 0x0f, 0xa2, 0xdd, 0xd9, 0x84, 0x6e, 0x22, 0x47, 0xcf, 0xb3, 0x0b, - 0xca, 0x01, 0x71, 0x4d, 0xea, 0x52, 0xad, 0x16, 0xd3, 0x42, 0x72, 0x0e, 0x68, 0x7c, 0x88, 0x03, - 0x9a, 0x0b, 0x38, 0x40, 0x64, 0x3c, 0xf1, 0x2f, 0xc4, 0x03, 0xf3, 0x34, 0x89, 0x74, 0xfd, 0x2e, - 0x60, 0xc8, 0x03, 0xe4, 0x27, 0x31, 0xa7, 0x31, 0x17, 0x2b, 0x13, 0xa6, 0x7a, 0x33, 0xc5, 0xce, - 0x15, 0x33, 0xa2, 0xcb, 0xd4, 0xe8, 0x41, 0xec, 0x27, 0x41, 0x18, 0x8f, 0x25, 0x17, 0x74, 0xf1, - 0x3c, 0x2c, 0x2b, 0xed, 0xf5, 0x24, 0x4c, 0x29, 0x93, 0x34, 0xd0, 0xc5, 0x46, 0x14, 0x8d, 0x85, - 0x56, 0x3e, 0xa2, 0xf1, 0x98, 0x5f, 0xc8, 0xc4, 0xee, 0xe2, 0x22, 0x28, 0xbf, 0x64, 0x14, 0x70, - 0x3a, 0x9b, 0x50, 0x9d, 0xb8, 0x36, 0x24, 0x6e, 0x50, 0x8b, 0xa3, 0xe0, 0x17, 0x32, 0x65, 0xbb, - 0xd8, 0x42, 0xd0, 0x7d, 0xe8, 0xe8, 0xf7, 
0x17, 0x59, 0x29, 0x92, 0xa0, 0x63, 0xb8, 0x03, 0x67, - 0x33, 0x25, 0xba, 0x5e, 0x29, 0xd3, 0xf5, 0x42, 0x7e, 0x5b, 0xfd, 0x78, 0x7e, 0x7b, 0x04, 0xb7, - 0x35, 0x7d, 0x98, 0xee, 0xf2, 0x28, 0x51, 0xf9, 0xac, 0x53, 0x6d, 0xd1, 0xb4, 0xa1, 0xf4, 0xf5, - 0x9c, 0xd2, 0x2d, 0x5e, 0xbb, 0x51, 0xe4, 0xb5, 0x0d, 0x68, 0x87, 0xec, 0xc5, 0x34, 0x8a, 0x1c, - 0xa4, 0xaa, 0x96, 0x92, 0x44, 0xb7, 0x16, 0xb2, 0x7d, 0xbb, 0x76, 0xdf, 0x54, 0xdd, 0x5a, 0x11, - 0x45, 0x8f, 0x61, 0x2d, 0x2d, 0x16, 0x42, 0x67, 0x50, 0x6e, 0x5b, 0x04, 0x8e, 0xe7, 0x15, 0xb3, - 0xac, 0xb8, 0x65, 0xe5, 0xf7, 0x7d, 0xe8, 0x4c, 0x14, 0x61, 0x30, 0x67, 0x43, 0x3f, 0x85, 0x66, - 0x10, 0x9c, 0xcd, 0x64, 0x4d, 0xca, 0xed, 0xf9, 0x26, 0xa5, 0x94, 0x8d, 0x4e, 0x45, 0x36, 0x8a, - 0x6c, 0x8e, 0x43, 0xfe, 0x34, 0x22, 0x63, 0xe7, 0x8e, 0xce, 0x66, 0x2d, 0x0b, 0x9b, 0x0e, 0x4e, - 0xc9, 0xd8, 0x19, 0xaa, 0xec, 0x17, 0xe3, 0xac, 0x43, 0xb9, 0x9b, 0x77, 0x28, 0xee, 0x23, 0xb8, - 0x79, 0x14, 0x32, 0xae, 0x4a, 0x33, 0xc3, 0x94, 0x4d, 0x92, 0x98, 0x51, 0xd1, 0x3a, 0xa8, 0xcc, - 0x14, 0x5d, 0x80, 0x6a, 0x1d, 0x94, 0x0a, 0x36, 0xb8, 0xfb, 0x2b, 0xe8, 0xef, 0x12, 0x46, 0xb3, - 0x25, 0x9b, 0xd0, 0xa5, 0xe6, 0xeb, 0xc9, 0x24, 0x7f, 0x06, 0x88, 0xb7, 0xbc, 0x62, 0x86, 0x8c, - 0xc4, 0xd0, 0xfd, 0x04, 0x7a, 0x6a, 0xbd, 0xfc, 0xa0, 0x98, 0x6f, 0x7f, 0xdc, 0x9f, 0xea, 0x80, - 0x84, 0x65, 0x8a, 0x5a, 0x98, 0x51, 0xdb, 0x30, 0x6d, 0x84, 0xe9, 0x4e, 0x74, 0x53, 0xb1, 0x09, - 0xdd, 0x7d, 0x1a, 0x85, 0x57, 0x21, 0xcf, 0x28, 0x38, 0x07, 0x04, 0xb5, 0x1c, 0xdb, 0xd4, 0x22, - 0x05, 0xf4, 0x0d, 0xb4, 0x55, 0xc7, 0xa4, 0xdb, 0xa3, 0xcf, 0xbc, 0xf2, 0x81, 0x9e, 0xd2, 0x38, - 0x88, 0x79, 0x3a, 0xc3, 0xed, 0xbc, 0xe7, 0x4c, 0xce, 0xcf, 0x19, 0xe5, 0xba, 0xd9, 0xd3, 0x92, - 0x38, 0x46, 0x9e, 0x28, 0xe9, 0xa4, 0x85, 0x95, 0x30, 0xfc, 0x16, 0x7a, 0xd6, 0x26, 0x15, 0xf5, - 0x63, 0x00, 0xad, 0xb7, 0x24, 0x9a, 0x1a, 0x4e, 0x54, 0xc2, 0xe3, 0xfa, 0xa3, 0x9a, 0xfb, 0x9d, - 0x7d, 0x07, 0xd9, 0x4d, 0x7f, 0x09, 0x3d, 0xcb, 0xd0, 0xec, 0x81, 0xb4, 0x96, 0x3d, 0xe7, 0xfe, - 0x16, 0x6e, 0xa8, 0x30, 0x57, 0xc0, 0x61, 0x3c, 0x99, 0x2e, 0xbe, 0xc3, 0x75, 0x68, 0x08, 0xe2, - 0xd6, 0x6f, 0x24, 0x28, 0x7b, 0x13, 0xba, 0xaf, 0xb3, 0x0a, 0xa3, 0xee, 0x2e, 0x07, 0xdc, 0xdf, - 0xc0, 0xea, 0x33, 0xca, 0xff, 0x17, 0x3b, 0xbf, 0x81, 0xb5, 0xd1, 0x34, 0xe2, 0xa1, 0x48, 0x94, - 0x57, 0x93, 0x28, 0x21, 0xc1, 0x47, 0x6c, 0x3d, 0x84, 0xce, 0x54, 0xae, 0xc9, 0xfe, 0xce, 0xc9, - 0x64, 0xf7, 0xaf, 0x35, 0xe8, 0x8a, 0xfb, 0x11, 0xa9, 0xc8, 0xfe, 0x3b, 0x7b, 0x8a, 0xb9, 0x11, - 0xb9, 0x96, 0x3b, 0xea, 0xa2, 0x99, 0xc9, 0xa2, 0x7a, 0xbd, 0xcc, 0x1a, 0x01, 0x4d, 0x40, 0xaa, - 0x78, 0x96, 0x70, 0xf7, 0x2e, 0xb4, 0x4e, 0x85, 0x6d, 0x22, 0x4f, 0x4f, 0x45, 0x9e, 0x8a, 0x87, - 0x6d, 0x61, 0x39, 0x76, 0x39, 0xac, 0x8a, 0xdf, 0x6d, 0x49, 0xcf, 0xb2, 0xd8, 0x7d, 0x05, 0x8d, - 0x88, 0x71, 0xfd, 0xfa, 0x8e, 0x57, 0x9c, 0xf5, 0x8e, 0x18, 0x57, 0x31, 0x2b, 0x94, 0x86, 0x5f, - 0x43, 0xc7, 0x00, 0x76, 0xfc, 0xb5, 0x3e, 0x14, 0x7f, 0x7f, 0xaf, 0x01, 0x7a, 0x46, 0xb9, 0xd8, - 0x7b, 0x44, 0x26, 0x59, 0x00, 0x7e, 0x0a, 0x70, 0x2a, 0x9b, 0x64, 0xc9, 0xe2, 0x35, 0xf9, 0x8f, - 0x93, 0x85, 0xa0, 0xef, 0xa1, 0x2b, 0xcd, 0xd1, 0x85, 0x5e, 0x18, 0xe8, 0x7a, 0xe5, 0x7d, 0xbc, - 0x4c, 0x49, 0x99, 0x9a, 0x2f, 0x1a, 0x8e, 0xb4, 0xbb, 0xd9, 0x64, 0x45, 0xda, 0x7c, 0x6e, 0x9b, - 0xdd, 0xdb, 0x5e, 0x9b, 0xbb, 0x02, 0xdb, 0x8f, 0x7f, 0xd4, 0x60, 0xf0, 0x6a, 0x12, 0x10, 0x99, - 0x07, 0x23, 0xca, 0x89, 0x45, 0x27, 0xc7, 0x67, 0x3f, 0xe4, 0xed, 0x8a, 0x96, 0x84, 0x87, 0xbb, - 0xa5, 0x5e, 0x25, 0x47, 0xd0, 0x2f, 0x61, 0xf9, 0x84, 0x72, 0x2e, 
0xba, 0x81, 0x86, 0xf6, 0xaf, - 0x6a, 0x7f, 0x4f, 0x2b, 0x29, 0xff, 0xcc, 0x92, 0xe1, 0x63, 0xe8, 0xdb, 0x13, 0x1f, 0x45, 0x09, - 0x5f, 0x43, 0xff, 0xc4, 0xae, 0xd4, 0x08, 0x9a, 0xd6, 0x7f, 0x82, 0x72, 0x9c, 0x05, 0x90, 0xfe, - 0x84, 0x95, 0x01, 0xb4, 0x0f, 0x77, 0x9e, 0x51, 0x6e, 0x2f, 0xa5, 0x39, 0xdd, 0x7f, 0x01, 0xcb, - 0xbe, 0x82, 0x74, 0x3c, 0xad, 0x78, 0xb6, 0x26, 0x36, 0xb3, 0xee, 0x43, 0xb8, 0xfb, 0x8c, 0xf2, - 0x5d, 0x55, 0x74, 0x45, 0x4f, 0xb2, 0x3b, 0x13, 0xbb, 0x9b, 0xeb, 0xcc, 0x23, 0x37, 0x3f, 0xf8, - 0xff, 0x61, 0xb3, 0x7a, 0x89, 0x3e, 0x7b, 0x00, 0x2d, 0x81, 0x32, 0x1d, 0x47, 0x4a, 0xd8, 0xfe, - 0x5b, 0x0b, 0xea, 0x27, 0x3b, 0x68, 0x47, 0x51, 0x9d, 0x2e, 0x4f, 0xa8, 0xef, 0x59, 0x25, 0x63, - 0x38, 0xf0, 0x2a, 0x4a, 0x97, 0xbb, 0x84, 0xb6, 0xa0, 0xbf, 0x27, 0x3e, 0xf2, 0xa8, 0x4e, 0x67, - 0x53, 0xbb, 0x86, 0x2b, 0x9e, 0x5d, 0xb1, 0x94, 0xa6, 0xa2, 0xc7, 0x0f, 0x6a, 0x7e, 0x06, 0x5d, - 0xe1, 0xc5, 0x9c, 0x9a, 0x19, 0xb8, 0x4b, 0xe8, 0xdb, 0x02, 0x29, 0xa3, 0x9b, 0x15, 0xb5, 0x64, - 0x68, 0x83, 0x55, 0xf6, 0xea, 0x36, 0xda, 0x50, 0x79, 0xa5, 0xbd, 0x59, 0x98, 0xbd, 0x5f, 0xf3, - 0x0b, 0x69, 0xaf, 0x56, 0x5b, 0xf3, 0x8a, 0x3c, 0x3d, 0x34, 0xeb, 0xdc, 0x25, 0xb4, 0x63, 0xae, - 0x40, 0xeb, 0x22, 0xaf, 0x54, 0x30, 0xca, 0xbb, 0x3f, 0x04, 0xc8, 0xd3, 0x79, 0xee, 0x55, 0x6e, - 0x56, 0x64, 0xba, 0xbb, 0x84, 0xbe, 0x81, 0x95, 0x42, 0x86, 0xa0, 0x5b, 0x95, 0x19, 0x53, 0x3e, - 0xeb, 0x3b, 0xb8, 0x51, 0x0a, 0xdc, 0xb9, 0x23, 0x87, 0xde, 0xc2, 0xd0, 0x76, 0x97, 0xd0, 0x2b, - 0x18, 0x54, 0x05, 0x20, 0xda, 0xf4, 0xde, 0x13, 0xca, 0xc3, 0x4f, 0xbc, 0xf7, 0x45, 0xad, 0xbb, - 0x74, 0xd6, 0x96, 0xff, 0x9c, 0xef, 0xfc, 0x27, 0x00, 0x00, 0xff, 0xff, 0x51, 0x03, 0x5d, 0x6f, - 0xa4, 0x18, 0x00, 0x00, + proto.RegisterType((*DeleteLifecycleInput)(nil), "DeleteLifecycleInput") + proto.RegisterType((*MultipartUploadRecord)(nil), "MultipartUploadRecord") + proto.RegisterType((*ListMultipartUploadRequest)(nil), "ListMultipartUploadRequest") + proto.RegisterType((*ListMultipartUploadResponse)(nil), "ListMultipartUploadResponse") +} + +func init() { proto.RegisterFile("s3.proto", fileDescriptor_s3_38670a1bdb8348d8) } + +var fileDescriptor_s3_38670a1bdb8348d8 = []byte{ + // 2453 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x59, 0xcd, 0x72, 0x1b, 0xc7, + 0x11, 0x26, 0xfe, 0x81, 0x06, 0xff, 0x34, 0x02, 0xa9, 0x15, 0x48, 0xcb, 0xca, 0x96, 0x2c, 0x53, + 0x3e, 0x6c, 0x59, 0x54, 0x62, 0xcb, 0xaa, 0x24, 0x36, 0x45, 0x50, 0x32, 0x4a, 0xa4, 0xa8, 0x1a, + 0x52, 0x52, 0x2e, 0xa9, 0xd4, 0x10, 0x3b, 0x04, 0xd7, 0x5a, 0xec, 0xc2, 0x3b, 0x03, 0x89, 0xc8, + 0x25, 0x55, 0xce, 0x21, 0xb7, 0xe4, 0x92, 0x4b, 0x4e, 0x79, 0x10, 0x9f, 0x73, 0xc9, 0x03, 0xe4, + 0x05, 0x72, 0x4a, 0x55, 0x1e, 0x22, 0x35, 0x7f, 0xbb, 0xb3, 0xc0, 0x82, 0x8a, 0x53, 0xf1, 0x89, + 0xd3, 0xdf, 0xf4, 0xcc, 0xf4, 0xf4, 0x74, 0x7f, 0xdd, 0x0b, 0x42, 0x93, 0x3d, 0xf0, 0xc6, 0x49, + 0xcc, 0x63, 0xf7, 0xcf, 0x25, 0xe8, 0x9c, 0xd0, 0xe4, 0x2d, 0x4d, 0x4e, 0x02, 0x9f, 0x1e, 0x44, + 0x83, 0x64, 0x3a, 0xe6, 0x41, 0x1c, 0xa1, 0xdb, 0xd0, 0x96, 0xe3, 0x93, 0xc1, 0x05, 0x1d, 0x51, + 0xa7, 0x74, 0xbb, 0xb4, 0xd3, 0xc2, 0x36, 0x84, 0xb6, 0xa1, 0x45, 0xc2, 0x61, 0x9c, 0x04, 0xfc, + 0x62, 0xe4, 0x94, 0xe5, 0x7c, 0x06, 0x88, 0xf5, 0x23, 0xc2, 0x38, 0x4d, 0x9e, 0xd1, 0x69, 0xdf, + 0x77, 0x2a, 0x6a, 0xbd, 0x05, 0xa1, 0x2e, 0x34, 0x47, 0x24, 0xf2, 0x79, 0x9c, 0x4c, 0x9d, 0xea, + 0xed, 0xd2, 0x4e, 0x13, 0xa7, 0xb2, 0x7b, 0x0c, 0x37, 0x5e, 0xd1, 0x84, 0x05, 0x71, 0x14, 0x44, + 0xc3, 0xfd, 0x38, 0x3a, 0x0f, 0x86, 
0x93, 0x84, 0x48, 0xc3, 0x36, 0xa1, 0x7e, 0xc2, 0x09, 0x9f, + 0x30, 0x6d, 0x93, 0x96, 0x84, 0x39, 0x47, 0xe7, 0xa4, 0x47, 0x43, 0xca, 0xa9, 0x31, 0x27, 0x05, + 0xdc, 0x63, 0xd8, 0xc0, 0xd4, 0x0f, 0x12, 0x3a, 0xe0, 0x7b, 0x61, 0x88, 0xe9, 0xb7, 0x13, 0xca, + 0x38, 0x3b, 0x8d, 0x85, 0x15, 0x17, 0x31, 0xe3, 0xcf, 0x49, 0x7a, 0xc9, 0x54, 0x16, 0x73, 0xd2, + 0x4b, 0x83, 0x38, 0x74, 0xca, 0xb7, 0x2b, 0x62, 0xce, 0xc8, 0xee, 0xdf, 0x4b, 0xd0, 0x34, 0x3b, + 0xe6, 0x14, 0xf5, 0x26, 0x46, 0xce, 0x1d, 0x50, 0x9e, 0x39, 0x60, 0x17, 0x3a, 0x09, 0x1d, 0x87, + 0x64, 0x40, 0x9f, 0xd1, 0xe9, 0x8b, 0x84, 0x9e, 0x07, 0x97, 0xaf, 0x03, 0x7e, 0xa1, 0xbd, 0x55, + 0x38, 0x87, 0xee, 0xc2, 0x6a, 0x86, 0x4b, 0xed, 0xaa, 0xd4, 0x9e, 0x41, 0xd1, 0x27, 0xb0, 0x7e, + 0xc1, 0xf9, 0xd8, 0xd8, 0xb8, 0x1f, 0xfb, 0xd4, 0xa9, 0x49, 0xcd, 0x39, 0xdc, 0x7d, 0x07, 0xad, + 0xfd, 0x38, 0xf2, 0x03, 0xe9, 0xe0, 0x1d, 0x58, 0x7b, 0x63, 0x4e, 0x3c, 0xf8, 0x76, 0x42, 0x42, + 0xe3, 0xe9, 0x59, 0x18, 0x7d, 0x05, 0x5b, 0x62, 0xab, 0x83, 0x24, 0x89, 0x13, 0xb1, 0x0f, 0xa6, + 0x7c, 0x92, 0x44, 0xd4, 0xd7, 0xab, 0xd4, 0x6d, 0xaf, 0x52, 0x71, 0x7f, 0x03, 0xcb, 0x38, 0x9e, + 0xf0, 0x20, 0x1a, 0xe2, 0x49, 0x48, 0x19, 0xfa, 0x08, 0x9a, 0x89, 0x36, 0x4c, 0x1e, 0xda, 0xde, + 0x6d, 0x79, 0xc6, 0x52, 0x9c, 0x4e, 0xa1, 0x1d, 0x68, 0x0d, 0x8c, 0xbd, 0xf2, 0x98, 0xf6, 0x2e, + 0x78, 0xe9, 0x0d, 0x70, 0x36, 0xe9, 0xfe, 0xb3, 0x04, 0x9d, 0xd7, 0xf4, 0x8c, 0x05, 0x9c, 0xe6, + 0xc3, 0xe8, 0x0e, 0xac, 0x04, 0x91, 0x4f, 0x2f, 0x7b, 0xf1, 0x60, 0x32, 0xa2, 0x11, 0xd7, 0x77, + 0xcc, 0x83, 0x42, 0x8b, 0x0a, 0xd3, 0x53, 0x2d, 0x75, 0xa7, 0x3c, 0x88, 0x0e, 0x61, 0x23, 0x29, + 0x0a, 0x2e, 0xf9, 0x8e, 0xed, 0xdd, 0x4d, 0xaf, 0x30, 0xf4, 0x70, 0xf1, 0x22, 0x74, 0x1f, 0x96, + 0x13, 0xcb, 0x27, 0x4e, 0xf5, 0x76, 0x65, 0xa7, 0xbd, 0xbb, 0xe2, 0xd9, 0x8e, 0xc2, 0x39, 0x15, + 0xf7, 0x5f, 0x25, 0xb8, 0xb6, 0x7f, 0x8c, 0x4f, 0xf2, 0x57, 0x5c, 0x85, 0x72, 0xe0, 0xeb, 0x7b, + 0x95, 0x03, 0x5f, 0x44, 0x0e, 0x09, 0xc3, 0xf8, 0x1d, 0xf5, 0x8f, 0x28, 0xbf, 0x88, 0x7d, 0xf3, + 0x42, 0x33, 0xa8, 0xa5, 0x77, 0x9c, 0x04, 0xc3, 0x20, 0x62, 0x3a, 0x1e, 0x67, 0x50, 0x4b, 0xef, + 0x6b, 0x4a, 0x7c, 0x9a, 0x30, 0x13, 0x89, 0x79, 0x54, 0x38, 0x71, 0x44, 0x2e, 0xf7, 0x86, 0xf4, + 0x84, 0x8a, 0x87, 0x61, 0x3a, 0x0c, 0xf3, 0xa0, 0xd8, 0x8d, 0x5e, 0x8e, 0x63, 0x96, 0xed, 0x56, + 0x57, 0xbb, 0xe5, 0x51, 0xb7, 0x0f, 0xed, 0x1e, 0x65, 0x3c, 0x88, 0x52, 0x3a, 0x38, 0x9b, 0x0c, + 0xde, 0x50, 0xf3, 0x80, 0x5a, 0x42, 0x2e, 0x2c, 0x33, 0x1e, 0x27, 0x64, 0x48, 0xf7, 0x43, 0xc2, + 0xcc, 0x55, 0x73, 0x98, 0xfb, 0xfb, 0x12, 0xac, 0x61, 0x3a, 0x0e, 0x83, 0x81, 0xdc, 0x0b, 0xc7, + 0x21, 0x9d, 0x73, 0xda, 0x26, 0xd4, 0xc7, 0x32, 0xe6, 0xf5, 0x0e, 0x5a, 0x42, 0x0e, 0x34, 0x68, + 0x44, 0xce, 0x42, 0xaa, 0xb8, 0xad, 0x89, 0x8d, 0x88, 0x3c, 0x68, 0xfb, 0x99, 0x81, 0xd2, 0x27, + 0xed, 0xdd, 0x65, 0xcf, 0x32, 0x1a, 0xdb, 0x0a, 0xee, 0x2b, 0x70, 0x2c, 0x23, 0xf2, 0x4f, 0x88, + 0xa0, 0x9a, 0xc4, 0xa1, 0x61, 0x26, 0x39, 0x46, 0x77, 0xa1, 0x96, 0xc8, 0xc0, 0x28, 0xcb, 0xc0, + 0x58, 0xf7, 0x66, 0xae, 0x80, 0xd5, 0xb4, 0x7b, 0x0f, 0x2a, 0xa7, 0x64, 0x88, 0xd6, 0xa1, 0xf2, + 0x86, 0x4e, 0xf5, 0x0e, 0x62, 0x28, 0x90, 0xb7, 0x24, 0xd4, 0xf7, 0x11, 0x43, 0xf7, 0x1e, 0xac, + 0x1d, 0x06, 0xe7, 0x74, 0x30, 0x1d, 0x84, 0xf4, 0x49, 0x10, 0x72, 0x9a, 0x88, 0x7b, 0xab, 0x5c, + 0x37, 0x7e, 0x55, 0x92, 0xfb, 0x5d, 0x09, 0xea, 0x7b, 0x03, 0x63, 0x5c, 0x94, 0xd1, 0xa6, 0x1c, + 0x0b, 0xcc, 0x27, 0x53, 0xe5, 0xee, 0x1a, 0x96, 0x63, 0xf1, 0x14, 0xbe, 0x64, 0xe1, 0x23, 0x92, + 0xbc, 0xa1, 0x89, 0x8e, 0xa6, 0x1c, 0x26, 0xd6, 0xf1, 0x80, 
0x26, 0xd2, 0x5b, 0x35, 0x2c, 0xc7, + 0xc2, 0xc5, 0x67, 0x64, 0xf0, 0x86, 0x46, 0xbe, 0x8e, 0x18, 0x23, 0xba, 0x5f, 0x43, 0x67, 0xef, + 0x2c, 0x4e, 0xf8, 0xd1, 0x24, 0xe4, 0xc1, 0x98, 0x24, 0xfc, 0xe5, 0x38, 0x8c, 0x89, 0x8f, 0x3e, + 0x85, 0xeb, 0x3d, 0x32, 0x65, 0x7b, 0xe7, 0x9c, 0x26, 0xfd, 0x28, 0xe0, 0x81, 0x7a, 0x82, 0x92, + 0xdc, 0xb4, 0x68, 0x4a, 0xf0, 0xc3, 0x4a, 0x7a, 0x75, 0x91, 0x4c, 0x22, 0x00, 0xfa, 0x3d, 0x13, + 0x00, 0xfd, 0x9e, 0x55, 0x6f, 0xca, 0xb9, 0x7a, 0xb3, 0x03, 0x75, 0xe5, 0x2a, 0x9d, 0xe5, 0xeb, + 0xde, 0x8c, 0x0b, 0xb1, 0x9e, 0x47, 0x3f, 0x81, 0x06, 0x91, 0x1e, 0x33, 0xb9, 0xdc, 0xf0, 0x94, + 0x07, 0xb1, 0xc1, 0xd1, 0xaf, 0xe1, 0x96, 0xbc, 0x50, 0x3f, 0x1a, 0xc4, 0xa3, 0xb1, 0xf4, 0x4b, + 0xfe, 0x6a, 0xd2, 0x03, 0xed, 0xdd, 0x0d, 0xaf, 0xe8, 0xde, 0xf8, 0x3d, 0x8b, 0xdd, 0x61, 0x2e, + 0xce, 0xfb, 0xd1, 0x79, 0x2c, 0xae, 0xc5, 0x72, 0x65, 0x54, 0x49, 0xb6, 0xd3, 0xcb, 0x39, 0xa7, + 0xcf, 0x65, 0x54, 0xa5, 0x20, 0xa3, 0xbe, 0xaf, 0x41, 0xfd, 0xb1, 0x4a, 0xc0, 0xa2, 0xe8, 0xe8, + 0x40, 0x2d, 0x7e, 0x17, 0xd1, 0x44, 0x6f, 0xad, 0x04, 0x51, 0xa9, 0xe4, 0xa0, 0x17, 0xb0, 0x71, + 0x48, 0xa6, 0xb2, 0x52, 0xaa, 0xcd, 0xe7, 0x70, 0x11, 0xbb, 0x64, 0x10, 0x6a, 0xa2, 0x11, 0x43, + 0x61, 0xd6, 0x20, 0xa1, 0xf2, 0x62, 0x3d, 0xc2, 0x55, 0x8d, 0xab, 0xe0, 0x1c, 0x26, 0x7a, 0x03, + 0x9e, 0x90, 0x88, 0x05, 0x82, 0xc2, 0xeb, 0x32, 0x5d, 0x33, 0x40, 0x5c, 0x59, 0xc5, 0xa2, 0xef, + 0x34, 0x54, 0x2a, 0x6b, 0x11, 0xf5, 0xa1, 0xc3, 0x0a, 0x9a, 0x23, 0xa7, 0xa9, 0x1f, 0xa3, 0xa8, + 0x73, 0xc2, 0x85, 0x4b, 0x10, 0x86, 0x1b, 0x6f, 0x8b, 0x3b, 0x1a, 0xa7, 0x25, 0x77, 0x73, 0xbc, + 0x05, 0x1d, 0x0f, 0x5e, 0xb4, 0xd0, 0x7e, 0x2b, 0xc8, 0xbf, 0x55, 0x1f, 0x3a, 0xef, 0x0a, 0xaa, + 0x9e, 0xd3, 0xd6, 0x86, 0x17, 0x95, 0x44, 0x5c, 0xb8, 0x04, 0xdd, 0x85, 0xea, 0x20, 0x4e, 0x98, + 0xb3, 0x2c, 0x97, 0x22, 0x6f, 0xae, 0xce, 0x60, 0x39, 0x8f, 0x5e, 0x82, 0x93, 0x2c, 0xa0, 0x31, + 0x67, 0x45, 0xae, 0xbd, 0xe9, 0x2d, 0xe2, 0x39, 0xbc, 0x70, 0x29, 0x7a, 0x02, 0x9b, 0xa1, 0xc9, + 0xab, 0xfc, 0xa6, 0xab, 0x32, 0x97, 0x56, 0xbd, 0x5c, 0xfa, 0xe2, 0x05, 0xda, 0x22, 0x4c, 0x54, + 0x65, 0x78, 0x11, 0x87, 0xc1, 0x60, 0xea, 0xac, 0xa9, 0xe8, 0xb5, 0x31, 0xf7, 0x0f, 0x25, 0x68, + 0xbc, 0x20, 0x89, 0xd4, 0x9f, 0xa7, 0xcd, 0x5b, 0x00, 0x22, 0xa5, 0x9e, 0x4f, 0x46, 0x67, 0x69, + 0x04, 0x5b, 0x88, 0x08, 0x78, 0x16, 0xfc, 0x56, 0x85, 0x6e, 0x05, 0xcb, 0xb1, 0xc0, 0x28, 0x27, + 0x43, 0x1d, 0xaf, 0x72, 0x2c, 0x2c, 0x09, 0x09, 0xe3, 0x47, 0xb1, 0x1f, 0x9c, 0x07, 0xd4, 0x37, + 0x01, 0x6b, 0x63, 0xee, 0x9f, 0x4a, 0xd0, 0xd0, 0xe1, 0x20, 0x82, 0xd7, 0x04, 0x80, 0x29, 0x4c, + 0x19, 0x90, 0x9e, 0x50, 0xb6, 0x4e, 0xe8, 0x42, 0x33, 0x60, 0x87, 0x84, 0x53, 0xc6, 0x75, 0x22, + 0xa5, 0xf2, 0xdc, 0xe9, 0xd5, 0xf9, 0xd3, 0xd3, 0x9b, 0xd4, 0xb2, 0x9b, 0xb8, 0xdf, 0x37, 0xa0, + 0x7e, 0x7c, 0xf6, 0x8d, 0xe8, 0xbe, 0xb6, 0xa1, 0x15, 0xcb, 0xd1, 0xb3, 0xd4, 0x41, 0x19, 0x20, + 0xdc, 0xa4, 0x9c, 0x6a, 0x75, 0xbc, 0x16, 0x92, 0x71, 0x40, 0xe5, 0x7d, 0x1c, 0x50, 0x5d, 0xc0, + 0x01, 0x22, 0xe3, 0xc9, 0xe0, 0x42, 0x3c, 0x30, 0x4f, 0xe2, 0x50, 0x17, 0x87, 0x1c, 0x86, 0x3c, + 0x40, 0x83, 0x38, 0xe2, 0x34, 0xe2, 0x62, 0x65, 0xcc, 0x54, 0xab, 0xa8, 0x3a, 0x8a, 0x82, 0x19, + 0xd1, 0xf4, 0x6a, 0xf4, 0x20, 0x1a, 0xc4, 0x7e, 0x10, 0x0d, 0x25, 0x17, 0xb4, 0xf0, 0x2c, 0x2c, + 0x0b, 0xff, 0xe5, 0x38, 0x48, 0x28, 0x93, 0x34, 0xd0, 0xc2, 0x46, 0x14, 0x7d, 0x8e, 0x56, 0x3e, + 0xa4, 0xd1, 0x90, 0x5f, 0xc8, 0xc4, 0x6e, 0xe1, 0x3c, 0x28, 0x3f, 0xac, 0x14, 0x70, 0x3a, 0x1d, + 0x53, 0x9d, 0xb8, 0x36, 0x24, 0x3c, 0xa8, 0xc5, 0x23, 0xff, 0x67, 0x32, 0x65, 0x5b, 
0xd8, 0x42, + 0xd0, 0x1d, 0x68, 0xea, 0xf7, 0x17, 0x59, 0x29, 0x92, 0xa0, 0x69, 0xb8, 0x03, 0xa7, 0x33, 0x73, + 0x74, 0xbd, 0x32, 0x4f, 0xd7, 0x0b, 0xf9, 0x6d, 0xf5, 0x87, 0xf3, 0xdb, 0x43, 0xb8, 0xa1, 0xe9, + 0xc3, 0x34, 0xbb, 0x87, 0xb1, 0xca, 0x67, 0x9d, 0x6a, 0x8b, 0xa6, 0x0d, 0xa5, 0xaf, 0x67, 0x94, + 0x6e, 0xf1, 0xda, 0xb5, 0x3c, 0xaf, 0x6d, 0x42, 0x3d, 0x60, 0xcf, 0x27, 0x61, 0xe8, 0x20, 0x55, + 0xb5, 0x94, 0x24, 0x9a, 0xc7, 0x80, 0xf5, 0xec, 0x26, 0xe3, 0xba, 0x6a, 0x1e, 0xf3, 0x28, 0x7a, + 0x04, 0x6b, 0x49, 0xbe, 0x10, 0x3a, 0x9d, 0xf9, 0x2e, 0x4a, 0xe0, 0x78, 0x56, 0x31, 0xcd, 0x8a, + 0x0d, 0x2b, 0xbf, 0xef, 0x40, 0x73, 0xac, 0x08, 0x83, 0x39, 0x9b, 0xfa, 0x29, 0x34, 0x83, 0xe0, + 0x74, 0x06, 0x39, 0x50, 0xe5, 0x64, 0xc8, 0x9c, 0x1b, 0x52, 0xa3, 0xea, 0x9d, 0x92, 0x21, 0x96, + 0xc8, 0x5c, 0x36, 0x3a, 0x05, 0xd9, 0x28, 0xb2, 0x39, 0x0a, 0xf8, 0x93, 0x90, 0x0c, 0x9d, 0x9b, + 0x3a, 0x9b, 0xb5, 0x2c, 0x6c, 0x3a, 0x38, 0x25, 0x43, 0xa7, 0xab, 0xb2, 0x5f, 0x8c, 0xd3, 0x56, + 0x6a, 0x2b, 0x6b, 0xa5, 0xdc, 0x87, 0x70, 0xfd, 0x30, 0x60, 0x5c, 0x95, 0x66, 0x86, 0x29, 0x1b, + 0xc7, 0x11, 0xa3, 0xa2, 0x33, 0x51, 0x99, 0x29, 0xba, 0x00, 0xd5, 0x99, 0x28, 0x15, 0x6c, 0x70, + 0xf7, 0x97, 0xb0, 0xfc, 0x98, 0x30, 0x9a, 0x2e, 0xd9, 0x86, 0x16, 0x35, 0x1f, 0x73, 0x26, 0xf9, + 0x53, 0x40, 0xbc, 0xe5, 0x88, 0x19, 0x32, 0x12, 0x43, 0xf7, 0x03, 0x68, 0xab, 0xf5, 0xf2, 0xfb, + 0x66, 0xb6, 0xbd, 0x76, 0xbf, 0x2b, 0x03, 0x12, 0x96, 0x29, 0x6a, 0x61, 0x46, 0x6d, 0xd3, 0xb4, + 0x11, 0xa6, 0x3b, 0xd1, 0x4d, 0xc5, 0x36, 0xb4, 0x7a, 0x34, 0x0c, 0x46, 0x01, 0x4f, 0x29, 0x38, + 0x03, 0x04, 0xb5, 0x1c, 0xdb, 0xd4, 0x22, 0x05, 0xf4, 0x79, 0xda, 0xa8, 0xa9, 0xee, 0xeb, 0x43, + 0x6f, 0xfe, 0x40, 0x4f, 0x69, 0x1c, 0x44, 0x3c, 0x99, 0xa6, 0x7d, 0xdb, 0x26, 0xd4, 0xe3, 0xf3, + 0x73, 0x46, 0xb9, 0x64, 0x98, 0x1a, 0xd6, 0x92, 0x38, 0x46, 0x9e, 0x28, 0xe9, 0xa4, 0x86, 0x95, + 0xd0, 0xfd, 0x02, 0xda, 0xd6, 0x26, 0x05, 0xf5, 0xa3, 0x03, 0xb5, 0xb7, 0x24, 0x9c, 0x18, 0x4e, + 0x54, 0xc2, 0xa3, 0xf2, 0xc3, 0x92, 0xfb, 0xa5, 0xed, 0x83, 0xd4, 0xd3, 0xf7, 0xa0, 0x6d, 0x19, + 0x9a, 0x3e, 0x90, 0xd6, 0xb2, 0xe7, 0xdc, 0xdf, 0xc1, 0x35, 0x15, 0xe6, 0x0a, 0xe8, 0x47, 0xe3, + 0xc9, 0x62, 0x1f, 0xae, 0x43, 0x45, 0x10, 0xb7, 0x7e, 0x23, 0x41, 0xd9, 0xdb, 0xd0, 0x7a, 0x95, + 0x56, 0x18, 0xe5, 0xbb, 0x0c, 0x10, 0x31, 0x7a, 0x48, 0x18, 0x1f, 0xcd, 0x54, 0x0c, 0x1b, 0x73, + 0x7f, 0x05, 0xab, 0x4f, 0x29, 0xff, 0x11, 0x4e, 0x77, 0x5f, 0xc3, 0xda, 0x6c, 0x97, 0xff, 0xdf, + 0x6f, 0xdd, 0x85, 0xe6, 0x44, 0xae, 0x49, 0x7f, 0x81, 0x4a, 0x65, 0xf7, 0x2f, 0x25, 0x68, 0x09, + 0x1f, 0x8a, 0x74, 0x65, 0xff, 0x9f, 0x3d, 0xc5, 0xdc, 0x11, 0xb9, 0x94, 0x3b, 0x6a, 0x37, 0xa5, + 0xb2, 0xa8, 0x70, 0x2f, 0xd2, 0x66, 0x41, 0x93, 0x94, 0x2a, 0xb0, 0x73, 0xb8, 0xbb, 0x05, 0xb5, + 0x53, 0x61, 0x9b, 0xc8, 0xe5, 0x53, 0x91, 0xcb, 0xe2, 0xf1, 0x6b, 0x58, 0x8e, 0x5d, 0x0e, 0xab, + 0xe2, 0xef, 0xae, 0xa4, 0x70, 0x59, 0x10, 0x3f, 0x81, 0x4a, 0xc8, 0xb8, 0x8e, 0x10, 0xc7, 0xcb, + 0xcf, 0x7a, 0x87, 0x8c, 0xab, 0xb8, 0x16, 0x4a, 0xdd, 0xcf, 0xa0, 0x69, 0x00, 0x3b, 0x46, 0x6b, + 0xef, 0x8b, 0xd1, 0xbf, 0x95, 0x00, 0x3d, 0xa5, 0x5c, 0xec, 0x7d, 0x44, 0xc6, 0x69, 0x90, 0xde, + 0x02, 0x38, 0x95, 0x8d, 0xb4, 0xfe, 0xd0, 0xaa, 0x88, 0x5a, 0x95, 0x21, 0xe8, 0x2b, 0x68, 0x49, + 0x73, 0x74, 0x33, 0x20, 0x0c, 0x74, 0xbd, 0xf9, 0x7d, 0xbc, 0x54, 0x49, 0x99, 0x9a, 0x2d, 0xea, + 0x1e, 0xe9, 0xeb, 0xa6, 0x93, 0x05, 0xa9, 0xf5, 0x91, 0x6d, 0x76, 0x7b, 0x77, 0x6d, 0xc6, 0x05, + 0xf6, 0x3d, 0xfe, 0x5d, 0x82, 0xce, 0xcb, 0xb1, 0x4f, 0x64, 0xae, 0x1c, 0x51, 0x4e, 0x2c, 0xca, + 0x39, 0x3e, 
0xfb, 0x26, 0x6b, 0x69, 0xb4, 0x24, 0x6e, 0xf8, 0x78, 0xae, 0x9f, 0xc9, 0x10, 0x93, + 0x1e, 0x29, 0x85, 0x57, 0xb2, 0xf4, 0x48, 0x29, 0xfc, 0xe7, 0xd0, 0x38, 0xa1, 0x9c, 0x8b, 0xae, + 0xa2, 0xaa, 0x7d, 0x50, 0x64, 0x83, 0xa7, 0x95, 0x94, 0x0f, 0xcc, 0x92, 0xee, 0x23, 0x58, 0xb6, + 0x27, 0x7e, 0x10, 0xb5, 0x7c, 0x06, 0xcb, 0x27, 0x76, 0xc5, 0x47, 0x50, 0xb5, 0x7e, 0xea, 0x94, + 0xe3, 0x34, 0xc8, 0xf4, 0x37, 0xbb, 0x0c, 0xb2, 0x1e, 0xdc, 0x7c, 0x4a, 0xb9, 0xbd, 0x94, 0x66, + 0x65, 0xe3, 0x63, 0x68, 0x0c, 0x14, 0xa4, 0x63, 0x6e, 0xc5, 0xb3, 0x35, 0xb1, 0x99, 0x75, 0xef, + 0xc3, 0xd6, 0x53, 0xca, 0x1f, 0xab, 0xe2, 0x2d, 0x7a, 0x9b, 0xc7, 0x53, 0xb1, 0xbb, 0x71, 0x79, + 0x16, 0xdd, 0xd9, 0xc1, 0x3f, 0x85, 0xed, 0xe2, 0x25, 0xfa, 0xec, 0x0e, 0xd4, 0x04, 0xca, 0x74, + 0xac, 0x29, 0xc1, 0x7d, 0x02, 0x1d, 0x45, 0x80, 0xe9, 0xc7, 0xc0, 0xd5, 0x2c, 0xb4, 0x09, 0xf5, + 0x64, 0x12, 0xd2, 0x7e, 0xcf, 0x7c, 0xd4, 0x2b, 0xc9, 0xfd, 0x6b, 0x09, 0x36, 0x66, 0x3f, 0xae, + 0xe9, 0x20, 0x4e, 0x7c, 0xc1, 0x52, 0xc7, 0xb3, 0x4d, 0x6f, 0x0a, 0x58, 0xe7, 0x94, 0x73, 0xe7, + 0x38, 0xd0, 0xd0, 0x57, 0xd1, 0x5c, 0x61, 0x44, 0x41, 0x15, 0x2f, 0x0d, 0x8d, 0xa8, 0x46, 0x37, + 0x95, 0xc5, 0x5c, 0x3f, 0x0a, 0xf8, 0x69, 0x30, 0x32, 0x3d, 0x78, 0x2a, 0xbb, 0x7f, 0x2c, 0x41, + 0x57, 0x50, 0xc3, 0x9c, 0x95, 0x57, 0x17, 0xce, 0xec, 0xe7, 0x9c, 0xb2, 0xfd, 0x73, 0x8e, 0x78, + 0x82, 0x1e, 0x99, 0xaa, 0x8f, 0xf9, 0x1a, 0x96, 0xe3, 0xac, 0xbe, 0x55, 0xad, 0xfa, 0xb6, 0xa8, + 0x1a, 0xba, 0xc7, 0xb0, 0x55, 0x68, 0x8f, 0x7e, 0xaf, 0x4f, 0xa1, 0x91, 0x48, 0x0f, 0x9a, 0x58, + 0xd9, 0xf4, 0x0a, 0x1d, 0x8c, 0x8d, 0xda, 0xee, 0x3f, 0x1a, 0x50, 0x3e, 0x79, 0x80, 0x1e, 0xa8, + 0xf2, 0xa7, 0x5b, 0x16, 0xb4, 0xec, 0x59, 0x6d, 0x44, 0xb7, 0xe3, 0x15, 0xb4, 0x33, 0xee, 0x12, + 0xda, 0x81, 0xe5, 0x7d, 0xf1, 0xe1, 0x4f, 0xf5, 0xb5, 0x4d, 0x3f, 0xd3, 0x5d, 0xf1, 0xec, 0x2e, + 0x46, 0x69, 0xaa, 0x88, 0x79, 0xaf, 0xe6, 0x87, 0xd0, 0x12, 0x11, 0x39, 0xa3, 0x66, 0x06, 0xee, + 0x12, 0xfa, 0x22, 0x57, 0xa8, 0xd1, 0xf5, 0x82, 0xfe, 0xa2, 0x6b, 0x83, 0x45, 0xf6, 0xea, 0x4f, + 0x2b, 0x53, 0xde, 0x0b, 0xed, 0x4d, 0x29, 0xe3, 0x6a, 0xcd, 0x8f, 0xa5, 0xbd, 0x5a, 0x6d, 0xcd, + 0xcb, 0xd7, 0xe5, 0xae, 0x59, 0xe7, 0x2e, 0xa1, 0x07, 0xc6, 0x05, 0x5a, 0x17, 0x79, 0x73, 0x4d, + 0xc4, 0xfc, 0xee, 0xf7, 0x01, 0x32, 0xfa, 0x9e, 0x79, 0x95, 0xeb, 0x05, 0xcc, 0xee, 0x2e, 0xa1, + 0xcf, 0x61, 0x25, 0xc7, 0x76, 0x68, 0xa3, 0x90, 0xfd, 0xe6, 0xcf, 0xfa, 0x12, 0xae, 0xcd, 0x91, + 0xd0, 0xcc, 0x91, 0x5d, 0x6f, 0x21, 0x4d, 0xb9, 0x4b, 0xe8, 0x25, 0x74, 0x8a, 0xc8, 0x04, 0x6d, + 0x7b, 0x57, 0xd0, 0x52, 0xf7, 0x03, 0xef, 0x2a, 0x06, 0x92, 0x76, 0x6d, 0xd8, 0xb1, 0x93, 0x72, + 0x0e, 0xda, 0xf0, 0x8a, 0x58, 0xe8, 0x8a, 0xc7, 0x7c, 0x6f, 0xf0, 0x3d, 0x82, 0xb5, 0x3d, 0xdf, + 0xcf, 0x31, 0xd1, 0x82, 0x04, 0x9a, 0x5f, 0xfb, 0x0b, 0x40, 0xca, 0x9c, 0xff, 0x6d, 0xf9, 0x31, + 0xac, 0x8b, 0x98, 0xcd, 0x2d, 0xde, 0xf2, 0x16, 0x73, 0x4f, 0x77, 0xdb, 0xbb, 0x82, 0x08, 0xdc, + 0xa5, 0xb3, 0xba, 0xfc, 0x9f, 0xd8, 0x83, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x96, 0xbc, 0x9b, + 0x3d, 0x7e, 0x1c, 0x00, 0x00, } diff --git a/s3/proto/s3.proto b/s3/proto/s3.proto index d823345cc..a61aed06f 100644 --- a/s3/proto/s3.proto +++ b/s3/proto/s3.proto @@ -1,7 +1,6 @@ syntax = "proto3"; service S3 { - rpc ListBuckets(BaseRequest) returns (ListBucketsResponse) {} rpc CreateBucket(Bucket) returns (BaseResponse) {} rpc DeleteBucket(Bucket) returns (BaseResponse) {} @@ -13,8 +12,13 @@ service S3 { rpc DeleteObject(DeleteObjectInput) returns (BaseResponse) {} rpc GetTierMap(BaseRequest) returns 
(GetTierMapResponse) {} rpc UpdateObjMeta(UpdateObjMetaRequest) returns (BaseResponse) {} - rpc GetStorageClasses(BaseRequest) returns (GetStorageClassesResponse) {} + rpc GetStorageClasses(BaseRequest) returns (GetStorageClassesResponse) {} rpc GetBackendTypeByTier(GetBackendTypeByTierRequest) returns (GetBackendTypeByTierResponse) {} + rpc DeleteBucketLifecycle(DeleteLifecycleInput) returns (BaseResponse) {} + rpc UpdateBucket(Bucket) returns (BaseResponse) {} + rpc AddUploadRecord(MultipartUploadRecord) returns (BaseResponse) {} + rpc DeleteUploadRecord(MultipartUploadRecord) returns (BaseResponse) {} + rpc ListUploadRecord(ListMultipartUploadRequest) returns (ListMultipartUploadResponse) {} } message ServerSideEncryption { @@ -91,23 +95,37 @@ message Tag { } message LifecycleFilter { - string prefix = 1; - repeated Tag tags = 2; + // Object prefix for the lifecycle filter + string Prefix = 1; + // Tags will not be used in the current release + //repeated Tag tags = 2; } message Action { + // Name of the action: transition or expiration string name = 1; // expiration or transition + // Days after the creation of the object int32 days = 2; - string deleteMarker = 4; // for versioning - int32 tier = 5; // only for transition - string backend = 6; // for cross-cloud transition, if it is not set, that means in-cloud transition + // Delete marker in case of expiration for a versioned bucket + string deleteMarker = 3; + // Storage class tier to which the object is to be transitioned + int32 tier = 4; + // Destination backend of the object/bucket for cross-cloud transition + string backend = 5; + +} + +message AbortMultipartUpload { + // Days after which the abort operation will be performed on an incomplete upload + int32 DaysAfterInitiation = 1; } message LifecycleRule { - string id = 1; - string status = 2; - LifecycleFilter filter = 3; + string ID = 1; + string Status = 2; + LifecycleFilter Filter = 3; repeated Action actions = 4; + AbortMultipartUpload AbortIncompleteMultipartUpload = 5; } message ReplicationInfo { @@ -172,20 +190,18 @@ message Object { string isDeleteMarker = 19; repeated ReplicationInfo replicationInfo = 20; int64 size = 21; - repeated Partion partions =22; + repeated Partion partions = 22; repeated Tag tags = 23; - int64 lastModified =24; - string initFlag=25; - string ETag=26; + int64 lastModified = 24; + string initFlag = 25; + string ETag = 26; int32 tier = 27; } - message ListBucketsResponse { repeated Bucket buckets = 1; } - message BaseResponse { string errorCode = 1; string msg = 2; @@ -213,6 +229,7 @@ message DeleteObjectInput { string Bucket = 1; string Key = 2; string Versionid = 3; + int64 Lastmodified = 4; } message GetObjectInput { @@ -251,7 +268,8 @@ message GetTierMapResponse { message UpdateObjMetaRequest { string ObjKey = 1; string BucketName = 2; - map<string, string> Setting = 3; + int64 LastModified = 3; + map<string, string> Setting = 4; } message StorageClass { @@ -270,3 +288,28 @@ message GetBackendTypeByTierRequest { message GetBackendTypeByTierResponse { repeated string Types = 1; } + +message DeleteLifecycleInput { + string Bucket = 1; + string ruleID = 2; +} + +message MultipartUploadRecord { + string ObjectKey = 1; + string Bucket = 2; + string Backend = 3; + string UploadId = 4; + int64 InitTime = 5; +} + +message ListMultipartUploadRequest { + string Bucket = 1; + string Prefix = 2; + int32 Days = 3; + int32 limit = 4; + int32 offset = 5; +} + +message ListMultipartUploadResponse { + repeated MultipartUploadRecord records = 1; +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go
b/vendor/github.com/golang/protobuf/proto/decode.go index d9aa3c42d..63b0f08be 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -186,7 +186,6 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { if b&0x80 == 0 { goto done } - // x -= 0x80 << 63 // Always zero. return 0, errOverflow diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 000000000..35b882c09 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. 
+func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index d4db5a1c1..f9b6e41b3 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -246,7 +246,8 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { return false } - m1, m2 := e1.value, e2.value + m1 := extensionAsLegacyType(e1.value) + m2 := extensionAsLegacyType(e2.value) if m1 == nil && m2 == nil { // Both have only encoded form. diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index 816a3b9d6..fa88add30 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -185,9 +185,25 @@ type Extension struct { // extension will have only enc set. When such an extension is // accessed using GetExtension (or GetExtensions) desc and value // will be set. - desc *ExtensionDesc + desc *ExtensionDesc + + // value is a concrete value for the extension field. Let the type of + // desc.ExtensionType be the "API type" and the type of Extension.value + // be the "storage type". The API type and storage type are the same except: + // * For scalars (except []byte), the API type uses *T, + // while the storage type uses T. + // * For repeated fields, the API type uses []T, while the storage type + // uses *[]T. + // + // The reason for the divergence is so that the storage type more naturally + // matches what is expected when retrieving the values through the + // protobuf reflection APIs. + // + // The value may only be populated if desc is also populated. value interface{} - enc []byte + + // enc is the raw bytes for the extension field. + enc []byte } // SetRawExtension is for testing only. @@ -334,7 +350,7 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // descriptors with the same field number. return nil, errors.New("proto: descriptor conflict") } - return e.value, nil + return extensionAsLegacyType(e.value), nil } if extension.ExtensionType == nil { @@ -349,11 +365,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // Remember the decoded version and drop the encoded version. // That way it is safe to mutate what we return. - e.value = v + e.value = extensionAsStorageType(v) e.desc = extension e.enc = nil emap[extension.Field] = e - return e.value, nil + return extensionAsLegacyType(e.value), nil } // defaultExtensionValue returns the default value for extension. @@ -488,7 +504,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } typ := reflect.TypeOf(extension.ExtensionType) if typ != reflect.TypeOf(value) { - return errors.New("proto: bad extension value type") + return fmt.Errorf("proto: bad extension value type. 
got: %T, want: %T", value, extension.ExtensionType) } // nil extension values need to be caught early, because the // encoder can't distinguish an ErrNil due to a nil extension @@ -500,7 +516,7 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error } extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: value} + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} return nil } @@ -541,3 +557,51 @@ func RegisterExtension(desc *ExtensionDesc) { func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } + +// extensionAsLegacyType converts a value in the storage type to the API type. +// See Extension.value. +func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts a value in the API type to the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent primitive types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. + if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 75565cc6d..fdd328bb7 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -341,26 +341,6 @@ type Message interface { ProtoMessage() } -// Stats records allocation details about the protocol buffer encoders -// and decoders. Useful for tuning the library itself. -type Stats struct { - Emalloc uint64 // mallocs in encode - Dmalloc uint64 // mallocs in decode - Encode uint64 // number of encodes - Decode uint64 // number of decodes - Chit uint64 // number of cache hits - Cmiss uint64 // number of cache misses - Size uint64 // number of sizes -} - -// Set to true to enable stats collection. -const collectStats = false - -var stats Stats - -// GetStats returns a copy of the global Stats structure. -func GetStats() Stats { return stats } - // A Buffer is a buffer manager for marshaling and unmarshaling // protocol buffers. It may be reused between invocations to // reduce memory usage. It is not necessary to use a Buffer; @@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool { return false } -// ProtoPackageIsVersion2 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the proto package.
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
index 75565cc6d..fdd328bb7 100644
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -341,26 +341,6 @@ type Message interface {
 	ProtoMessage()
 }
 
-// Stats records allocation details about the protocol buffer encoders
-// and decoders. Useful for tuning the library itself.
-type Stats struct {
-	Emalloc uint64 // mallocs in encode
-	Dmalloc uint64 // mallocs in decode
-	Encode  uint64 // number of encodes
-	Decode  uint64 // number of decodes
-	Chit    uint64 // number of cache hits
-	Cmiss   uint64 // number of cache misses
-	Size    uint64 // number of sizes
-}
-
-// Set to true to enable stats collection.
-const collectStats = false
-
-var stats Stats
-
-// GetStats returns a copy of the global Stats structure.
-func GetStats() Stats { return stats }
-
 // A Buffer is a buffer manager for marshaling and unmarshaling
 // protocol buffers. It may be reused between invocations to
 // reduce memory usage. It is not necessary to use a Buffer;
@@ -960,13 +940,19 @@ func isProto3Zero(v reflect.Value) bool {
 	return false
 }
 
-// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion2 = true
+const (
+	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion3 = true
+
+	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion2 = true
 
-// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
-// to assert that that code is compatible with this version of the proto package.
-const ProtoPackageIsVersion1 = true
+	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that that code is compatible with this version of the proto package.
+	ProtoPackageIsVersion1 = true
+)
 
 // InternalMessageInfo is a type used internally by generated .pb.go files.
 // This type is not intended to be used by non-generated code.
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
index 3b6ca41d5..f48a75676 100644
--- a/vendor/github.com/golang/protobuf/proto/message_set.go
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -36,13 +36,7 @@ package proto
  */
 
 import (
-	"bytes"
-	"encoding/json"
 	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"sync"
 )
 
 // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
@@ -145,46 +139,9 @@ func skipVarint(buf []byte) []byte {
 	return buf[i+1:]
 }
 
-// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
-// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSet(exts interface{}) ([]byte, error) {
-	return marshalMessageSet(exts, false)
-}
-
-// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal.
-func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) {
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		var u marshalInfo
-		siz := u.sizeMessageSet(exts)
-		b := make([]byte, 0, siz)
-		return u.appendMessageSet(b, exts, deterministic)
-
-	case map[int32]Extension:
-		// This is an old-style extension map.
-		// Wrap it in a new-style XXX_InternalExtensions.
-		ie := XXX_InternalExtensions{
-			p: &struct {
-				mu           sync.Mutex
-				extensionMap map[int32]Extension
-			}{
-				extensionMap: exts,
-			},
-		}
-
-		var u marshalInfo
-		siz := u.sizeMessageSet(&ie)
-		b := make([]byte, 0, siz)
-		return u.appendMessageSet(b, &ie, deterministic)
-
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-}
-
-// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
 // It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSet(buf []byte, exts interface{}) error {
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
 	var m map[int32]Extension
 	switch exts := exts.(type) {
 	case *XXX_InternalExtensions:
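With the exported MarshalMessageSet helpers gone, generated code reaches the MessageSet path only through the table marshaler (sizeMessageSet/appendMessageSet, touched later in this diff), and only an unexported unmarshalMessageSet remains. For reviewers unfamiliar with the format: each item is a group (field 1) holding a varint type_id (field 2) and the already-encoded message (field 3). An illustrative encoder built only from those wire-format rules; appendMessageSetItem is an invented name, not a library function:

package main

import "fmt"

// appendVarint is a tiny local helper; the real library has its own.
func appendVarint(b []byte, x uint64) []byte {
	for x >= 0x80 {
		b = append(b, byte(x)|0x80)
		x >>= 7
	}
	return append(b, byte(x))
}

// appendMessageSetItem frames one encoded extension message in the
// MessageSet wire format, the same shape appendMessageSet emits.
func appendMessageSetItem(b []byte, typeID int32, msg []byte) []byte {
	b = append(b, 1<<3|3) // field 1, wire type 3: start group
	b = append(b, 2<<3|0) // field 2, wire type 0: varint type_id
	b = appendVarint(b, uint64(typeID))
	b = append(b, 3<<3|2) // field 3, wire type 2: length-delimited message
	b = appendVarint(b, uint64(len(msg)))
	b = append(b, msg...)
	b = append(b, 1<<3|4) // field 1, wire type 4: end group
	return b
}

func main() {
	item := appendMessageSetItem(nil, 12345, []byte{0x08, 0x01})
	fmt.Printf("% x\n", item) // 0b 10 b9 60 1a 02 08 01 0c
}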
@@ -222,93 +179,3 @@ func UnmarshalMessageSet(buf []byte, exts interface{}) error {
 	}
 	return nil
 }
-
-// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
-// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func MarshalMessageSetJSON(exts interface{}) ([]byte, error) {
-	var m map[int32]Extension
-	switch exts := exts.(type) {
-	case *XXX_InternalExtensions:
-		var mu sync.Locker
-		m, mu = exts.extensionsRead()
-		if m != nil {
-			// Keep the extensions map locked until we're done marshaling to prevent
-			// races between marshaling and unmarshaling the lazily-{en,de}coded
-			// values.
-			mu.Lock()
-			defer mu.Unlock()
-		}
-	case map[int32]Extension:
-		m = exts
-	default:
-		return nil, errors.New("proto: not an extension map")
-	}
-	var b bytes.Buffer
-	b.WriteByte('{')
-
-	// Process the map in key order for deterministic output.
-	ids := make([]int32, 0, len(m))
-	for id := range m {
-		ids = append(ids, id)
-	}
-	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
-
-	for i, id := range ids {
-		ext := m[id]
-		msd, ok := messageSetMap[id]
-		if !ok {
-			// Unknown type; we can't render it, so skip it.
-			continue
-		}
-
-		if i > 0 && b.Len() > 1 {
-			b.WriteByte(',')
-		}
-
-		fmt.Fprintf(&b, `"[%s]":`, msd.name)
-
-		x := ext.value
-		if x == nil {
-			x = reflect.New(msd.t.Elem()).Interface()
-			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
-				return nil, err
-			}
-		}
-		d, err := json.Marshal(x)
-		if err != nil {
-			return nil, err
-		}
-		b.Write(d)
-	}
-	b.WriteByte('}')
-	return b.Bytes(), nil
-}
-
-// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
-// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
-func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {
-	// Common-case fast path.
-	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
-		return nil
-	}
-
-	// This is fairly tricky, and it's not clear that it is needed.
-	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
-}
-
-// A global registry of types that can be used in a MessageSet.
-
-var messageSetMap = make(map[int32]messageSetDesc)
-
-type messageSetDesc struct {
-	t    reflect.Type // pointer to struct
-	name string
-}
-
-// RegisterMessageSetType is called from the generated code.
-func RegisterMessageSetType(m Message, fieldNum int32, name string) {
-	messageSetMap[fieldNum] = messageSetDesc{
-		t:    reflect.TypeOf(m),
-		name: name,
-	}
-}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
index b6cad9083..94fa9194a 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -79,10 +79,13 @@ func toPointer(i *Message) pointer {
 
 // toAddrPointer converts an interface to a pointer that points to
 // the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
 	v := reflect.ValueOf(*i)
 	u := reflect.New(v.Type())
 	u.Elem().Set(v)
+	if deref {
+		u = u.Elem()
+	}
 	return pointer{v: u}
 }
 
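pointer_reflect.go is the no-unsafe twin of pointer_unsafe.go below; both grow the same deref flag so extension values held in the storage type *[]T can be flattened to an addressable []T before sizing and marshaling. A toy version of the reflect-based variant (addrOf is a hypothetical name for illustration):

package main

import (
	"fmt"
	"reflect"
)

// addrOf boxes v into a fresh addressable value, optionally stripping one
// pointer level first - the shape of the new deref handling above.
func addrOf(v interface{}, deref bool) reflect.Value {
	rv := reflect.ValueOf(v)
	u := reflect.New(rv.Type()) // *T pointing at a copy of v
	u.Elem().Set(rv)
	if deref {
		u = u.Elem() // stored *T; the caller wants to address T itself
	}
	return u
}

func main() {
	s := []int32{1, 2, 3}
	stored := &s // storage type *[]int32, as extensionAsStorageType produces
	u := addrOf(stored, true)
	fmt.Println(u.Elem().Interface()) // [1 2 3], typed as []int32
}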
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
index d55a335d9..dbfffe071 100644
--- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -85,16 +85,21 @@ func toPointer(i *Message) pointer {
 
 // toAddrPointer converts an interface to a pointer that points to
 // the interface data.
-func toAddrPointer(i *interface{}, isptr bool) pointer {
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
 	// Super-tricky - read or get the address of data word of interface value.
 	if isptr {
 		// The interface is of pointer type, thus it is a direct interface.
 		// The data word is the pointer data itself. We take its address.
-		return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+		p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	} else {
+		// The interface is not of pointer type. The data word is the pointer
+		// to the data.
+		p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
 	}
-	// The interface is not of pointer type. The data word is the pointer
-	// to the data.
-	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+	if deref {
+		p.p = *(*unsafe.Pointer)(p.p)
+	}
+	return p
 }
 
 // valToPointer converts v to a pointer. v must be of pointer type.
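The unsafe variant above leans on gc's two-word interface layout: word 0 is the type, word 1 the data. For a non-pointer-shaped value the data word points at a boxed copy, which is what the (*[2]unsafe.Pointer) index recovers; deref then adds one pointer chase. A sketch of just that trick, assuming the gc runtime layout - fine as an illustration, not something to write outside a vetted library:

package main

import (
	"fmt"
	"unsafe"
)

// dataWord returns the data word of an interface value, mirroring the
// two-word trick above. Layout-dependent: valid for the gc toolchain.
func dataWord(i *interface{}) unsafe.Pointer {
	return (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]
}

func main() {
	var i interface{} = []int32{1, 2, 3} // slice: not pointer-shaped, so it is boxed
	p := dataWord(&i)
	// The data word points at the boxed slice header, so this recovers it.
	s := *(*[]int32)(p)
	fmt.Println(s) // [1 2 3]
}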
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
index 50b99b83a..79668ff5c 100644
--- a/vendor/github.com/golang/protobuf/proto/properties.go
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -334,9 +334,6 @@ func GetProperties(t reflect.Type) *StructProperties {
 	sprop, ok := propertiesMap[t]
 	propertiesMu.RUnlock()
 	if ok {
-		if collectStats {
-			stats.Chit++
-		}
 		return sprop
 	}
 
@@ -346,17 +343,20 @@ func GetProperties(t reflect.Type) *StructProperties {
 	return sprop
 }
 
+type (
+	oneofFuncsIface interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	oneofWrappersIface interface {
+		XXX_OneofWrappers() []interface{}
+	}
+)
+
 // getPropertiesLocked requires that propertiesMu is held.
 func getPropertiesLocked(t reflect.Type) *StructProperties {
 	if prop, ok := propertiesMap[t]; ok {
-		if collectStats {
-			stats.Chit++
-		}
 		return prop
 	}
-	if collectStats {
-		stats.Cmiss++
-	}
 
 	prop := new(StructProperties)
 	// in case of recursive protos, fill this in now.
@@ -391,13 +391,14 @@ func getPropertiesLocked(t reflect.Type) *StructProperties {
 	// Re-order prop.order.
 	sort.Sort(prop)
 
-	type oneofMessage interface {
-		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	var oots []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oots = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oots = m.XXX_OneofWrappers()
 	}
-	if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
-		var oots []interface{}
-		_, _, _, oots = om.XXX_OneofFuncs()
-
+	if len(oots) > 0 {
 		// Interpret oneof metadata.
 		prop.OneofTypes = make(map[string]*OneofProperties)
 		for _, oot := range oots {
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
index b16794496..5cb11fa95 100644
--- a/vendor/github.com/golang/protobuf/proto/table_marshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -87,6 +87,7 @@ type marshalElemInfo struct {
 	sizer     sizer
 	marshaler marshaler
 	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+	deref     bool // dereference the pointer before operating on it; implies isptr
 }
 
 var (
@@ -320,8 +321,11 @@ func (u *marshalInfo) computeMarshalInfo() {
 
 	// get oneof implementers
 	var oneofImplementers []interface{}
-	if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
 		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
 	}
 
 	n := t.NumField()
@@ -407,13 +411,22 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
 		panic("tag is not an integer")
 	}
 	wt := wiretype(tags[0])
+	if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+		t = t.Elem()
+	}
 	sizer, marshaler := typeMarshaler(t, tags, false, false)
+	var deref bool
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		t = reflect.PtrTo(t)
+		deref = true
+	}
 	e = &marshalElemInfo{
 		wiretag:   uint64(tag)<<3 | wt,
 		tagsize:   SizeVarint(uint64(tag) << 3),
 		sizer:     sizer,
 		marshaler: marshaler,
 		isptr:     t.Kind() == reflect.Ptr,
+		deref:     deref,
 	}
 
 	// update cache
@@ -448,7 +461,7 @@ func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
 
 func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
 	fi.field = toField(f)
-	fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
+	fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire.
 	fi.isPointer = true
 	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
 	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
@@ -476,10 +489,6 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI
 		}
 	}
 
-type oneofMessage interface {
-	XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
-}
-
 // wiretype returns the wire encoding of the type.
 func wiretype(encoding string) uint64 {
 	switch encoding {
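Both the properties and marshal tables now probe for either generated-oneof shape: older protoc-gen-go emits XXX_OneofFuncs, newer releases emit XXX_OneofWrappers, and a type switch picks whichever the message implements. A reduced sketch of that probing; newMsg and wrapperA are inventions standing in for generated code, and only the newer shape is shown:

package main

import "fmt"

// oneofWrappersIface matches the newer generated-oneof metadata method.
type oneofWrappersIface interface {
	XXX_OneofWrappers() []interface{}
}

// wrapperA and newMsg play the roles of generated oneof wrapper and message.
type wrapperA struct{ A int32 }

type newMsg struct{}

func (*newMsg) XXX_OneofWrappers() []interface{} {
	return []interface{}{(*wrapperA)(nil)}
}

// oneofWrappers mirrors the type switch in computeMarshalInfo and
// getPropertiesLocked: use whichever metadata method the type has.
func oneofWrappers(m interface{}) []interface{} {
	switch m := m.(type) {
	case oneofWrappersIface:
		return m.XXX_OneofWrappers()
	}
	return nil
}

func main() {
	for _, w := range oneofWrappers(&newMsg{}) {
		fmt.Printf("%T\n", w) // *main.wrapperA
	}
}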
@@ -2310,8 +2319,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
 		for _, k := range m.MapKeys() {
 			ki := k.Interface()
 			vi := m.MapIndex(k).Interface()
-			kaddr := toAddrPointer(&ki, false)    // pointer to key
-			vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+			kaddr := toAddrPointer(&ki, false, false)    // pointer to key
+			vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
 			siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
 			n += siz + SizeVarint(uint64(siz)) + tagsize
 		}
@@ -2329,8 +2338,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
 		for _, k := range keys {
 			ki := k.Interface()
 			vi := m.MapIndex(k).Interface()
-			kaddr := toAddrPointer(&ki, false)    // pointer to key
-			vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+			kaddr := toAddrPointer(&ki, false, false)    // pointer to key
+			vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
 			b = appendVarint(b, tag)
 			siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
 			b = appendVarint(b, uint64(siz))
@@ -2399,7 +2408,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
 		// the last time this function was called.
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		n += ei.sizer(p, ei.tagsize)
 	}
 	mu.Unlock()
@@ -2434,7 +2443,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
 		if !nerr.Merge(err) {
 			return b, err
@@ -2465,7 +2474,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
 		if !nerr.Merge(err) {
 			return b, err
@@ -2510,7 +2519,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
 	}
 	mu.Unlock()
@@ -2553,7 +2562,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
 		if !nerr.Merge(err) {
 			return b, err
@@ -2591,7 +2600,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
 		b = append(b, 1<<3|WireEndGroup)
 		if !nerr.Merge(err) {
@@ -2621,7 +2630,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		n += ei.sizer(p, ei.tagsize)
 	}
 	return n
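The map hunks above show the entry layout the marshaler assumes: the map field itself is length-delimited, and inside each entry the key is field 1 and the value field 2, each with a one-byte tag. A hand-rolled equivalent for a map<int32, string> entry (sketch only; the real code sizes keys and values through keySizer/valSizer):

package main

import "fmt"

func appendVarint(b []byte, x uint64) []byte {
	for x >= 0x80 {
		b = append(b, byte(x)|0x80)
		x >>= 7
	}
	return append(b, byte(x))
}

// appendMapEntry frames one map<int32, string> entry: outer tag, entry
// length, then key as field 1 (varint) and value as field 2 (bytes).
func appendMapEntry(b []byte, mapFieldNum uint64, k int32, v string) []byte {
	entry := appendVarint([]byte{1<<3 | 0}, uint64(k)) // field 1, varint key
	entry = append(entry, 2<<3|2)                      // field 2, length-delimited value
	entry = appendVarint(entry, uint64(len(v)))
	entry = append(entry, v...)

	b = appendVarint(b, mapFieldNum<<3|2) // the map field is length-delimited
	b = appendVarint(b, uint64(len(entry)))
	return append(b, entry...)
}

func main() {
	fmt.Printf("% x\n", appendMapEntry(nil, 4, 7, "hi")) // 22 06 08 07 12 02 68 69
}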
@@ -2656,7 +2665,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ
 		ei := u.getExtElemInfo(e.desc)
 		v := e.value
-		p := toAddrPointer(&v, ei.isptr)
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
 		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
 		if !nerr.Merge(err) {
 			return b, err
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
index ebf1caa56..acee2fc52 100644
--- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -136,7 +136,7 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
 		u.computeUnmarshalInfo()
 	}
 	if u.isMessageSet {
-		return UnmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
 	}
 	var reqMask uint64 // bitmask of required fields we've seen.
 	var errLater error
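unmarshalMessageSet, now dispatched above for message_set_wire_format messages, is the decode-side mirror of the framing sketched earlier: walk the buffer, collecting each item's type_id and message bytes. A self-contained parser for a single item (an illustration under simplifying assumptions - no nested groups, no unknown-field preservation):

package main

import (
	"errors"
	"fmt"
)

func readVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b); i++ {
		x |= uint64(b[i]&0x7f) << (7 * uint(i))
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

// parseMessageSetItem pulls the type_id (field 2) and message bytes
// (field 3) out of one MessageSet group.
func parseMessageSetItem(b []byte) (typeID uint64, msg []byte, err error) {
	for len(b) > 0 {
		tag, n := readVarint(b)
		if n == 0 {
			return 0, nil, errors.New("bad varint")
		}
		b = b[n:]
		switch tag {
		case 1<<3 | 3: // start of the item group
		case 1<<3 | 4: // end of the item group
			return typeID, msg, nil
		case 2<<3 | 0: // type_id
			typeID, n = readVarint(b)
			b = b[n:]
		case 3<<3 | 2: // message bytes
			l, n := readVarint(b)
			b = b[n:]
			msg, b = b[:l], b[l:]
		default:
			return 0, nil, fmt.Errorf("unexpected tag %d", tag)
		}
	}
	return 0, nil, errors.New("truncated item")
}

func main() {
	item := []byte{1<<3 | 3, 2<<3 | 0, 0xb9, 0x60, 3<<3 | 2, 2, 0x08, 0x01, 1<<3 | 4}
	id, msg, _ := parseMessageSetItem(item)
	fmt.Println(id, msg) // 12345 [8 1]
}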
@@ -362,46 +362,48 @@ func (u *unmarshalInfo) computeUnmarshalInfo() {
 	}
 
 	// Find any types associated with oneof fields.
-	// TODO: XXX_OneofFuncs returns more info than we need.  Get rid of some of it?
-	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs")
-	if fn.IsValid() {
-		res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{}
-		for i := res.Len() - 1; i >= 0; i-- {
-			v := res.Index(i)                             // interface{}
-			tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X
-			typ := tptr.Elem()                            // Msg_X
-
-			f := typ.Field(0) // oneof implementers have one field
-			baseUnmarshal := fieldUnmarshaler(&f)
-			tags := strings.Split(f.Tag.Get("protobuf"), ",")
-			fieldNum, err := strconv.Atoi(tags[1])
-			if err != nil {
-				panic("protobuf tag field not an integer: " + tags[1])
-			}
-			var name string
-			for _, tag := range tags {
-				if strings.HasPrefix(tag, "name=") {
-					name = strings.TrimPrefix(tag, "name=")
-					break
-				}
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+	for _, v := range oneofImplementers {
+		tptr := reflect.TypeOf(v) // *Msg_X
+		typ := tptr.Elem()        // Msg_X
+
+		f := typ.Field(0) // oneof implementers have one field
+		baseUnmarshal := fieldUnmarshaler(&f)
+		tags := strings.Split(f.Tag.Get("protobuf"), ",")
+		fieldNum, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tags[1])
+		}
+		var name string
+		for _, tag := range tags {
+			if strings.HasPrefix(tag, "name=") {
+				name = strings.TrimPrefix(tag, "name=")
+				break
 			}
+		}
 
-			// Find the oneof field that this struct implements.
-			// Might take O(n^2) to process all of the oneofs, but who cares.
-			for _, of := range oneofFields {
-				if tptr.Implements(of.ityp) {
-					// We have found the corresponding interface for this struct.
-					// That lets us know where this struct should be stored
-					// when we encounter it during unmarshaling.
-					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
-					u.setTag(fieldNum, of.field, unmarshal, 0, name)
-				}
+		// Find the oneof field that this struct implements.
+		// Might take O(n^2) to process all of the oneofs, but who cares.
+		for _, of := range oneofFields {
+			if tptr.Implements(of.ityp) {
+				// We have found the corresponding interface for this struct.
+				// That lets us know where this struct should be stored
+				// when we encounter it during unmarshaling.
+				unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+				u.setTag(fieldNum, of.field, unmarshal, 0, name)
 			}
 		}
+	}
 
 	// Get extension ranges, if any.
-	fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
 	if fn.IsValid() {
 		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
 			panic("a message with extensions, but no extensions field in " + t.Name())
@@ -1948,7 +1950,7 @@ func encodeVarint(b []byte, x uint64) []byte {
 // If there is an error, it returns 0,0.
 func decodeVarint(b []byte) (uint64, int) {
 	var x, y uint64
-	if len(b) <= 0 {
+	if len(b) == 0 {
 		goto bad
 	}
 	x = uint64(b[0])
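The reworked oneof loop above derives everything it needs from the wrapper struct's protobuf tag: tags[1] carries the field number, and the "name=" entry the field name. A standalone sketch of that extraction; Msg_Name is a made-up wrapper type, not generated code:

package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// Msg_Name plays the role of a generated oneof wrapper (*Msg_X above):
// a one-field struct whose tag carries the field number and name.
type Msg_Name struct {
	Name string `protobuf:"bytes,4,opt,name=name,proto3,oneof"`
}

// fieldNumAndName mirrors the pieces computeUnmarshalInfo reads from the tag.
func fieldNumAndName(v interface{}) (int, string) {
	f := reflect.TypeOf(v).Elem().Field(0) // oneof implementers have one field
	tags := strings.Split(f.Tag.Get("protobuf"), ",")
	fieldNum, err := strconv.Atoi(tags[1])
	if err != nil {
		panic("protobuf tag field not an integer: " + tags[1])
	}
	var name string
	for _, tag := range tags {
		if strings.HasPrefix(tag, "name=") {
			name = strings.TrimPrefix(tag, "name=")
			break
		}
	}
	return fieldNum, name
}

func main() {
	n, name := fieldNumAndName((*Msg_Name)(nil))
	fmt.Println(n, name) // 4 name
}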