diff --git a/cmd/gateway/azure/gateway-azure.go b/cmd/gateway/azure/gateway-azure.go deleted file mode 100644 index b2ebf49..0000000 --- a/cmd/gateway/azure/gateway-azure.go +++ /dev/null @@ -1,1431 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package azure - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/base64" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "path" - "sort" - "strconv" - "strings" - "time" - - "github.com/minio/minio/pkg/env" - - "github.com/Azure/azure-pipeline-go/pipeline" - "github.com/Azure/azure-storage-blob-go/azblob" - humanize "github.com/dustin/go-humanize" - "github.com/minio/cli" - miniogopolicy "github.com/minio/minio-go/v6/pkg/policy" - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/auth" - "github.com/minio/minio/pkg/bucket/policy" - "github.com/minio/minio/pkg/bucket/policy/condition" - sha256 "github.com/minio/sha256-simd" - - minio "github.com/minio/minio/cmd" -) - -var ( - azureUploadChunkSize = getUploadChunkSizeFromEnv(azureChunkSizeEnvVar, strconv.Itoa(azureDefaultUploadChunkSize/humanize.MiByte)) - azureSdkTimeout = time.Duration(azureUploadChunkSize/humanize.MiByte) * azureSdkTimeoutPerMb - azureUploadConcurrency = azureUploadMaxMemoryUsage / azureUploadChunkSize -) - -const ( - // The defaultDialTimeout for communicating with the cloud backends is set - // to 30 seconds in utils.go; the Azure SDK recommends to set a timeout of 60 - // seconds per MB of data a client expects to upload so we must transfer less - // than 0.5 MB per chunk to stay within the defaultDialTimeout tolerance. - // See https://github.com/Azure/azure-storage-blob-go/blob/fc70003/azblob/zc_policy_retry.go#L39-L44 for more details. - // To change the upload chunk size, set the environmental variable MINIO_AZURE_CHUNK_SIZE_MB with a (float) value between 0 and 100 - azureDefaultUploadChunkSize = 25 * humanize.MiByte - azureSdkTimeoutPerMb = 60 * time.Second - azureUploadMaxMemoryUsage = 100 * humanize.MiByte - azureChunkSizeEnvVar = "MINIO_AZURE_CHUNK_SIZE_MB" - - azureDownloadRetryAttempts = 5 - azureBlockSize = 100 * humanize.MiByte - azureS3MinPartSize = 5 * humanize.MiByte - metadataObjectNameTemplate = minio.GatewayMinioSysTmp + "multipart/v1/%s.%x/azure.json" - azureBackend = "azure" - azureMarkerPrefix = "{minio}" - metadataPartNamePrefix = minio.GatewayMinioSysTmp + "multipart/v1/%s.%x" - maxPartsCount = 10000 -) - -func init() { - const azureGatewayTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT] -{{if .VisibleFlags}} -FLAGS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -ENDPOINT: - Azure server endpoint. Default ENDPOINT is https://core.windows.net - -EXAMPLES: - 1. Start minio gateway server for Azure Blob Storage backend on custom endpoint. 
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}azureaccountname
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}azureaccountkey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_AZURE_CHUNK_SIZE_MB{{.AssignmentOperator}}0.25
-     {{.Prompt}} {{.HelpName}} https://azureaccountname.blob.custom.azure.endpoint
-
-  2. Start minio gateway server for Azure Blob Storage backend with edge caching enabled.
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}azureaccountname
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}azureaccountkey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_AZURE_CHUNK_SIZE_MB{{.AssignmentOperator}}25
-     {{.Prompt}} {{.HelpName}}
-`
-
-	minio.RegisterGatewayCommand(cli.Command{
-		Name:               azureBackend,
-		Usage:              "Microsoft Azure Blob Storage",
-		Action:             azureGatewayMain,
-		CustomHelpTemplate: azureGatewayTemplate,
-		HideHelpCommand:    true,
-	})
-}
-
-// Returns true if the marker was generated by Azure, i.e. prefixed with
-// {minio}.
-func isAzureMarker(marker string) bool {
-	return strings.HasPrefix(marker, azureMarkerPrefix)
-}
-
-// Handler for 'minio gateway azure' command line.
-func azureGatewayMain(ctx *cli.Context) {
-	host := ctx.Args().First()
-
-	serverAddr := ctx.GlobalString("address")
-	if serverAddr == "" || serverAddr == ":"+minio.GlobalMinioDefaultPort {
-		serverAddr = ctx.String("address")
-	}
-	// Validate gateway arguments.
-	logger.FatalIf(minio.ValidateGatewayArguments(serverAddr, host), "Invalid argument")
-
-	minio.StartGateway(ctx, &Azure{host})
-}
-
-// getUploadChunkSizeFromEnv returns the parsed chunk size from the environment variable 'MINIO_AZURE_CHUNK_SIZE_MB'.
-// The environment variable should be a floating point number between 0 and 100 representing megabytes.
-// The returned value is the size in bytes.
-func getUploadChunkSizeFromEnv(envvar string, defaultValue string) int {
-	envChunkSize := env.Get(envvar, defaultValue)
-
-	i, err := strconv.ParseFloat(envChunkSize, 64)
-	if err != nil {
-		logger.LogIf(context.Background(), err)
-		return azureDefaultUploadChunkSize
-	}
-
-	if i <= 0 || i > 100 {
-		logger.LogIf(context.Background(), fmt.Errorf("ENV '%v' should be a floating point value between 0 and 100.\n"+
-			"The upload chunk size is set to its default: %s\n", azureChunkSizeEnvVar, defaultValue))
-		return azureDefaultUploadChunkSize
-	}
-
-	return int(i * humanize.MiByte)
-}
-
-// Azure implements Gateway.
-type Azure struct {
-	host string
-}
-
-// Name implements Gateway interface.
-func (g *Azure) Name() string {
-	return azureBackend
-}
-
-// NewGatewayLayer initializes azure blob storage client and returns AzureObjects.
-func (g *Azure) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) { - endpointURL, err := parseStorageEndpoint(g.host, creds.AccessKey) - if err != nil { - return nil, err - } - - credential, err := azblob.NewSharedKeyCredential(creds.AccessKey, creds.SecretKey) - if err != nil { - if _, ok := err.(base64.CorruptInputError); ok { - return &azureObjects{}, errors.New("invalid Azure credentials") - } - return &azureObjects{}, err - } - - metrics := minio.NewMetrics() - - t := &minio.MetricsTransport{ - Transport: minio.NewGatewayHTTPTransport(), - Metrics: metrics, - } - - httpClient := &http.Client{Transport: t} - userAgent := fmt.Sprintf("APN/1.0 MinIO/1.0 MinIO/%s", minio.Version) - - pipeline := azblob.NewPipeline(credential, azblob.PipelineOptions{ - Retry: azblob.RetryOptions{ - TryTimeout: azureSdkTimeout, - }, - HTTPSender: pipeline.FactoryFunc(func(next pipeline.Policy, po *pipeline.PolicyOptions) pipeline.PolicyFunc { - return func(ctx context.Context, request pipeline.Request) (pipeline.Response, error) { - request.Header.Set("User-Agent", userAgent) - resp, err := httpClient.Do(request.WithContext(ctx)) - return pipeline.NewHTTPResponse(resp), err - } - }), - }) - - client := azblob.NewServiceURL(*endpointURL, pipeline) - - return &azureObjects{ - endpoint: endpointURL.String(), - httpClient: httpClient, - client: client, - metrics: metrics, - }, nil -} - -func parseStorageEndpoint(host string, accountName string) (*url.URL, error) { - var endpoint string - - // Load the endpoint url if supplied by the user. - if host != "" { - host, secure, err := minio.ParseGatewayEndpoint(host) - if err != nil { - return nil, err - } - - var protocol string - if secure { - protocol = "https" - } else { - protocol = "http" - } - - // for containerized storage deployments like Azurite or IoT Edge Storage, - // account resolution isn't handled via a hostname prefix like - // `http://${account}.host/${path}` but instead via a route prefix like - // `http://host/${account}/${path}` so adjusting for that here - if !strings.HasPrefix(host, fmt.Sprintf("%s.", accountName)) { - host = fmt.Sprintf("%s/%s", host, accountName) - } - - endpoint = fmt.Sprintf("%s://%s", protocol, host) - } else { - endpoint = fmt.Sprintf("https://%s.blob.core.windows.net", accountName) - } - - return url.Parse(endpoint) -} - -// Production - Azure gateway is production ready. -func (g *Azure) Production() bool { - return true -} - -// s3MetaToAzureProperties converts metadata meant for S3 PUT/COPY -// object into Azure data structures - BlobMetadata and -// BlobProperties. -// -// BlobMetadata contains user defined key-value pairs and each key is -// automatically prefixed with `X-Ms-Meta-` by the Azure SDK. S3 -// user-metadata is translated to Azure metadata by removing the -// `X-Amz-Meta-` prefix. -// -// BlobProperties contains commonly set metadata for objects such as -// Content-Encoding, etc. Such metadata that is accepted by S3 is -// copied into BlobProperties. -// -// Header names are canonicalized as in http.Header. 
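-//
-// As a worked example of the round trip implemented by encodeKey below and
-// decodeKey in azurePropertiesToS3Meta (illustrative values, not from a real
-// request):
-//
-//	// S3 side:  "X-Amz-Meta-X-Test_key" -> strip prefix -> "X-Test_key"
-//	// encode:   split on "_", replace "-" with "_", join with "__"
-//	//           "X-Test_key" -> ["X-Test", "key"] -> ["X_Test", "key"] -> "X_Test__key"
-//	// decode:   split on "__", replace "_" with "-", join with "_"
-//	//           "X_Test__key" -> ["X_Test", "key"] -> ["X-Test", "key"] -> "X-Test_key"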
-func s3MetaToAzureProperties(ctx context.Context, s3Metadata map[string]string) (azblob.Metadata, azblob.BlobHTTPHeaders, error) { - for k := range s3Metadata { - if strings.Contains(k, "--") { - return azblob.Metadata{}, azblob.BlobHTTPHeaders{}, minio.UnsupportedMetadata{} - } - } - - // Encoding technique for each key is used here is as follows - // Each '-' is converted to '_' - // Each '_' is converted to '__' - // With this basic assumption here are some of the expected - // translations for these keys. - // i: 'x-S3cmd_attrs' -> o: 'x_s3cmd__attrs' (mixed) - // i: 'x__test__value' -> o: 'x____test____value' (double '_') - encodeKey := func(key string) string { - tokens := strings.Split(key, "_") - for i := range tokens { - tokens[i] = strings.Replace(tokens[i], "-", "_", -1) - } - return strings.Join(tokens, "__") - } - var blobMeta azblob.Metadata = make(map[string]string) - var err error - var props azblob.BlobHTTPHeaders - for k, v := range s3Metadata { - k = http.CanonicalHeaderKey(k) - switch { - case strings.HasPrefix(k, "X-Amz-Meta-"): - // Strip header prefix, to let Azure SDK - // handle it for storage. - k = strings.Replace(k, "X-Amz-Meta-", "", 1) - blobMeta[encodeKey(k)] = v - // All cases below, extract common metadata that is - // accepted by S3 into BlobProperties for setting on - // Azure - see - // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html - case k == "Cache-Control": - props.CacheControl = v - case k == "Content-Disposition": - props.ContentDisposition = v - case k == "Content-Encoding": - props.ContentEncoding = v - case k == "Content-Md5": - props.ContentMD5, err = base64.StdEncoding.DecodeString(v) - case k == "Content-Type": - props.ContentType = v - case k == "Content-Language": - props.ContentLanguage = v - } - } - return blobMeta, props, err -} - -const ( - partMetaVersionV1 = "1" -) - -// partMetadataV1 struct holds the part specific metadata for -// multipart operations. -type partMetadataV1 struct { - Version string `json:"version"` - Size int64 `json:"Size"` - BlockIDs []string `json:"blockIDs"` - ETag string `json:"etag"` -} - -// Returns the initialized part metadata struct -func newPartMetaV1(uploadID string, partID int) (partMeta *partMetadataV1) { - p := &partMetadataV1{} - p.Version = partMetaVersionV1 - return p -} - -func s3StorageClassToAzureTier(sc string) azblob.AccessTierType { - switch sc { - case "REDUCED_REDUNDANCY": - return azblob.AccessTierCool - case "STANDARD": - return azblob.AccessTierHot - } - return azblob.AccessTierHot -} - -func azureTierToS3StorageClass(tierType string) string { - switch azblob.AccessTierType(tierType) { - case azblob.AccessTierCool: - return "REDUCED_REDUNDANCY" - case azblob.AccessTierHot: - return "STANDARD" - default: - return "STANDARD" - } - -} - -// azurePropertiesToS3Meta converts Azure metadata/properties to S3 -// metadata. It is the reverse of s3MetaToAzureProperties. Azure's -// `.GetMetadata()` lower-cases all header keys, so this is taken into -// account by this function. -func azurePropertiesToS3Meta(meta azblob.Metadata, props azblob.BlobHTTPHeaders, contentLength int64) map[string]string { - // Decoding technique for each key is used here is as follows - // Each '_' is converted to '-' - // Each '__' is converted to '_' - // With this basic assumption here are some of the expected - // translations for these keys. 
- // i: 'x_s3cmd__attrs' -> o: 'x-s3cmd_attrs' (mixed) - // i: 'x____test____value' -> o: 'x__test__value' (double '_') - decodeKey := func(key string) string { - tokens := strings.Split(key, "__") - for i := range tokens { - tokens[i] = strings.Replace(tokens[i], "_", "-", -1) - } - return strings.Join(tokens, "_") - } - - s3Metadata := make(map[string]string) - for k, v := range meta { - // k's `x-ms-meta-` prefix is already stripped by - // Azure SDK, so we add the AMZ prefix. - k = "X-Amz-Meta-" + decodeKey(k) - k = http.CanonicalHeaderKey(k) - s3Metadata[k] = v - } - - // Add each property from BlobProperties that is supported by - // S3 PUT/COPY common metadata. - if props.CacheControl != "" { - s3Metadata["Cache-Control"] = props.CacheControl - } - if props.ContentDisposition != "" { - s3Metadata["Content-Disposition"] = props.ContentDisposition - } - if props.ContentEncoding != "" { - s3Metadata["Content-Encoding"] = props.ContentEncoding - } - if contentLength != 0 { - s3Metadata["Content-Length"] = fmt.Sprintf("%d", contentLength) - } - if len(props.ContentMD5) != 0 { - s3Metadata["Content-MD5"] = base64.StdEncoding.EncodeToString(props.ContentMD5) - } - if props.ContentType != "" { - s3Metadata["Content-Type"] = props.ContentType - } - if props.ContentLanguage != "" { - s3Metadata["Content-Language"] = props.ContentLanguage - } - return s3Metadata -} - -// azureObjects - Implements Object layer for Azure blob storage. -type azureObjects struct { - minio.GatewayUnsupported - endpoint string - httpClient *http.Client - metrics *minio.Metrics - client azblob.ServiceURL // Azure sdk client -} - -// Convert azure errors to minio object layer errors. -func azureToObjectError(err error, params ...string) error { - if err == nil { - return nil - } - - bucket := "" - object := "" - if len(params) >= 1 { - bucket = params[0] - } - if len(params) == 2 { - object = params[1] - } - - azureErr, ok := err.(azblob.StorageError) - if !ok { - // We don't interpret non Azure errors. As azure errors will - // have StatusCode to help to convert to object errors. - return err - } - - serviceCode := string(azureErr.ServiceCode()) - statusCode := azureErr.Response().StatusCode - - return azureCodesToObjectError(err, serviceCode, statusCode, bucket, object) -} - -func azureCodesToObjectError(err error, serviceCode string, statusCode int, bucket string, object string) error { - switch serviceCode { - case "ContainerAlreadyExists": - err = minio.BucketExists{Bucket: bucket} - case "InvalidResourceName": - err = minio.BucketNameInvalid{Bucket: bucket} - case "RequestBodyTooLarge": - err = minio.PartTooBig{} - case "InvalidMetadata": - err = minio.UnsupportedMetadata{} - case "BlobAccessTierNotSupportedForAccountType": - err = minio.NotImplemented{} - default: - switch statusCode { - case http.StatusNotFound: - if object != "" { - err = minio.ObjectNotFound{ - Bucket: bucket, - Object: object, - } - } else { - err = minio.BucketNotFound{Bucket: bucket} - } - case http.StatusBadRequest: - err = minio.BucketNameInvalid{Bucket: bucket} - } - } - return err -} - -// getAzureUploadID - returns new upload ID which is hex encoded 8 bytes random value. -// this 8 byte restriction is needed because Azure block id has a restriction of length -// upto 8 bytes. -func getAzureUploadID() (string, error) { - var id [8]byte - - n, err := io.ReadFull(rand.Reader, id[:]) - if err != nil { - return "", err - } - if n != len(id) { - return "", fmt.Errorf("Unexpected random data size. 
Expected: %d, read: %d", len(id), n)
-	}
-
-	return hex.EncodeToString(id[:]), nil
-}
-
-// checkAzureUploadID - returns an error if the given string is not a valid upload ID.
-func checkAzureUploadID(ctx context.Context, uploadID string) (err error) {
-	if len(uploadID) != 16 {
-		return minio.MalformedUploadID{
-			UploadID: uploadID,
-		}
-	}
-
-	if _, err = hex.DecodeString(uploadID); err != nil {
-		return minio.MalformedUploadID{
-			UploadID: uploadID,
-		}
-	}
-
-	return nil
-}
-
-// parseAzurePart parses the part ID from a part metadata file name.
-func parseAzurePart(metaPartFileName, prefix string) (partID int, err error) {
-	partStr := strings.TrimPrefix(metaPartFileName, prefix+minio.SlashSeparator)
-	if partID, err = strconv.Atoi(partStr); err != nil || partID <= 0 {
-		// Report the original string; string(partID) would misinterpret
-		// the int as a rune.
-		err = fmt.Errorf("invalid part number in block id '%s'", partStr)
-		return
-	}
-	return
-}
-
-// GetMetrics returns this gateway's metrics.
-func (a *azureObjects) GetMetrics(ctx context.Context) (*minio.Metrics, error) {
-	return a.metrics, nil
-}
-
-// Shutdown - save any gateway metadata to disk
-// if necessary and reload upon next restart.
-func (a *azureObjects) Shutdown(ctx context.Context) error {
-	return nil
-}
-
-// StorageInfo - Not relevant to Azure backend.
-func (a *azureObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) {
-	si.Backend.Type = minio.BackendGateway
-	si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, a.httpClient, a.endpoint)
-	return si, nil
-}
-
-// MakeBucketWithLocation - Create a new container on azure backend.
-func (a *azureObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error {
-	if lockEnabled {
-		return minio.NotImplemented{}
-	}
-
-	// Verify if bucket (container-name) is valid.
-	// IsValidBucketName has the same restrictions as the container names
-	// mentioned in the azure documentation, so we simply use the same function here.
-	// Ref - https://docs.microsoft.com/en-us/rest/api/storageservices/naming-and-referencing-containers--blobs--and-metadata
-	if !minio.IsValidBucketName(bucket) {
-		return minio.BucketNameInvalid{Bucket: bucket}
-	}
-
-	containerURL := a.client.NewContainerURL(bucket)
-	_, err := containerURL.Create(ctx, azblob.Metadata{}, azblob.PublicAccessNone)
-	return azureToObjectError(err, bucket)
-}
-
-// GetBucketInfo - Get bucket metadata.
-func (a *azureObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, e error) {
-	// Azure does not have an equivalent call, hence use
-	// ListContainersSegment with a prefix.
-
-	marker := azblob.Marker{}
-
-	for marker.NotDone() {
-		resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{
-			Prefix: bucket,
-		})
-
-		if err != nil {
-			return bi, azureToObjectError(err, bucket)
-		}
-
-		for _, container := range resp.ContainerItems {
-			if container.Name == bucket {
-				t := container.Properties.LastModified
-				return minio.BucketInfo{
-					Name:    bucket,
-					Created: t,
-				}, nil
-			} // else continue
-		}
-
-		marker = resp.NextMarker
-	}
-	return bi, minio.BucketNotFound{Bucket: bucket}
-}
-
-// ListBuckets - Lists all azure containers, uses Azure equivalent `ServiceURL.ListContainersSegment`.
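-//
-// GetBucketInfo above and ListBuckets below drain the paginated listing with
-// the same marker loop; a minimal sketch of the pattern:
-//
-//	marker := azblob.Marker{}
-//	for marker.NotDone() {
-//		resp, err := client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{})
-//		if err != nil {
-//			return err
-//		}
-//		// ... consume resp.ContainerItems ...
-//		marker = resp.NextMarker
-//	}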
-func (a *azureObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) { - marker := azblob.Marker{} - - for marker.NotDone() { - resp, err := a.client.ListContainersSegment(ctx, marker, azblob.ListContainersSegmentOptions{}) - - if err != nil { - return nil, azureToObjectError(err) - } - - for _, container := range resp.ContainerItems { - t := container.Properties.LastModified - buckets = append(buckets, minio.BucketInfo{ - Name: container.Name, - Created: t, - }) - } - - marker = resp.NextMarker - } - return buckets, nil -} - -// DeleteBucket - delete a container on azure, uses Azure equivalent `ContainerURL.Delete`. -func (a *azureObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { - if !forceDelete { - // Check if the container is empty before deleting it. - result, err := a.ListObjects(ctx, bucket, "", "", "", 1) - if err != nil { - return azureToObjectError(err, bucket) - } - if len(result.Objects) > 0 { - return minio.BucketNotEmpty{Bucket: bucket} - } - } - - containerURL := a.client.NewContainerURL(bucket) - _, err := containerURL.Delete(ctx, azblob.ContainerAccessConditions{}) - return azureToObjectError(err, bucket) -} - -// ListObjects - lists all blobs on azure with in a container filtered by prefix -// and marker, uses Azure equivalent `ContainerURL.ListBlobsHierarchySegment`. -// To accommodate S3-compatible applications using -// ListObjectsV1 to use object keys as markers to control the -// listing of objects, we use the following encoding scheme to -// distinguish between Azure continuation tokens and application -// supplied markers. -// -// - NextMarker in ListObjectsV1 response is constructed by -// prefixing "{minio}" to the Azure continuation token, -// e.g, "{minio}CgRvYmoz" -// -// - Application supplied markers are used as-is to list -// object keys that appear after it in the lexicographical order. -func (a *azureObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (result minio.ListObjectsInfo, err error) { - var objects []minio.ObjectInfo - var prefixes []string - - azureListMarker := azblob.Marker{} - if isAzureMarker(marker) { - // If application is using Azure continuation token we should - // strip the azureTokenPrefix we added in the previous list response. - azureMarker := strings.TrimPrefix(marker, azureMarkerPrefix) - azureListMarker.Val = &azureMarker - } - - containerURL := a.client.NewContainerURL(bucket) - for len(objects) == 0 && len(prefixes) == 0 { - resp, err := containerURL.ListBlobsHierarchySegment(ctx, azureListMarker, delimiter, azblob.ListBlobsSegmentOptions{ - Prefix: prefix, - MaxResults: int32(maxKeys), - }) - if err != nil { - return result, azureToObjectError(err, bucket, prefix) - } - - for _, blob := range resp.Segment.BlobItems { - if delimiter == "" && strings.HasPrefix(blob.Name, minio.GatewayMinioSysTmp) { - // We filter out minio.GatewayMinioSysTmp entries in the recursive listing. - continue - } - if !isAzureMarker(marker) && blob.Name <= marker { - // If the application used ListObjectsV1 style marker then we - // skip all the entries till we reach the marker. 
- continue - } - // Populate correct ETag's if possible, this code primarily exists - // because AWS S3 indicates that - // - // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html - // - // Objects created by the PUT Object, POST Object, or Copy operation, - // or through the AWS Management Console, and are encrypted by SSE-S3 - // or plaintext, have ETags that are an MD5 digest of their object data. - // - // Some applications depend on this behavior refer https://github.com/minio/minio/issues/6550 - // So we handle it here and make this consistent. - etag := minio.ToS3ETag(string(blob.Properties.Etag)) - switch { - case len(blob.Properties.ContentMD5) != 0: - etag = hex.EncodeToString(blob.Properties.ContentMD5) - case blob.Metadata["md5sum"] != "": - etag = blob.Metadata["md5sum"] - delete(blob.Metadata, "md5sum") - } - - objects = append(objects, minio.ObjectInfo{ - Bucket: bucket, - Name: blob.Name, - ModTime: blob.Properties.LastModified, - Size: *blob.Properties.ContentLength, - ETag: etag, - ContentType: *blob.Properties.ContentType, - ContentEncoding: *blob.Properties.ContentEncoding, - UserDefined: blob.Metadata, - }) - } - - for _, blobPrefix := range resp.Segment.BlobPrefixes { - if blobPrefix.Name == minio.GatewayMinioSysTmp { - // We don't do strings.HasPrefix(blob.Name, minio.GatewayMinioSysTmp) here so that - // we can use tools like mc to inspect the contents of minio.sys.tmp/ - // It is OK to allow listing of minio.sys.tmp/ in non-recursive mode as it aids in debugging. - continue - } - if !isAzureMarker(marker) && blobPrefix.Name <= marker { - // If the application used ListObjectsV1 style marker then we - // skip all the entries till we reach the marker. - continue - } - prefixes = append(prefixes, blobPrefix.Name) - } - - azureListMarker = resp.NextMarker - if !azureListMarker.NotDone() { - // Reached end of listing. - break - } - } - - result.Objects = objects - result.Prefixes = prefixes - if azureListMarker.NotDone() { - // We add the {minio} prefix so that we know in the subsequent request that this - // marker is a azure continuation token and not ListObjectV1 marker. 
- result.NextMarker = azureMarkerPrefix + *azureListMarker.Val - result.IsTruncated = true - } - return result, nil -} - -// ListObjectsV2 - list all blobs in Azure bucket filtered by prefix -func (a *azureObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result minio.ListObjectsV2Info, err error) { - marker := continuationToken - if marker == "" { - marker = startAfter - } - - var resultV1 minio.ListObjectsInfo - resultV1, err = a.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) - if err != nil { - return result, err - } - - result.Objects = resultV1.Objects - result.Prefixes = resultV1.Prefixes - result.ContinuationToken = continuationToken - result.NextContinuationToken = resultV1.NextMarker - result.IsTruncated = (resultV1.NextMarker != "") - return result, nil -} - -// GetObjectNInfo - returns object info and locked object ReadCloser -func (a *azureObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) { - var objInfo minio.ObjectInfo - objInfo, err = a.GetObjectInfo(ctx, bucket, object, opts) - if err != nil { - return nil, err - } - - var startOffset, length int64 - startOffset, length, err = rs.GetOffsetLength(objInfo.Size) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - err := a.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts) - pw.CloseWithError(err) - }() - // Setup cleanup function to cause the above go-routine to - // exit in case of partial read - pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser) -} - -// GetObject - reads an object from azure. Supports additional -// parameters like offset and length which are synonymous with -// HTTP Range requests. -// -// startOffset indicates the starting read location of the object. -// length indicates the total length of the object. -func (a *azureObjects) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error { - // startOffset cannot be negative. - if startOffset < 0 { - return azureToObjectError(minio.InvalidRange{}, bucket, object) - } - - blobURL := a.client.NewContainerURL(bucket).NewBlobURL(object) - blob, err := blobURL.Download(ctx, startOffset, length, azblob.BlobAccessConditions{}, false) - if err != nil { - return azureToObjectError(err, bucket, object) - } - - rc := blob.Body(azblob.RetryReaderOptions{MaxRetryRequests: azureDownloadRetryAttempts}) - - _, err = io.Copy(writer, rc) - rc.Close() - return err -} - -// GetObjectInfo - reads blob metadata properties and replies back minio.ObjectInfo, -// uses Azure equivalent `BlobURL.GetProperties`. 
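-//
-// The ETag reported to S3 clients follows the same precedence as in
-// ListObjects; a sketch of the selection done in the body below:
-//
-//	etag := minio.ToS3ETag(string(blob.ETag())) // fallback: Azure ETag
-//	if md5 := blob.ContentMD5(); len(md5) != 0 {
-//		etag = hex.EncodeToString(md5) // Content-MD5 property wins
-//	} else if m := metadata["md5sum"]; m != "" {
-//		etag = m // md5 stashed by PutObject for large uploads
-//	}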
-func (a *azureObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - blobURL := a.client.NewContainerURL(bucket).NewBlobURL(object) - blob, err := blobURL.GetProperties(ctx, azblob.BlobAccessConditions{}) - if err != nil { - return objInfo, azureToObjectError(err, bucket, object) - } - - // Populate correct ETag's if possible, this code primarily exists - // because AWS S3 indicates that - // - // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonResponseHeaders.html - // - // Objects created by the PUT Object, POST Object, or Copy operation, - // or through the AWS Management Console, and are encrypted by SSE-S3 - // or plaintext, have ETags that are an MD5 digest of their object data. - // - // Some applications depend on this behavior refer https://github.com/minio/minio/issues/6550 - // So we handle it here and make this consistent. - etag := minio.ToS3ETag(string(blob.ETag())) - metadata := blob.NewMetadata() - contentMD5 := blob.ContentMD5() - switch { - case len(contentMD5) != 0: - etag = hex.EncodeToString(contentMD5) - case metadata["md5sum"] != "": - etag = metadata["md5sum"] - delete(metadata, "md5sum") - } - - return minio.ObjectInfo{ - Bucket: bucket, - UserDefined: azurePropertiesToS3Meta(metadata, blob.NewHTTPHeaders(), blob.ContentLength()), - ETag: etag, - ModTime: blob.LastModified(), - Name: object, - Size: blob.ContentLength(), - ContentType: blob.ContentType(), - ContentEncoding: blob.ContentEncoding(), - StorageClass: azureTierToS3StorageClass(blob.AccessTier()), - }, nil -} - -// PutObject - Create a new blob with the incoming data, -// uses Azure equivalent `UploadStreamToBlockBlob`. -func (a *azureObjects) PutObject(ctx context.Context, bucket, object string, r *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - data := r.Reader - - if data.Size() > azureBlockSize/2 { - if len(opts.UserDefined) == 0 { - opts.UserDefined = map[string]string{} - } - - // Save md5sum for future processing on the object. - opts.UserDefined["x-amz-meta-md5sum"] = r.MD5CurrentHexString() - } - - metadata, properties, err := s3MetaToAzureProperties(ctx, opts.UserDefined) - if err != nil { - return objInfo, azureToObjectError(err, bucket, object) - } - blobURL := a.client.NewContainerURL(bucket).NewBlockBlobURL(object) - - _, err = azblob.UploadStreamToBlockBlob(ctx, data, blobURL, azblob.UploadStreamToBlockBlobOptions{ - BufferSize: azureUploadChunkSize, - MaxBuffers: azureUploadConcurrency, - BlobHTTPHeaders: properties, - Metadata: metadata, - }) - if err != nil { - return objInfo, azureToObjectError(err, bucket, object) - } - - return a.GetObjectInfo(ctx, bucket, object, opts) -} - -// CopyObject - Copies a blob from source container to destination container. -// Uses Azure equivalent `BlobURL.StartCopyFromURL`. 
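-//
-// StartCopyFromURL is asynchronous, so the body below polls the destination
-// blob's properties until CopyStatus reports success. The loop polls without
-// any delay; a variant with a small pause between polls (the time.Sleep here
-// is illustrative, not part of this file) would look like:
-//
-//	for copyStatus != azblob.CopyStatusSuccess {
-//		time.Sleep(100 * time.Millisecond)
-//		props, err := destBlob.GetProperties(ctx, azblob.BlobAccessConditions{})
-//		if err != nil {
-//			return objInfo, azureToObjectError(err, srcBucket, srcObject)
-//		}
-//		copyStatus = props.CopyStatus()
-//	}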
-func (a *azureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { - return minio.ObjectInfo{}, minio.PreConditionFailed{} - } - srcBlob := a.client.NewContainerURL(srcBucket).NewBlobURL(srcObject) - srcBlobURL := srcBlob.URL() - - srcProps, err := srcBlob.GetProperties(ctx, azblob.BlobAccessConditions{}) - if err != nil { - return objInfo, azureToObjectError(err, srcBucket, srcObject) - } - destBlob := a.client.NewContainerURL(destBucket).NewBlobURL(destObject) - - azureMeta, props, err := s3MetaToAzureProperties(ctx, srcInfo.UserDefined) - if err != nil { - return objInfo, azureToObjectError(err, srcBucket, srcObject) - } - props.ContentMD5 = srcProps.ContentMD5() - res, err := destBlob.StartCopyFromURL(ctx, srcBlobURL, azureMeta, azblob.ModifiedAccessConditions{}, azblob.BlobAccessConditions{}) - if err != nil { - return objInfo, azureToObjectError(err, srcBucket, srcObject) - } - // StartCopyFromURL is an asynchronous operation so need to poll for completion, - // see https://docs.microsoft.com/en-us/rest/api/storageservices/copy-blob#remarks. - copyStatus := res.CopyStatus() - for copyStatus != azblob.CopyStatusSuccess { - destProps, err := destBlob.GetProperties(ctx, azblob.BlobAccessConditions{}) - if err != nil { - return objInfo, azureToObjectError(err, srcBucket, srcObject) - } - copyStatus = destProps.CopyStatus() - } - - // Azure will copy metadata from the source object when an empty metadata map is provided. - // To handle the case where the source object should be copied without its metadata, - // the metadata must be removed from the dest. object after the copy completes - if len(azureMeta) == 0 { - _, err = destBlob.SetMetadata(ctx, azureMeta, azblob.BlobAccessConditions{}) - if err != nil { - return objInfo, azureToObjectError(err, srcBucket, srcObject) - } - } - - _, err = destBlob.SetHTTPHeaders(ctx, props, azblob.BlobAccessConditions{}) - if err != nil { - return objInfo, azureToObjectError(err, srcBucket, srcObject) - } - - if _, ok := srcInfo.UserDefined["x-amz-storage-class"]; ok { - _, err = destBlob.SetTier(ctx, s3StorageClassToAzureTier(srcInfo.UserDefined["x-amz-storage-class"]), - azblob.LeaseAccessConditions{}) - if err != nil { - return objInfo, azureToObjectError(err, srcBucket, srcObject) - } - } - - return a.GetObjectInfo(ctx, destBucket, destObject, dstOpts) -} - -// DeleteObject - Deletes a blob on azure container, uses Azure -// equivalent `BlobURL.Delete`. -func (a *azureObjects) DeleteObject(ctx context.Context, bucket, object string) error { - blob := a.client.NewContainerURL(bucket).NewBlobURL(object) - _, err := blob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) - if err != nil { - return azureToObjectError(err, bucket, object) - } - return nil -} - -func (a *azureObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { - errs := make([]error, len(objects)) - for idx, object := range objects { - errs[idx] = a.DeleteObject(ctx, bucket, object) - } - return errs, nil -} - -// ListMultipartUploads - It's decided not to support List Multipart Uploads, hence returning empty result. 
-func (a *azureObjects) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result minio.ListMultipartsInfo, err error) { - // It's decided not to support List Multipart Uploads, hence returning empty result. - return result, nil -} - -type azureMultipartMetadata struct { - Name string `json:"name"` - Metadata map[string]string `json:"metadata"` -} - -func getAzureMetadataObjectName(objectName, uploadID string) string { - return fmt.Sprintf(metadataObjectNameTemplate, uploadID, sha256.Sum256([]byte(objectName))) -} - -// gets the name of part metadata file for multipart upload operations -func getAzureMetadataPartName(objectName, uploadID string, partID int) string { - partMetaPrefix := getAzureMetadataPartPrefix(uploadID, objectName) - return path.Join(partMetaPrefix, fmt.Sprintf("%d", partID)) -} - -// gets the prefix of part metadata file -func getAzureMetadataPartPrefix(uploadID, objectName string) string { - return fmt.Sprintf(metadataPartNamePrefix, uploadID, sha256.Sum256([]byte(objectName))) -} - -func (a *azureObjects) checkUploadIDExists(ctx context.Context, bucketName, objectName, uploadID string) (err error) { - blobURL := a.client.NewContainerURL(bucketName).NewBlobURL( - getAzureMetadataObjectName(objectName, uploadID)) - _, err = blobURL.GetProperties(ctx, azblob.BlobAccessConditions{}) - err = azureToObjectError(err, bucketName, objectName) - oerr := minio.ObjectNotFound{ - Bucket: bucketName, - Object: objectName, - } - if err == oerr { - err = minio.InvalidUploadID{ - UploadID: uploadID, - } - } - return err -} - -// NewMultipartUpload - Use Azure equivalent `BlobURL.Upload`. -func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (uploadID string, err error) { - uploadID, err = getAzureUploadID() - if err != nil { - logger.LogIf(ctx, err) - return "", err - } - metadataObject := getAzureMetadataObjectName(object, uploadID) - - var jsonData []byte - if jsonData, err = json.Marshal(azureMultipartMetadata{Name: object, Metadata: opts.UserDefined}); err != nil { - logger.LogIf(ctx, err) - return "", err - } - - blobURL := a.client.NewContainerURL(bucket).NewBlockBlobURL(metadataObject) - _, err = blobURL.Upload(ctx, bytes.NewReader(jsonData), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) - if err != nil { - return "", azureToObjectError(err, bucket, metadataObject) - } - - return uploadID, nil -} - -func (a *azureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, uploadID string, partID int, - startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (info minio.PartInfo, err error) { - return a.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts) -} - -// PutObjectPart - Use Azure equivalent `BlobURL.StageBlock`. 
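-//
-// Each part is uploaded in azureUploadChunkSize sub-blocks, and the generated
-// block IDs are stored in a per-part metadata file for CommitBlockList to use
-// later. With the default 25 MiB chunk size, a 60 MiB part stages three
-// blocks (illustrative arithmetic):
-//
-//	// sub-block sizes: 25 MiB + 25 MiB + 10 MiB = 60 MiB
-//	// each sub-block gets id := base64.StdEncoding.EncodeToString([]byte(uuid))
-//	// and is appended to partMetaV1.BlockIDs in upload order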
-func (a *azureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (info minio.PartInfo, err error) { - data := r.Reader - if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return info, err - } - - if err = checkAzureUploadID(ctx, uploadID); err != nil { - return info, err - } - - partMetaV1 := newPartMetaV1(uploadID, partID) - subPartSize, subPartNumber := int64(azureUploadChunkSize), 1 - for remainingSize := data.Size(); remainingSize > 0; remainingSize -= subPartSize { - if remainingSize < subPartSize { - subPartSize = remainingSize - } - - id := base64.StdEncoding.EncodeToString([]byte(minio.MustGetUUID())) - blobURL := a.client.NewContainerURL(bucket).NewBlockBlobURL(object) - body, err := ioutil.ReadAll(io.LimitReader(data, subPartSize)) - if err != nil { - return info, azureToObjectError(err, bucket, object) - } - _, err = blobURL.StageBlock(ctx, id, bytes.NewReader(body), azblob.LeaseAccessConditions{}, nil) - if err != nil { - return info, azureToObjectError(err, bucket, object) - } - partMetaV1.BlockIDs = append(partMetaV1.BlockIDs, id) - subPartNumber++ - } - - partMetaV1.ETag = r.MD5CurrentHexString() - partMetaV1.Size = data.Size() - - // maintain per part md5sum in a temporary part metadata file until upload - // is finalized. - metadataObject := getAzureMetadataPartName(object, uploadID, partID) - var jsonData []byte - if jsonData, err = json.Marshal(partMetaV1); err != nil { - logger.LogIf(ctx, err) - return info, err - } - - blobURL := a.client.NewContainerURL(bucket).NewBlockBlobURL(metadataObject) - _, err = blobURL.Upload(ctx, bytes.NewReader(jsonData), azblob.BlobHTTPHeaders{}, azblob.Metadata{}, azblob.BlobAccessConditions{}) - if err != nil { - return info, azureToObjectError(err, bucket, metadataObject) - } - - info.PartNumber = partID - info.ETag = partMetaV1.ETag - info.LastModified = minio.UTCNow() - info.Size = data.Size() - return info, nil -} - -// GetMultipartInfo returns multipart info of the uploadId of the object -func (a *azureObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) { - if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return result, err - } - - result.Bucket = bucket - result.Object = object - result.UploadID = uploadID - return result, nil -} - -// ListObjectParts - Use Azure equivalent `ContainerURL.ListBlobsHierarchySegment`. 
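-//
-// Multipart bookkeeping lives under the reserved temporary prefix; per the
-// name templates above, an upload ID "u" of object "o" is laid out as
-// (sketch):
-//
-//	minio.sys.tmp/multipart/v1/u.<sha256("o")>/azure.json  // upload metadata
-//	minio.sys.tmp/multipart/v1/u.<sha256("o")>/1           // part 1 metadata
-//	minio.sys.tmp/multipart/v1/u.<sha256("o")>/2           // part 2 metadata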
-func (a *azureObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (result minio.ListPartsInfo, err error) {
-	if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
-		return result, err
-	}
-
-	result.Bucket = bucket
-	result.Object = object
-	result.UploadID = uploadID
-	result.MaxParts = maxParts
-
-	azureListMarker := ""
-	marker := azblob.Marker{Val: &azureListMarker}
-
-	var parts []minio.PartInfo
-	var delimiter string
-	maxKeys := maxPartsCount
-	if partNumberMarker == 0 {
-		maxKeys = maxParts
-	}
-	prefix := getAzureMetadataPartPrefix(uploadID, object)
-	containerURL := a.client.NewContainerURL(bucket)
-	resp, err := containerURL.ListBlobsHierarchySegment(ctx, marker, delimiter, azblob.ListBlobsSegmentOptions{
-		Prefix:     prefix,
-		MaxResults: int32(maxKeys),
-	})
-	if err != nil {
-		return result, azureToObjectError(err, bucket, prefix)
-	}
-
-	for _, blob := range resp.Segment.BlobItems {
-		if delimiter == "" && !strings.HasPrefix(blob.Name, minio.GatewayMinioSysTmp) {
-			// We filter out non minio.GatewayMinioSysTmp entries in the recursive listing.
-			continue
-		}
-		// Filter out the temporary upload metadata file for the blob.
-		if strings.HasSuffix(blob.Name, "azure.json") {
-			continue
-		}
-		if !isAzureMarker(*marker.Val) && blob.Name <= *marker.Val {
-			// If the application used ListObjectsV1 style marker then we
-			// skip all the entries till we reach the marker.
-			continue
-		}
-		partNumber, err := parseAzurePart(blob.Name, prefix)
-		if err != nil {
-			return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
-		}
-		var metadata partMetadataV1
-		blobURL := containerURL.NewBlobURL(blob.Name)
-		// Use a distinct name for the download response to avoid shadowing
-		// the loop variable `blob`.
-		dresp, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false)
-		if err != nil {
-			return result, azureToObjectError(fmt.Errorf("Unexpected error"), bucket, object)
-		}
-		metadataReader := dresp.Body(azblob.RetryReaderOptions{MaxRetryRequests: azureDownloadRetryAttempts})
-		if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil {
-			logger.LogIf(ctx, err)
-			return result, azureToObjectError(err, bucket, object)
-		}
-		parts = append(parts, minio.PartInfo{
-			PartNumber: partNumber,
-			Size:       metadata.Size,
-			ETag:       metadata.ETag,
-		})
-	}
-	sort.Slice(parts, func(i int, j int) bool {
-		return parts[i].PartNumber < parts[j].PartNumber
-	})
-	partsCount := 0
-	i := 0
-	if partNumberMarker != 0 {
-		// If the marker was set, skip the entries till the marker.
-		for _, part := range parts {
-			i++
-			if part.PartNumber == partNumberMarker {
-				break
-			}
-		}
-	}
-	for partsCount < maxParts && i < len(parts) {
-		result.Parts = append(result.Parts, parts[i])
-		i++
-		partsCount++
-	}
-
-	if i < len(parts) {
-		result.IsTruncated = true
-		if partsCount != 0 {
-			result.NextPartNumberMarker = result.Parts[partsCount-1].PartNumber
-		}
-	}
-	result.PartNumberMarker = partNumberMarker
-	return result, nil
-}
-
-// AbortMultipartUpload - Azure provides no API to abort an incomplete upload;
-// the uncommitted blocks get garbage-collected by Azure after one week. This
-// implementation therefore only removes the gateway's own upload and part metadata.
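-//
-// The cleanup below mirrors the final phase of CompleteMultipartUpload;
-// in outline:
-//
-//	for {
-//		lpi, err := a.ListObjectParts(ctx, bucket, object, uploadID, marker, maxPartsCount, opts)
-//		// delete each part metadata blob in lpi.Parts
-//		if err != nil || !lpi.IsTruncated {
-//			break
-//		}
-//		marker = lpi.NextPartNumberMarker
-//	}
-//	// finally delete the upload's azure.json metadata blob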
-func (a *azureObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) { - if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return err - } - var partNumberMarker int - for { - lpi, err := a.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxPartsCount, minio.ObjectOptions{}) - if err != nil { - break - } - for _, part := range lpi.Parts { - pblob := a.client.NewContainerURL(bucket).NewBlobURL( - getAzureMetadataPartName(object, uploadID, part.PartNumber)) - pblob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) - } - partNumberMarker = lpi.NextPartNumberMarker - if !lpi.IsTruncated { - break - } - } - - blobURL := a.client.NewContainerURL(bucket).NewBlobURL( - getAzureMetadataObjectName(object, uploadID)) - _, err = blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) - return err -} - -// CompleteMultipartUpload - Use Azure equivalent `BlobURL.CommitBlockList`. -func (a *azureObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - metadataObject := getAzureMetadataObjectName(object, uploadID) - if err = a.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return objInfo, err - } - - if err = checkAzureUploadID(ctx, uploadID); err != nil { - return objInfo, err - } - - blobURL := a.client.NewContainerURL(bucket).NewBlobURL(metadataObject) - blob, err := blobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false) - if err != nil { - return objInfo, azureToObjectError(err, bucket, metadataObject) - } - - var metadata azureMultipartMetadata - metadataReader := blob.Body(azblob.RetryReaderOptions{MaxRetryRequests: azureDownloadRetryAttempts}) - if err = json.NewDecoder(metadataReader).Decode(&metadata); err != nil { - logger.LogIf(ctx, err) - return objInfo, azureToObjectError(err, bucket, metadataObject) - } - - objBlob := a.client.NewContainerURL(bucket).NewBlockBlobURL(object) - - var allBlocks []string - for i, part := range uploadedParts { - var partMetadata partMetadataV1 - partMetadataObject := getAzureMetadataPartName(object, uploadID, part.PartNumber) - pblobURL := a.client.NewContainerURL(bucket).NewBlobURL(partMetadataObject) - pblob, err := pblobURL.Download(ctx, 0, azblob.CountToEnd, azblob.BlobAccessConditions{}, false) - if err != nil { - return objInfo, azureToObjectError(err, bucket, partMetadataObject) - } - - partMetadataReader := pblob.Body(azblob.RetryReaderOptions{MaxRetryRequests: azureDownloadRetryAttempts}) - if err = json.NewDecoder(partMetadataReader).Decode(&partMetadata); err != nil { - logger.LogIf(ctx, err) - return objInfo, azureToObjectError(err, bucket, partMetadataObject) - } - - if partMetadata.ETag != part.ETag { - return objInfo, minio.InvalidPart{} - } - allBlocks = append(allBlocks, partMetadata.BlockIDs...) 
- if i < (len(uploadedParts)-1) && partMetadata.Size < azureS3MinPartSize { - return objInfo, minio.PartTooSmall{ - PartNumber: uploadedParts[i].PartNumber, - PartSize: partMetadata.Size, - PartETag: uploadedParts[i].ETag, - } - } - } - - objMetadata, objProperties, err := s3MetaToAzureProperties(ctx, metadata.Metadata) - if err != nil { - return objInfo, azureToObjectError(err, bucket, object) - } - objMetadata["md5sum"] = minio.ComputeCompleteMultipartMD5(uploadedParts) - - _, err = objBlob.CommitBlockList(ctx, allBlocks, objProperties, objMetadata, azblob.BlobAccessConditions{}) - if err != nil { - return objInfo, azureToObjectError(err, bucket, object) - } - var partNumberMarker int - for { - lpi, err := a.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxPartsCount, minio.ObjectOptions{}) - if err != nil { - break - } - for _, part := range lpi.Parts { - pblob := a.client.NewContainerURL(bucket).NewBlobURL( - getAzureMetadataPartName(object, uploadID, part.PartNumber)) - pblob.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) - } - partNumberMarker = lpi.NextPartNumberMarker - if !lpi.IsTruncated { - break - } - } - - _, derr := blobURL.Delete(ctx, azblob.DeleteSnapshotsOptionNone, azblob.BlobAccessConditions{}) - logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID) - logger.LogIf(ctx, derr) - - return a.GetObjectInfo(ctx, bucket, object, minio.ObjectOptions{}) -} - -// SetBucketPolicy - Azure supports three types of container policies: -// azblob.PublicAccessContainer - readonly in minio terminology -// azblob.PublicAccessBlob - readonly without listing in minio terminology -// azblob.PublicAccessNone - none in minio terminology -// As the common denominator for minio and azure is readonly and none, we support -// these two policies at the bucket level. -func (a *azureObjects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { - policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy) - if err != nil { - // This should not happen. - logger.LogIf(ctx, err) - return azureToObjectError(err, bucket) - } - - var policies []minio.BucketAccessPolicy - for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") { - policies = append(policies, minio.BucketAccessPolicy{ - Prefix: prefix, - Policy: policy, - }) - } - prefix := bucket + "/*" // For all objects inside the bucket. 
- if len(policies) != 1 { - return minio.NotImplemented{} - } - if policies[0].Prefix != prefix { - return minio.NotImplemented{} - } - if policies[0].Policy != miniogopolicy.BucketPolicyReadOnly { - return minio.NotImplemented{} - } - perm := azblob.PublicAccessContainer - container := a.client.NewContainerURL(bucket) - _, err = container.SetAccessPolicy(ctx, perm, nil, azblob.ContainerAccessConditions{}) - return azureToObjectError(err, bucket) -} - -// GetBucketPolicy - Get the container ACL and convert it to canonical []bucketAccessPolicy -func (a *azureObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { - container := a.client.NewContainerURL(bucket) - perm, err := container.GetAccessPolicy(ctx, azblob.LeaseAccessConditions{}) - if err != nil { - return nil, azureToObjectError(err, bucket) - } - - permAccessType := perm.BlobPublicAccess() - - if permAccessType == azblob.PublicAccessNone { - return nil, minio.BucketPolicyNotFound{Bucket: bucket} - } else if permAccessType != azblob.PublicAccessContainer { - return nil, azureToObjectError(minio.NotImplemented{}) - } - - return &policy.Policy{ - Version: policy.DefaultVersion, - Statements: []policy.Statement{ - policy.NewStatement( - policy.Allow, - policy.NewPrincipal("*"), - policy.NewActionSet( - policy.GetBucketLocationAction, - policy.ListBucketAction, - policy.GetObjectAction, - ), - policy.NewResourceSet( - policy.NewResource(bucket, ""), - policy.NewResource(bucket, "*"), - ), - condition.NewFunctions(), - ), - }, - }, nil -} - -// DeleteBucketPolicy - Set the container ACL to "private" -func (a *azureObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error { - perm := azblob.PublicAccessNone - containerURL := a.client.NewContainerURL(bucket) - _, err := containerURL.SetAccessPolicy(ctx, perm, nil, azblob.ContainerAccessConditions{}) - return azureToObjectError(err) -} - -// IsCompressionSupported returns whether compression is applicable for this layer. -func (a *azureObjects) IsCompressionSupported() bool { - return false -} - -// IsReady returns whether the layer is ready to take requests. -func (a *azureObjects) IsReady(ctx context.Context) bool { - return minio.IsBackendOnline(ctx, a.httpClient, a.endpoint) -} diff --git a/cmd/gateway/azure/gateway-azure_test.go b/cmd/gateway/azure/gateway-azure_test.go deleted file mode 100644 index 449aa7d..0000000 --- a/cmd/gateway/azure/gateway-azure_test.go +++ /dev/null @@ -1,340 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package azure - -import ( - "encoding/base64" - "fmt" - "net/http" - "os" - "reflect" - "strconv" - "testing" - - "github.com/dustin/go-humanize" - - "github.com/Azure/azure-storage-blob-go/azblob" - minio "github.com/minio/minio/cmd" -) - -func TestParseStorageEndpoint(t *testing.T) { - testCases := []struct { - host string - accountName string - expectedURL string - expectedErr error - }{ - { - "", "myaccount", "https://myaccount.blob.core.windows.net", nil, - }, - { - "myaccount.blob.core.usgovcloudapi.net", "myaccount", "https://myaccount.blob.core.usgovcloudapi.net", nil, - }, - { - "http://localhost:10000", "myaccount", "http://localhost:10000/myaccount", nil, - }, - } - for i, testCase := range testCases { - endpointURL, err := parseStorageEndpoint(testCase.host, testCase.accountName) - if err != testCase.expectedErr { - t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.expectedErr, err) - } - if endpointURL.String() != testCase.expectedURL { - t.Errorf("Test %d: Expected URL %s, got %s", i+1, testCase.expectedURL, endpointURL.String()) - } - } -} - -// Test canonical metadata. -func TestS3MetaToAzureProperties(t *testing.T) { - headers := map[string]string{ - "accept-encoding": "gzip", - "content-encoding": "gzip", - "cache-control": "age: 3600", - "content-disposition": "dummy", - "content-length": "10", - "content-type": "application/javascript", - "X-Amz-Meta-Hdr": "value", - "X-Amz-Meta-X_test_key": "value", - "X-Amz-Meta-X__test__key": "value", - "X-Amz-Meta-X-Test__key": "value", - "X-Amz-Meta-X-Amz-Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "X-Amz-Meta-X-Amz-Matdesc": "{}", - "X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==", - } - // Only X-Amz-Meta- prefixed entries will be returned in - // Metadata (without the prefix!) - expectedHeaders := map[string]string{ - "Hdr": "value", - "X__test__key": "value", - "X____test____key": "value", - "X_Test____key": "value", - "X_Amz_Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "X_Amz_Matdesc": "{}", - "X_Amz_Iv": "eWmyryl8kq+EVnnsE7jpOg==", - } - meta, _, err := s3MetaToAzureProperties(minio.GlobalContext, headers) - if err != nil { - t.Fatalf("Test failed, with %s", err) - } - if !reflect.DeepEqual(map[string]string(meta), expectedHeaders) { - t.Fatalf("Test failed, expected %#v, got %#v", expectedHeaders, meta) - } - headers = map[string]string{ - "invalid--meta": "value", - } - _, _, err = s3MetaToAzureProperties(minio.GlobalContext, headers) - if err != nil { - if _, ok := err.(minio.UnsupportedMetadata); !ok { - t.Fatalf("Test failed with unexpected error %s, expected UnsupportedMetadata", err) - } - } - - headers = map[string]string{ - "content-md5": "Dce7bmCX61zvxzP5QmfelQ==", - } - _, props, err := s3MetaToAzureProperties(minio.GlobalContext, headers) - if err != nil { - t.Fatalf("Test failed, with %s", err) - } - if base64.StdEncoding.EncodeToString(props.ContentMD5) != headers["content-md5"] { - t.Fatalf("Test failed, expected %s, got %s", headers["content-md5"], props.ContentMD5) - } -} - -func TestAzurePropertiesToS3Meta(t *testing.T) { - // Just one testcase. Adding more test cases does not add value to the testcase - // as azureToS3Metadata() just adds a prefix. 
- metadata := map[string]string{ - "first_name": "myname", - "x_test_key": "value", - "x_test__key": "value", - "x__test__key": "value", - "x____test____key": "value", - "x_amz_key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "x_amz_matdesc": "{}", - "x_amz_iv": "eWmyryl8kq+EVnnsE7jpOg==", - } - expectedMeta := map[string]string{ - "X-Amz-Meta-First-Name": "myname", - "X-Amz-Meta-X-Test-Key": "value", - "X-Amz-Meta-X-Test_key": "value", - "X-Amz-Meta-X_test_key": "value", - "X-Amz-Meta-X__test__key": "value", - "X-Amz-Meta-X-Amz-Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "X-Amz-Meta-X-Amz-Matdesc": "{}", - "X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==", - "Cache-Control": "max-age: 3600", - "Content-Disposition": "dummy", - "Content-Encoding": "gzip", - "Content-Length": "10", - "Content-MD5": base64.StdEncoding.EncodeToString([]byte("base64-md5")), - "Content-Type": "application/javascript", - } - actualMeta := azurePropertiesToS3Meta(metadata, azblob.BlobHTTPHeaders{ - CacheControl: "max-age: 3600", - ContentDisposition: "dummy", - ContentEncoding: "gzip", - ContentMD5: []byte("base64-md5"), - ContentType: "application/javascript", - }, 10) - if !reflect.DeepEqual(actualMeta, expectedMeta) { - t.Fatalf("Test failed, expected %#v, got %#v", expectedMeta, actualMeta) - } -} - -// Add tests for azure to object error (top level). -func TestAzureToObjectError(t *testing.T) { - testCases := []struct { - actualErr error - expectedErr error - bucket, object string - }{ - { - nil, nil, "", "", - }, - { - fmt.Errorf("Non azure error"), - fmt.Errorf("Non azure error"), "", "", - }, - } - for i, testCase := range testCases { - if err := azureToObjectError(testCase.actualErr, testCase.bucket, testCase.object); err != nil { - if err.Error() != testCase.expectedErr.Error() { - t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.expectedErr, err) - } - } else { - if testCase.expectedErr != nil { - t.Errorf("Test %d expected an error but one was not produced", i+1) - } - } - } -} - -// Add tests for azure to object error (internal). 
-func TestAzureCodesToObjectError(t *testing.T) {
-	testCases := []struct {
-		originalErr       error
-		actualServiceCode string
-		actualStatusCode  int
-		expectedErr       error
-		bucket, object    string
-	}{
-		{
-			nil, "ContainerAlreadyExists", 0,
-			minio.BucketExists{Bucket: "bucket"}, "bucket", "",
-		},
-		{
-			nil, "InvalidResourceName", 0,
-			minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
-		},
-		{
-			nil, "RequestBodyTooLarge", 0,
-			minio.PartTooBig{}, "", "",
-		},
-		{
-			nil, "InvalidMetadata", 0,
-			minio.UnsupportedMetadata{}, "", "",
-		},
-		{
-			nil, "", http.StatusNotFound,
-			minio.ObjectNotFound{
-				Bucket: "bucket",
-				Object: "object",
-			}, "bucket", "object",
-		},
-		{
-			nil, "", http.StatusNotFound,
-			minio.BucketNotFound{Bucket: "bucket"}, "bucket", "",
-		},
-		{
-			nil, "", http.StatusBadRequest,
-			minio.BucketNameInvalid{Bucket: "bucket."}, "bucket.", "",
-		},
-		{
-			fmt.Errorf("unhandled azure error"), "", http.StatusForbidden,
-			fmt.Errorf("unhandled azure error"), "", "",
-		},
-	}
-	for i, testCase := range testCases {
-		if err := azureCodesToObjectError(testCase.originalErr, testCase.actualServiceCode, testCase.actualStatusCode, testCase.bucket, testCase.object); err != nil {
-			if err.Error() != testCase.expectedErr.Error() {
-				t.Errorf("Test %d: Expected error %s, got %s", i+1, testCase.expectedErr, err)
-			}
-		} else {
-			if testCase.expectedErr != nil {
-				t.Errorf("Test %d expected an error but one was not produced", i+1)
-			}
-		}
-	}
-}
-
-func TestAnonErrToObjectErr(t *testing.T) {
-	testCases := []struct {
-		name       string
-		statusCode int
-		params     []string
-		wantErr    error
-	}{
-		{"ObjectNotFound",
-			http.StatusNotFound,
-			[]string{"testBucket", "testObject"},
-			minio.ObjectNotFound{Bucket: "testBucket", Object: "testObject"},
-		},
-		{"BucketNotFound",
-			http.StatusNotFound,
-			[]string{"testBucket", ""},
-			minio.BucketNotFound{Bucket: "testBucket"},
-		},
-		{"ObjectNameInvalid",
-			http.StatusBadRequest,
-			[]string{"testBucket", "testObject"},
-			minio.ObjectNameInvalid{Bucket: "testBucket", Object: "testObject"},
-		},
-		{"BucketNameInvalid",
-			http.StatusBadRequest,
-			[]string{"testBucket", ""},
-			minio.BucketNameInvalid{Bucket: "testBucket"},
-		},
-	}
-	for _, test := range testCases {
-		t.Run(test.name, func(t *testing.T) {
-			if err := minio.AnonErrToObjectErr(test.statusCode, test.params...); !reflect.DeepEqual(err, test.wantErr) {
-				t.Errorf("anonErrToObjectErr() error = %v, wantErr %v", err, test.wantErr)
-			}
-		})
-	}
-}
-
-func TestCheckAzureUploadID(t *testing.T) {
-	invalidUploadIDs := []string{
-		"123456789abcdefg",
-		"hello world",
-		"0x1234567890",
-		"1234567890abcdef1234567890abcdef",
-	}
-
-	for _, uploadID := range invalidUploadIDs {
-		if err := checkAzureUploadID(minio.GlobalContext, uploadID); err == nil {
-			t.Fatalf("%s: expected: <error>, got: <nil>", uploadID)
-		}
-	}
-
-	validUploadIDs := []string{
-		"1234567890abcdef",
-		"1122334455667788",
-	}
-
-	for _, uploadID := range validUploadIDs {
-		if err := checkAzureUploadID(minio.GlobalContext, uploadID); err != nil {
-			t.Fatalf("%s: expected: <nil>, got: %s", uploadID, err)
-		}
-	}
-}
-
-func TestParsingUploadChunkSize(t *testing.T) {
-	key := "MINIO_AZURE_CHUNK_SIZE_MB"
-	invalidValues := []string{
-		"",
-		"0,3",
-		"100.1",
-		"-1",
-	}
-
-	for i, chunkValue := range invalidValues {
-		os.Setenv(key, chunkValue)
-		result := getUploadChunkSizeFromEnv(key, strconv.Itoa(azureDefaultUploadChunkSize/humanize.MiByte))
-		if result != azureDefaultUploadChunkSize {
-			t.Errorf("Test %d: expected: %d, got: %d", i+1, azureDefaultUploadChunkSize,
result) - } - } - - validValues := []string{ - "1", - "1.25", - "50", - "99", - } - for i, chunkValue := range validValues { - os.Setenv(key, chunkValue) - result := getUploadChunkSizeFromEnv(key, strconv.Itoa(azureDefaultUploadChunkSize/humanize.MiByte)) - if result == azureDefaultUploadChunkSize { - t.Errorf("Test %d: expected: %d, got: %d", i+1, azureDefaultUploadChunkSize, result) - } - } - -} diff --git a/cmd/gateway/gateway.go b/cmd/gateway/gateway.go deleted file mode 100644 index 627ae11..0000000 --- a/cmd/gateway/gateway.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package gateway - -import ( - // Import all gateways please keep the order - - // NAS - _ "github.com/minio/minio/cmd/gateway/nas" - - // Azure - _ "github.com/minio/minio/cmd/gateway/azure" - - // S3 - _ "github.com/minio/minio/cmd/gateway/s3" - - // HDFS - _ "github.com/minio/minio/cmd/gateway/hdfs" - - // GCS (use only if you must, GCS already supports S3 API) - _ "github.com/minio/minio/cmd/gateway/gcs" - // gateway functionality is frozen, no new gateways are being implemented - // or considered for upstream inclusion at this point in time. if needed - // please keep a fork of the project. -) diff --git a/cmd/gateway/gcs/gateway-gcs.go b/cmd/gateway/gcs/gateway-gcs.go deleted file mode 100644 index 00df5c1..0000000 --- a/cmd/gateway/gcs/gateway-gcs.go +++ /dev/null @@ -1,1505 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package gcs - -import ( - "context" - "encoding/base64" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "math" - "net/http" - "os" - "path" - "strconv" - - "regexp" - "strings" - "time" - - "cloud.google.com/go/storage" - humanize "github.com/dustin/go-humanize" - "github.com/minio/cli" - miniogopolicy "github.com/minio/minio-go/v6/pkg/policy" - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/auth" - "github.com/minio/minio/pkg/bucket/policy" - "github.com/minio/minio/pkg/bucket/policy/condition" - "github.com/minio/minio/pkg/env" - - "google.golang.org/api/googleapi" - "google.golang.org/api/iterator" - "google.golang.org/api/option" - - minio "github.com/minio/minio/cmd" -) - -var ( - // Project ID format is not valid. 
-	errGCSInvalidProjectID = fmt.Errorf("GCS project id is either empty or invalid")
-
-	// Project ID not found
-	errGCSProjectIDNotFound = fmt.Errorf("Unknown project id")
-
-	// Invalid format.
-	errGCSFormat = fmt.Errorf("Unknown format")
-)
-
-const (
-	// Path where multipart objects are saved.
-	// If we change the backend format we will use a different url path like /multipart/v2
-	// but we will not migrate old data.
-	gcsMinioMultipartPathV1 = minio.GatewayMinioSysTmp + "multipart/v1"
-
-	// Multipart meta file.
-	gcsMinioMultipartMeta = "gcs.json"
-
-	// gcs.json version number
-	gcsMinioMultipartMetaCurrentVersion = "1"
-
-	// Prefix added to a GCS returned marker to differentiate it
-	// from a user supplied marker.
-	gcsTokenPrefix = "{minio}"
-
-	// Maximum component object count to create a composite object.
-	// Refer https://cloud.google.com/storage/docs/composite-objects
-	gcsMaxComponents = 32
-
-	// Every 24 hours we scan minio.sys.tmp to delete expired multiparts.
-	gcsCleanupInterval = time.Hour * 24
-
-	// The cleanup routine deletes files older than 2 weeks in minio.sys.tmp
-	gcsMultipartExpiry = time.Hour * 24 * 14
-
-	// Project ID key in credentials.json
-	gcsProjectIDKey = "project_id"
-
-	gcsBackend = "gcs"
-)
-
-func init() {
-	const gcsGatewayTemplate = `NAME:
-  {{.HelpName}} - {{.Usage}}
-
-USAGE:
-  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [PROJECTID]
-{{if .VisibleFlags}}
-FLAGS:
-  {{range .VisibleFlags}}{{.}}
-  {{end}}{{end}}
-PROJECTID:
-  optional GCS project-id, expected when the GOOGLE_APPLICATION_CREDENTIALS env is not set
-
-GOOGLE_APPLICATION_CREDENTIALS:
-  path to credentials.json; generate it from here: https://developers.google.com/identity/protocols/application-default-credentials
-
-EXAMPLES:
-  1. Start minio gateway server for GCS backend
-     {{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
-     {{.Prompt}} {{.HelpName}} mygcsprojectid
-
-  2. Start minio gateway server for GCS backend with edge caching enabled
-     {{.Prompt}} {{.EnvVarSetCommand}} GOOGLE_APPLICATION_CREDENTIALS{{.AssignmentOperator}}/path/to/credentials.json
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*;*.png"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
-     {{.Prompt}} {{.HelpName}} mygcsprojectid
-`
-
-	minio.RegisterGatewayCommand(cli.Command{
-		Name:               gcsBackend,
-		Usage:              "Google Cloud Storage",
-		Action:             gcsGatewayMain,
-		CustomHelpTemplate: gcsGatewayTemplate,
-		HideHelpCommand:    true,
-	})
-}
-
-// Handler for 'minio gateway gcs' command line.
-func gcsGatewayMain(ctx *cli.Context) {
-	projectID := ctx.Args().First()
-	if projectID == "" && os.Getenv("GOOGLE_APPLICATION_CREDENTIALS") == "" {
-		logger.LogIf(minio.GlobalContext, errGCSProjectIDNotFound, logger.Application)
-		cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
-	}
-	if projectID != "" && !isValidGCSProjectIDFormat(projectID) {
-		reqInfo := (&logger.ReqInfo{}).AppendTags("projectID", ctx.Args().First())
-		logCtx := logger.SetReqInfo(minio.GlobalContext, reqInfo)
-		logger.LogIf(logCtx, errGCSInvalidProjectID, logger.Application)
-		cli.ShowCommandHelpAndExit(ctx, "gcs", 1)
-	}
-
-	minio.StartGateway(ctx, &GCS{projectID})
-}
-
-// GCS implements Gateway.
-type GCS struct {
-	projectID string
-}
-
-// Name returns the name of gcs ObjectLayer.
-func (g *GCS) Name() string {
-	return gcsBackend
-}
-
-// NewGatewayLayer returns gcs ObjectLayer.
-func (g *GCS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
-	ctx := minio.GlobalContext
-
-	var err error
-	if g.projectID == "" {
-		// If project ID is not provided on command line, we figure it out
-		// from the credentials.json file.
-		g.projectID, err = gcsParseProjectID(env.Get("GOOGLE_APPLICATION_CREDENTIALS", ""))
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	metrics := minio.NewMetrics()
-
-	t := &minio.MetricsTransport{
-		Transport: minio.NewGatewayHTTPTransport(),
-		Metrics:   metrics,
-	}
-
-	// Initialize a GCS client.
-	// Send user-agent in this format for Google to obtain usage insights while participating in the
-	// Google Cloud Technology Partners (https://cloud.google.com/partners/)
-	client, err := storage.NewClient(ctx, option.WithUserAgent(fmt.Sprintf("MinIO/%s (GPN:MinIO;)", minio.Version)))
-	if err != nil {
-		return nil, err
-	}
-
-	gcs := &gcsGateway{
-		client:    client,
-		projectID: g.projectID,
-		metrics:   metrics,
-		httpClient: &http.Client{
-			Transport: t,
-		},
-	}
-
-	// Start background process to cleanup old files in minio.sys.tmp
-	go gcs.CleanupGCSMinioSysTmp(ctx)
-	return gcs, nil
-}
-
-// Production - GCS gateway is production ready.
-func (g *GCS) Production() bool {
-	return true
-}
-
-// Stored in gcs.json - the contents of this file are not used anywhere. It can
-// be used for debugging purposes.
-type gcsMultipartMetaV1 struct {
-	Version string `json:"version"` // Version number
-	Bucket  string `json:"bucket"`  // Bucket name
-	Object  string `json:"object"`  // Object name
-}
-
-// Returns name of the multipart meta object.
-func gcsMultipartMetaName(uploadID string) string {
-	return fmt.Sprintf("%s/%s/%s", gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta)
-}
-
-// Returns name of the part object.
-func gcsMultipartDataName(uploadID string, partNumber int, etag string) string {
-	return fmt.Sprintf("%s/%s/%05d.%s", gcsMinioMultipartPathV1, uploadID, partNumber, etag)
-}
-
-// Convert GCS errors to minio object layer errors.
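-// A few illustrative mappings (not exhaustive, see the cases handled below):
-//
-//	"storage: bucket doesn't exist"     -> minio.BucketNotFound
-//	"storage: object doesn't exist"     -> minio.ObjectNotFound, or
-//	                                       minio.InvalidUploadID when an
-//	                                       uploadID param was passed
-//	googleapi reason "notFound"         -> minio.ObjectNotFound / minio.BucketNotFound
-//	googleapi reason "conflict"         -> minio.BucketAlreadyOwnedByYou,
-//	                                       minio.BucketAlreadyExists or
-//	                                       minio.BucketNotEmpty, by message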
-func gcsToObjectError(err error, params ...string) error { - if err == nil { - return nil - } - - bucket := "" - object := "" - uploadID := "" - if len(params) >= 1 { - bucket = params[0] - } - if len(params) == 2 { - object = params[1] - } - if len(params) == 3 { - uploadID = params[2] - } - - // in some cases just a plain error is being returned - switch err.Error() { - case "storage: bucket doesn't exist": - err = minio.BucketNotFound{ - Bucket: bucket, - } - return err - case "storage: object doesn't exist": - if uploadID != "" { - err = minio.InvalidUploadID{ - UploadID: uploadID, - } - } else { - err = minio.ObjectNotFound{ - Bucket: bucket, - Object: object, - } - } - return err - } - - googleAPIErr, ok := err.(*googleapi.Error) - if !ok { - // We don't interpret non MinIO errors. As minio errors will - // have StatusCode to help to convert to object errors. - return err - } - - if len(googleAPIErr.Errors) == 0 { - return err - } - - reason := googleAPIErr.Errors[0].Reason - message := googleAPIErr.Errors[0].Message - - switch reason { - case "required": - // Anonymous users does not have storage.xyz access to project 123. - fallthrough - case "keyInvalid": - fallthrough - case "forbidden": - err = minio.PrefixAccessDenied{ - Bucket: bucket, - Object: object, - } - case "invalid": - err = minio.BucketNameInvalid{ - Bucket: bucket, - } - case "notFound": - if object != "" { - err = minio.ObjectNotFound{ - Bucket: bucket, - Object: object, - } - break - } - err = minio.BucketNotFound{Bucket: bucket} - case "conflict": - if message == "You already own this bucket. Please select another name." { - err = minio.BucketAlreadyOwnedByYou{Bucket: bucket} - break - } - if message == "Sorry, that name is not available. Please try a different one." { - err = minio.BucketAlreadyExists{Bucket: bucket} - break - } - err = minio.BucketNotEmpty{Bucket: bucket} - } - - return err -} - -// gcsProjectIDRegex defines a valid gcs project id format -var gcsProjectIDRegex = regexp.MustCompile("^[a-z][a-z0-9-]{5,29}$") - -// isValidGCSProjectIDFormat - checks if a given project id format is valid or not. -// Project IDs must start with a lowercase letter and can have lowercase ASCII letters, -// digits or hyphens. Project IDs must be between 6 and 30 characters. -// Ref: https://cloud.google.com/resource-manager/reference/rest/v1/projects#Project (projectId section) -func isValidGCSProjectIDFormat(projectID string) bool { - // Checking projectID format - return gcsProjectIDRegex.MatchString(projectID) -} - -// gcsGateway - Implements gateway for MinIO and GCS compatible object storage servers. -type gcsGateway struct { - minio.GatewayUnsupported - client *storage.Client - httpClient *http.Client - metrics *minio.Metrics - projectID string -} - -// Returns projectID from the GOOGLE_APPLICATION_CREDENTIALS file. -func gcsParseProjectID(credsFile string) (projectID string, err error) { - contents, err := ioutil.ReadFile(credsFile) - if err != nil { - return projectID, err - } - googleCreds := make(map[string]string) - if err = json.Unmarshal(contents, &googleCreds); err != nil { - return projectID, err - } - return googleCreds[gcsProjectIDKey], err -} - -// GetMetrics returns this gateway's metrics -func (l *gcsGateway) GetMetrics(ctx context.Context) (*minio.Metrics, error) { - return l.metrics, nil -} - -// Cleanup old files in minio.sys.tmp of the given bucket. 
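-// Entries under minio.sys.tmp that have not been updated for longer than
-// gcsMultipartExpiry (2 weeks) are removed; the caller drives this once per
-// gcsCleanupInterval (24h). Roughly, the expiry check applied per object is:
-//
-//	if time.Since(attrs.Updated) > gcsMultipartExpiry { /* delete object */ }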
-func (l *gcsGateway) CleanupGCSMinioSysTmpBucket(ctx context.Context, bucket string) { - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: minio.GatewayMinioSysTmp, Versions: false}) - for { - attrs, err := it.Next() - if err != nil { - if err != iterator.Done { - reqInfo := &logger.ReqInfo{BucketName: bucket} - ctx := logger.SetReqInfo(minio.GlobalContext, reqInfo) - logger.LogIf(ctx, err) - } - return - } - if time.Since(attrs.Updated) > gcsMultipartExpiry { - // Delete files older than 2 weeks. - err := l.client.Bucket(bucket).Object(attrs.Name).Delete(ctx) - if err != nil { - reqInfo := &logger.ReqInfo{BucketName: bucket, ObjectName: attrs.Name} - ctx := logger.SetReqInfo(minio.GlobalContext, reqInfo) - logger.LogIf(ctx, err) - return - } - } - } -} - -// Cleanup old files in minio.sys.tmp of all buckets. -func (l *gcsGateway) CleanupGCSMinioSysTmp(ctx context.Context) { - for { - it := l.client.Buckets(ctx, l.projectID) - for { - attrs, err := it.Next() - if err != nil { - break - } - l.CleanupGCSMinioSysTmpBucket(ctx, attrs.Name) - } - // Run the cleanup loop every 1 day. - time.Sleep(gcsCleanupInterval) - } -} - -// Shutdown - save any gateway metadata to disk -// if necessary and reload upon next restart. -func (l *gcsGateway) Shutdown(ctx context.Context) error { - return nil -} - -// StorageInfo - Not relevant to GCS backend. -func (l *gcsGateway) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) { - si.Backend.Type = minio.BackendGateway - si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, l.httpClient, "https://storage.googleapis.com") - return si, nil -} - -// MakeBucketWithLocation - Create a new container on GCS backend. -func (l *gcsGateway) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error { - if lockEnabled { - return minio.NotImplemented{} - } - - bkt := l.client.Bucket(bucket) - - // we'll default to the us multi-region in case of us-east-1 - if location == "us-east-1" { - location = "us" - } - - err := bkt.Create(ctx, l.projectID, &storage.BucketAttrs{ - Location: location, - }) - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket) -} - -// GetBucketInfo - Get bucket metadata.. -func (l *gcsGateway) GetBucketInfo(ctx context.Context, bucket string) (minio.BucketInfo, error) { - attrs, err := l.client.Bucket(bucket).Attrs(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.BucketInfo{}, gcsToObjectError(err, bucket) - } - - return minio.BucketInfo{ - Name: attrs.Name, - Created: attrs.Created, - }, nil -} - -// ListBuckets lists all buckets under your project-id on GCS. -func (l *gcsGateway) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) { - it := l.client.Buckets(ctx, l.projectID) - - // Iterate and capture all the buckets. - for { - attrs, ierr := it.Next() - if ierr == iterator.Done { - break - } - - if ierr != nil { - return buckets, gcsToObjectError(ierr) - } - - buckets = append(buckets, minio.BucketInfo{ - Name: attrs.Name, - Created: attrs.Created, - }) - } - - return buckets, nil -} - -// DeleteBucket delete a bucket on GCS. -func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error { - itObject := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ - Delimiter: minio.SlashSeparator, - Versions: false, - }) - // We list the bucket and if we find any objects we return BucketNotEmpty error. If we - // find only "minio.sys.tmp/" then we remove it before deleting the bucket. 
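-	// Illustrative walkthrough of the two flags below: a bucket whose only
-	// top-level entry is "minio.sys.tmp/" sets gcsMinioPathFound and can
-	// still be deleted; any other key or prefix sets nonGCSMinioPathFound
-	// and the deletion fails with BucketNotEmpty.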
-	gcsMinioPathFound := false
-	nonGCSMinioPathFound := false
-	for {
-		objAttrs, err := itObject.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			logger.LogIf(ctx, err)
-			return gcsToObjectError(err)
-		}
-		if objAttrs.Prefix == minio.GatewayMinioSysTmp {
-			gcsMinioPathFound = true
-			continue
-		}
-		nonGCSMinioPathFound = true
-		break
-	}
-	if nonGCSMinioPathFound {
-		logger.LogIf(ctx, minio.BucketNotEmpty{})
-		return gcsToObjectError(minio.BucketNotEmpty{})
-	}
-	if gcsMinioPathFound {
-		// Remove minio.sys.tmp before deleting the bucket.
-		itObject = l.client.Bucket(bucket).Objects(ctx, &storage.Query{Versions: false, Prefix: minio.GatewayMinioSysTmp})
-		for {
-			objAttrs, err := itObject.Next()
-			if err == iterator.Done {
-				break
-			}
-			if err != nil {
-				logger.LogIf(ctx, err)
-				return gcsToObjectError(err)
-			}
-			err = l.client.Bucket(bucket).Object(objAttrs.Name).Delete(ctx)
-			if err != nil {
-				logger.LogIf(ctx, err)
-				return gcsToObjectError(err)
-			}
-		}
-	}
-	err := l.client.Bucket(bucket).Delete(ctx)
-	logger.LogIf(ctx, err)
-	return gcsToObjectError(err, bucket)
-}
-
-func toGCSPageToken(name string) string {
-	length := uint16(len(name))
-
-	// Encode the 0xa field tag followed by the name length as a protobuf
-	// varint (low 7 bits first, high bit marks a continuation byte), then
-	// the name itself.
-	b := []byte{
-		0xa,
-	}
-
-	for length >= 0x80 {
-		b = append(b, byte(length&0x7F|0x80))
-		length >>= 7
-	}
-	b = append(b, byte(length))
-
-	b = append(b, []byte(name)...)
-
-	return base64.StdEncoding.EncodeToString(b)
-}
-
-// Returns true if marker was returned by GCS, i.e. prefixed with
-// the gcsTokenPrefix "{minio}".
-func isGCSMarker(marker string) bool {
-	return strings.HasPrefix(marker, gcsTokenPrefix)
-}
-
-// ListObjects - lists all blobs in GCS bucket filtered by prefix
-func (l *gcsGateway) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) {
-	if maxKeys == 0 {
-		return minio.ListObjectsInfo{}, nil
-	}
-
-	it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
-		Delimiter: delimiter,
-		Prefix:    prefix,
-		Versions:  false,
-	})
-
-	// To accommodate S3-compatible applications using
-	// ListObjectsV1 to use object keys as markers to control the
-	// listing of objects, we use the following encoding scheme to
-	// distinguish between GCS continuation tokens and application
-	// supplied markers.
-	//
-	// - NextMarker in ListObjectsV1 response is constructed by
-	//   prefixing "{minio}" to the GCS continuation token,
-	//   e.g., "{minio}CgRvYmoz"
-	//
-	// - Application supplied markers are transformed to a
-	//   GCS continuation token.
-
-	// If application is using GCS continuation token we should
-	// strip the gcsTokenPrefix we added.
-	token := ""
-	if marker != "" {
-		if isGCSMarker(marker) {
-			token = strings.TrimPrefix(marker, gcsTokenPrefix)
-		} else {
-			token = toGCSPageToken(marker)
-		}
-	}
-	nextMarker := ""
-
-	var prefixes []string
-	var objects []minio.ObjectInfo
-	var nextPageToken string
-	var err error
-
-	pager := iterator.NewPager(it, maxKeys, token)
-	for {
-		gcsObjects := make([]*storage.ObjectAttrs, 0)
-		nextPageToken, err = pager.NextPage(&gcsObjects)
-		if err != nil {
-			logger.LogIf(ctx, err)
-			return minio.ListObjectsInfo{}, gcsToObjectError(err, bucket, prefix)
-		}
-
-		for _, attrs := range gcsObjects {
-
-			// Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes
-			// returned may not total maxKeys. This behavior is compatible with the S3 spec which
-			// allows the response to include less keys than maxKeys.
- if attrs.Prefix == minio.GatewayMinioSysTmp { - // We don't return our metadata prefix. - continue - } - if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) { - // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. - // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ - // which will be helpful to observe the "directory structure" for debugging purposes. - if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) || - strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) { - continue - } - } - - if attrs.Prefix != "" { - prefixes = append(prefixes, attrs.Prefix) - } else { - objects = append(objects, fromGCSAttrsToObjectInfo(attrs)) - } - - // The NextMarker property should only be set in the response if a delimiter is used - if delimiter != "" { - if attrs.Prefix > nextMarker { - nextMarker = attrs.Prefix - } else if attrs.Name > nextMarker { - nextMarker = attrs.Name - } - } - } - - // Exit the loop if at least one item can be returned from - // the current page or there are no more pages available - if nextPageToken == "" || len(prefixes)+len(objects) > 0 { - break - } - } - - if nextPageToken == "" { - nextMarker = "" - } else if nextMarker != "" { - nextMarker = gcsTokenPrefix + toGCSPageToken(nextMarker) - } - - return minio.ListObjectsInfo{ - IsTruncated: nextPageToken != "", - NextMarker: nextMarker, - Prefixes: prefixes, - Objects: objects, - }, nil -} - -// ListObjectsV2 - lists all blobs in GCS bucket filtered by prefix -func (l *gcsGateway) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) { - if maxKeys == 0 { - return minio.ListObjectsV2Info{ContinuationToken: continuationToken}, nil - } - - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ - Delimiter: delimiter, - Prefix: prefix, - Versions: false, - }) - - token := continuationToken - if token == "" && startAfter != "" { - token = toGCSPageToken(startAfter) - } - - var prefixes []string - var objects []minio.ObjectInfo - var nextPageToken string - var err error - - pager := iterator.NewPager(it, maxKeys, token) - for { - gcsObjects := make([]*storage.ObjectAttrs, 0) - nextPageToken, err = pager.NextPage(&gcsObjects) - if err != nil { - logger.LogIf(ctx, err) - return minio.ListObjectsV2Info{}, gcsToObjectError(err, bucket, prefix) - } - - for _, attrs := range gcsObjects { - - // Due to minio.GatewayMinioSysTmp keys being skipped, the number of objects + prefixes - // returned may not total maxKeys. This behavior is compatible with the S3 spec which - // allows the response to include less keys than maxKeys. - if attrs.Prefix == minio.GatewayMinioSysTmp { - // We don't return our metadata prefix. - continue - } - if !strings.HasPrefix(prefix, minio.GatewayMinioSysTmp) { - // If client lists outside gcsMinioPath then we filter out gcsMinioPath/* entries. - // But if the client lists inside gcsMinioPath then we return the entries in gcsMinioPath/ - // which will be helpful to observe the "directory structure" for debugging purposes. 
- if strings.HasPrefix(attrs.Prefix, minio.GatewayMinioSysTmp) || - strings.HasPrefix(attrs.Name, minio.GatewayMinioSysTmp) { - continue - } - } - - if attrs.Prefix != "" { - prefixes = append(prefixes, attrs.Prefix) - } else { - objects = append(objects, fromGCSAttrsToObjectInfo(attrs)) - } - } - - // Exit the loop if at least one item can be returned from - // the current page or there are no more pages available - if nextPageToken == "" || len(prefixes)+len(objects) > 0 { - break - } - } - - return minio.ListObjectsV2Info{ - IsTruncated: nextPageToken != "", - ContinuationToken: continuationToken, - NextContinuationToken: nextPageToken, - Prefixes: prefixes, - Objects: objects, - }, nil -} - -// GetObjectNInfo - returns object info and locked object ReadCloser -func (l *gcsGateway) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) { - var objInfo minio.ObjectInfo - objInfo, err = l.GetObjectInfo(ctx, bucket, object, opts) - if err != nil { - return nil, err - } - - var startOffset, length int64 - startOffset, length, err = rs.GetOffsetLength(objInfo.Size) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts) - pw.CloseWithError(err) - }() - // Setup cleanup function to cause the above go-routine to - // exit in case of partial read - pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser) -} - -// GetObject - reads an object from GCS. Supports additional -// parameters like offset and length which are synonymous with -// HTTP Range requests. -// -// startOffset indicates the starting read location of the object. -// length indicates the total length of the object. -func (l *gcsGateway) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error { - // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, - // otherwise gcs will just return object not exist in case of non-existing bucket - if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil { - logger.LogIf(ctx, err, logger.Application) - return gcsToObjectError(err, bucket) - } - - // GCS storage decompresses a gzipped object by default and returns the data. - // Refer to https://cloud.google.com/storage/docs/transcoding#decompressive_transcoding - // Need to set `Accept-Encoding` header to `gzip` when issuing a GetObject call, to be able - // to download the object in compressed state. - // Calling ReadCompressed with true accomplishes that. - object := l.client.Bucket(bucket).Object(key).ReadCompressed(true) - - r, err := object.NewRangeReader(ctx, startOffset, length) - if err != nil { - logger.LogIf(ctx, err, logger.Application) - return gcsToObjectError(err, bucket, key) - } - defer r.Close() - - if _, err := io.Copy(writer, r); err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket, key) - } - - return nil -} - -// fromGCSAttrsToObjectInfo converts GCS BucketAttrs to gateway ObjectInfo -func fromGCSAttrsToObjectInfo(attrs *storage.ObjectAttrs) minio.ObjectInfo { - // All google cloud storage objects have a CRC32c hash, whereas composite objects may not have a MD5 hash - // Refer https://cloud.google.com/storage/docs/hashes-etags. 
Use CRC32C for ETag - metadata := make(map[string]string) - var ( - expiry time.Time - e error - ) - for k, v := range attrs.Metadata { - k = http.CanonicalHeaderKey(k) - // Translate the GCS custom metadata prefix - if strings.HasPrefix(k, "X-Goog-Meta-") { - k = strings.Replace(k, "X-Goog-Meta-", "X-Amz-Meta-", 1) - } - if k == "Expires" { - if expiry, e = time.Parse(http.TimeFormat, v); e == nil { - expiry = expiry.UTC() - } - continue - } - metadata[k] = v - } - if attrs.ContentType != "" { - metadata["Content-Type"] = attrs.ContentType - } - if attrs.ContentEncoding != "" { - metadata["Content-Encoding"] = attrs.ContentEncoding - } - if attrs.CacheControl != "" { - metadata["Cache-Control"] = attrs.CacheControl - } - if attrs.ContentDisposition != "" { - metadata["Content-Disposition"] = attrs.ContentDisposition - } - if attrs.ContentLanguage != "" { - metadata["Content-Language"] = attrs.ContentLanguage - } - - etag := hex.EncodeToString(attrs.MD5) - if etag == "" { - etag = minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C)) - } - return minio.ObjectInfo{ - Name: attrs.Name, - Bucket: attrs.Bucket, - ModTime: attrs.Updated, - Size: attrs.Size, - ETag: etag, - UserDefined: metadata, - ContentType: attrs.ContentType, - ContentEncoding: attrs.ContentEncoding, - Expires: expiry, - } -} - -// applyMetadataToGCSAttrs applies metadata to a GCS ObjectAttrs instance -func applyMetadataToGCSAttrs(metadata map[string]string, attrs *storage.ObjectAttrs) { - attrs.Metadata = make(map[string]string) - for k, v := range metadata { - k = http.CanonicalHeaderKey(k) - switch { - case strings.HasPrefix(k, "X-Amz-Meta-"): - // Translate the S3 user-defined metadata prefix - k = strings.Replace(k, "X-Amz-Meta-", "x-goog-meta-", 1) - attrs.Metadata[k] = v - case k == "Content-Type": - attrs.ContentType = v - case k == "Content-Encoding": - attrs.ContentEncoding = v - case k == "Cache-Control": - attrs.CacheControl = v - case k == "Content-Disposition": - attrs.ContentDisposition = v - case k == "Content-Language": - attrs.ContentLanguage = v - } - } -} - -// GetObjectInfo - reads object info and replies back ObjectInfo -func (l *gcsGateway) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { - // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, - // otherwise gcs will just return object not exist in case of non-existing bucket - if _, err := l.client.Bucket(bucket).Attrs(ctx); err != nil { - logger.LogIf(ctx, err, logger.Application) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket) - } - - attrs, err := l.client.Bucket(bucket).Object(object).Attrs(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, object) - } - - return fromGCSAttrsToObjectInfo(attrs), nil -} - -// PutObject - Create a new object with the incoming data, -func (l *gcsGateway) PutObject(ctx context.Context, bucket string, key string, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.ObjectInfo, error) { - data := r.Reader - - nctx, cancel := context.WithCancel(ctx) - - defer cancel() - - // if we want to mimic S3 behavior exactly, we need to verify if bucket exists first, - // otherwise gcs will just return object not exist in case of non-existing bucket - if _, err := l.client.Bucket(bucket).Attrs(nctx); err != nil { - logger.LogIf(ctx, err, logger.Application) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket) - } - - object := l.client.Bucket(bucket).Object(key) 
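-
-	// The storage.Writer created below buffers data in ChunkSize pieces and
-	// uploads them over a resumable session; per the GCS Go client docs, a
-	// ChunkSize of 0 disables chunking and uploads the payload in a single
-	// request instead.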
- - w := object.NewWriter(nctx) - - // Disable "chunked" uploading in GCS client if the size of the data to be uploaded is below - // the current chunk-size of the writer. This avoids an unnecessary memory allocation. - if data.Size() < int64(w.ChunkSize) { - w.ChunkSize = 0 - } - applyMetadataToGCSAttrs(opts.UserDefined, &w.ObjectAttrs) - - if _, err := io.Copy(w, data); err != nil { - // Close the object writer upon error. - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - - // Close the object writer upon success. - if err := w.Close(); err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - - return fromGCSAttrsToObjectInfo(w.Attrs()), nil -} - -// CopyObject - Copies a blob from source container to destination container. -func (l *gcsGateway) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string, - srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) { - if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { - return minio.ObjectInfo{}, minio.PreConditionFailed{} - } - src := l.client.Bucket(srcBucket).Object(srcObject) - dst := l.client.Bucket(destBucket).Object(destObject) - - copier := dst.CopierFrom(src) - applyMetadataToGCSAttrs(srcInfo.UserDefined, &copier.ObjectAttrs) - - attrs, err := copier.Run(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, destBucket, destObject) - } - - return fromGCSAttrsToObjectInfo(attrs), nil -} - -// DeleteObject - Deletes a blob in bucket -func (l *gcsGateway) DeleteObject(ctx context.Context, bucket string, object string) error { - err := l.client.Bucket(bucket).Object(object).Delete(ctx) - if err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket, object) - } - - return nil -} - -func (l *gcsGateway) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { - errs := make([]error, len(objects)) - for idx, object := range objects { - errs[idx] = l.DeleteObject(ctx, bucket, object) - } - return errs, nil -} - -// NewMultipartUpload - upload object in multiple parts -func (l *gcsGateway) NewMultipartUpload(ctx context.Context, bucket string, key string, o minio.ObjectOptions) (uploadID string, err error) { - // generate new uploadid - uploadID = minio.MustGetUUID() - - // generate name for part zero - meta := gcsMultipartMetaName(uploadID) - - w := l.client.Bucket(bucket).Object(meta).NewWriter(ctx) - defer w.Close() - - applyMetadataToGCSAttrs(o.UserDefined, &w.ObjectAttrs) - - if err = json.NewEncoder(w).Encode(gcsMultipartMetaV1{ - gcsMinioMultipartMetaCurrentVersion, - bucket, - key, - }); err != nil { - logger.LogIf(ctx, err) - return "", gcsToObjectError(err, bucket, key) - } - return uploadID, nil -} - -// ListMultipartUploads - lists the (first) multipart upload for an object -// matched _exactly_ by the prefix -func (l *gcsGateway) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (minio.ListMultipartsInfo, error) { - // List objects under /gcsMinioMultipartPathV1 - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ - Prefix: gcsMinioMultipartPathV1, - }) - - var uploads []minio.MultipartInfo - - for { - attrs, err := it.Next() - if err == iterator.Done { - break - } - - if err != nil { - logger.LogIf(ctx, err) - return 
minio.ListMultipartsInfo{
-				KeyMarker:      keyMarker,
-				UploadIDMarker: uploadIDMarker,
-				MaxUploads:     maxUploads,
-				Prefix:         prefix,
-				Delimiter:      delimiter,
-			}, gcsToObjectError(err)
-		}
-
-		// Skip entries other than gcs.json
-		if !strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) {
-			continue
-		}
-
-		// Extract multipart upload information from gcs.json
-		obj := l.client.Bucket(bucket).Object(attrs.Name)
-		objReader, rErr := obj.NewReader(ctx)
-		if rErr != nil {
-			logger.LogIf(ctx, rErr)
-			return minio.ListMultipartsInfo{}, rErr
-		}
-		defer objReader.Close()
-
-		var mpMeta gcsMultipartMetaV1
-		dec := json.NewDecoder(objReader)
-		decErr := dec.Decode(&mpMeta)
-		if decErr != nil {
-			logger.LogIf(ctx, decErr)
-			return minio.ListMultipartsInfo{}, decErr
-		}
-
-		if prefix == mpMeta.Object {
-			// Extract uploadId
-			// E.g. minio.sys.tmp/multipart/v1/d063ad89-fdc4-4ea3-a99e-22dba98151f5/gcs.json
-			components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5)
-			if len(components) != 5 {
-				compErr := errors.New("Invalid multipart upload format")
-				logger.LogIf(ctx, compErr)
-				return minio.ListMultipartsInfo{}, compErr
-			}
-			upload := minio.MultipartInfo{
-				Object:    mpMeta.Object,
-				UploadID:  components[3],
-				Initiated: attrs.Created,
-			}
-			uploads = append(uploads, upload)
-		}
-	}
-
-	return minio.ListMultipartsInfo{
-		KeyMarker:          keyMarker,
-		UploadIDMarker:     uploadIDMarker,
-		MaxUploads:         maxUploads,
-		Prefix:             prefix,
-		Delimiter:          delimiter,
-		Uploads:            uploads,
-		NextKeyMarker:      "",
-		NextUploadIDMarker: "",
-		IsTruncated:        false,
-	}, nil
-}
-
-// Checks if minio.sys.tmp/multipart/v1/<uploadID>/gcs.json exists, returns
-// an object layer compatible error upon any error.
-func (l *gcsGateway) checkUploadIDExists(ctx context.Context, bucket string, key string, uploadID string) error {
-	_, err := l.client.Bucket(bucket).Object(gcsMultipartMetaName(uploadID)).Attrs(ctx)
-	logger.LogIf(ctx, err)
-	return gcsToObjectError(err, bucket, key, uploadID)
-}
-
-// PutObjectPart puts a part of object in bucket
-func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key string, uploadID string, partNumber int, r *minio.PutObjReader, opts minio.ObjectOptions) (minio.PartInfo, error) {
-	data := r.Reader
-	if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
-		return minio.PartInfo{}, err
-	}
-	etag := data.MD5HexString()
-	if etag == "" {
-		// Generate random ETag.
-		etag = minio.GenETag()
-	}
-	object := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, partNumber, etag))
-	w := object.NewWriter(ctx)
-	// Disable "chunked" uploading in GCS client. If enabled, it can cause a corner case
-	// where it tries to upload 0 bytes in the last chunk and gets an error from the server.
-	w.ChunkSize = 0
-	if _, err := io.Copy(w, data); err != nil {
-		// Make sure to close object writer upon error.
-		w.Close()
-		logger.LogIf(ctx, err)
-		return minio.PartInfo{}, gcsToObjectError(err, bucket, key)
-	}
-	// Make sure to close the object writer upon success.
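-	// (In the GCS Go client the data is only committed once Close returns
-	// nil, so this error must be checked, not ignored.)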
- if err := w.Close(); err != nil { - logger.LogIf(ctx, err) - return minio.PartInfo{}, gcsToObjectError(err, bucket, key) - } - return minio.PartInfo{ - PartNumber: partNumber, - ETag: etag, - LastModified: minio.UTCNow(), - Size: data.Size(), - }, nil - -} - -// gcsGetPartInfo returns PartInfo of a given object part -func gcsGetPartInfo(ctx context.Context, attrs *storage.ObjectAttrs) (minio.PartInfo, error) { - components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5) - if len(components) != 5 { - logger.LogIf(ctx, errors.New("Invalid multipart upload format")) - return minio.PartInfo{}, errors.New("Invalid multipart upload format") - } - - partComps := strings.SplitN(components[4], ".", 2) - if len(partComps) != 2 { - logger.LogIf(ctx, errors.New("Invalid multipart part format")) - return minio.PartInfo{}, errors.New("Invalid multipart part format") - } - - partNum, pErr := strconv.Atoi(partComps[0]) - if pErr != nil { - logger.LogIf(ctx, pErr) - return minio.PartInfo{}, errors.New("Invalid part number") - } - - return minio.PartInfo{ - PartNumber: partNum, - LastModified: attrs.Updated, - Size: attrs.Size, - ETag: partComps[1], - }, nil -} - -// GetMultipartInfo returns multipart info of the uploadId of the object -func (l *gcsGateway) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) { - result.Bucket = bucket - result.Object = object - result.UploadID = uploadID - return result, nil -} - -// ListObjectParts returns all object parts for specified object in specified bucket -func (l *gcsGateway) ListObjectParts(ctx context.Context, bucket string, key string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (minio.ListPartsInfo, error) { - it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{ - Prefix: path.Join(gcsMinioMultipartPathV1, uploadID), - }) - - var ( - count int - partInfos []minio.PartInfo - ) - - isTruncated := true - for count < maxParts { - attrs, err := it.Next() - if err == iterator.Done { - isTruncated = false - break - } - - if err != nil { - logger.LogIf(ctx, err) - return minio.ListPartsInfo{}, gcsToObjectError(err) - } - - if strings.HasSuffix(attrs.Name, gcsMinioMultipartMeta) { - continue - } - - partInfo, pErr := gcsGetPartInfo(ctx, attrs) - if pErr != nil { - logger.LogIf(ctx, pErr) - return minio.ListPartsInfo{}, pErr - } - - if partInfo.PartNumber <= partNumberMarker { - continue - } - - partInfos = append(partInfos, partInfo) - count++ - } - - nextPartNumberMarker := 0 - if isTruncated { - nextPartNumberMarker = partInfos[maxParts-1].PartNumber - } - - return minio.ListPartsInfo{ - Bucket: bucket, - Object: key, - UploadID: uploadID, - PartNumberMarker: partNumberMarker, - NextPartNumberMarker: nextPartNumberMarker, - MaxParts: maxParts, - Parts: partInfos, - IsTruncated: isTruncated, - }, nil -} - -// Called by AbortMultipartUpload and CompleteMultipartUpload for cleaning up. 
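-// It deletes every object under the upload's prefix, for example (with a
-// hypothetical uploadID) minio.sys.tmp/multipart/v1/<uploadID>/: both the
-// gcs.json meta file and any uploaded part objects.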
-func (l *gcsGateway) cleanupMultipartUpload(ctx context.Context, bucket, key, uploadID string) error {
-	prefix := fmt.Sprintf("%s/%s/", gcsMinioMultipartPathV1, uploadID)
-
-	// iterate through all parts and delete them
-	it := l.client.Bucket(bucket).Objects(ctx, &storage.Query{Prefix: prefix, Versions: false})
-
-	for {
-		attrs, err := it.Next()
-		if err == iterator.Done {
-			break
-		}
-		if err != nil {
-			logger.LogIf(ctx, err)
-			return gcsToObjectError(err, bucket, key)
-		}
-
-		object := l.client.Bucket(bucket).Object(attrs.Name)
-		// Ignore the error as parallel AbortMultipartUpload might have deleted it.
-		object.Delete(ctx)
-	}
-
-	return nil
-}
-
-// AbortMultipartUpload aborts an ongoing multipart upload
-func (l *gcsGateway) AbortMultipartUpload(ctx context.Context, bucket string, key string, uploadID string) error {
-	if err := l.checkUploadIDExists(ctx, bucket, key, uploadID); err != nil {
-		return err
-	}
-	return l.cleanupMultipartUpload(ctx, bucket, key, uploadID)
-}
-
-// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
-// Note that there is a limit (currently 32) to the number of components that can
-// be composed in a single operation. There is a per-project rate limit (currently 200)
-// to the number of source objects you can compose per second.
-func (l *gcsGateway) CompleteMultipartUpload(ctx context.Context, bucket string, key string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
-	meta := gcsMultipartMetaName(uploadID)
-	object := l.client.Bucket(bucket).Object(meta)
-
-	partZeroAttrs, err := object.Attrs(ctx)
-	if err != nil {
-		logger.LogIf(ctx, err)
-		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key, uploadID)
-	}
-
-	r, err := object.NewReader(ctx)
-	if err != nil {
-		logger.LogIf(ctx, err)
-		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
-	}
-	defer r.Close()
-
-	// Check version compatibility of the meta file before compose()
-	multipartMeta := gcsMultipartMetaV1{}
-	if err = json.NewDecoder(r).Decode(&multipartMeta); err != nil {
-		logger.LogIf(ctx, err)
-		return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key)
-	}
-
-	if multipartMeta.Version != gcsMinioMultipartMetaCurrentVersion {
-		logger.LogIf(ctx, errGCSFormat)
-		return minio.ObjectInfo{}, gcsToObjectError(errGCSFormat, bucket, key)
-	}
-
-	// Validate if the gcs.json stores valid entries for the bucket and key.
-	if multipartMeta.Bucket != bucket || multipartMeta.Object != key {
-		return minio.ObjectInfo{}, gcsToObjectError(minio.InvalidUploadID{
-			UploadID: uploadID,
-		}, bucket, key)
-	}
-
-	var parts []*storage.ObjectHandle
-	partSizes := make([]int64, len(uploadedParts))
-	for i, uploadedPart := range uploadedParts {
-		parts = append(parts, l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID,
-			uploadedPart.PartNumber, uploadedPart.ETag)))
-		partAttr, pErr := l.client.Bucket(bucket).Object(gcsMultipartDataName(uploadID, uploadedPart.PartNumber, uploadedPart.ETag)).Attrs(ctx)
-		if pErr != nil {
-			logger.LogIf(ctx, pErr)
-			return minio.ObjectInfo{}, gcsToObjectError(pErr, bucket, key, uploadID)
-		}
-		partSizes[i] = partAttr.Size
-	}
-
-	// Error out if any part except the last one is smaller than 5MiB.
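-	// (This mirrors S3 multipart semantics: every part other than the last
-	// must be at least 5MiB.)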
- for i, size := range partSizes[:len(partSizes)-1] { - if size < 5*humanize.MiByte { - logger.LogIf(ctx, minio.PartTooSmall{ - PartNumber: uploadedParts[i].PartNumber, - PartSize: size, - PartETag: uploadedParts[i].ETag, - }) - return minio.ObjectInfo{}, minio.PartTooSmall{ - PartNumber: uploadedParts[i].PartNumber, - PartSize: size, - PartETag: uploadedParts[i].ETag, - } - } - } - - // Returns name of the composed object. - gcsMultipartComposeName := func(uploadID string, composeNumber int) string { - return fmt.Sprintf("%s/tmp/%s/composed-object-%05d", minio.GatewayMinioSysTmp, uploadID, composeNumber) - } - - composeCount := int(math.Ceil(float64(len(parts)) / float64(gcsMaxComponents))) - if composeCount > 1 { - // Create composes of every 32 parts. - composeParts := make([]*storage.ObjectHandle, composeCount) - for i := 0; i < composeCount; i++ { - // Create 'composed-object-N' using next 32 parts. - composeParts[i] = l.client.Bucket(bucket).Object(gcsMultipartComposeName(uploadID, i)) - start := i * gcsMaxComponents - end := start + gcsMaxComponents - if end > len(parts) { - end = len(parts) - } - - composer := composeParts[i].ComposerFrom(parts[start:end]...) - composer.ContentType = partZeroAttrs.ContentType - composer.Metadata = partZeroAttrs.Metadata - - if _, err = composer.Run(ctx); err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - } - - // As composes are successfully created, final object needs to be created using composes. - parts = composeParts - } - - composer := l.client.Bucket(bucket).Object(key).ComposerFrom(parts...) - composer.ContentType = partZeroAttrs.ContentType - composer.ContentEncoding = partZeroAttrs.ContentEncoding - composer.CacheControl = partZeroAttrs.CacheControl - composer.ContentDisposition = partZeroAttrs.ContentDisposition - composer.ContentLanguage = partZeroAttrs.ContentLanguage - composer.Metadata = partZeroAttrs.Metadata - attrs, err := composer.Run(ctx) - if err != nil { - logger.LogIf(ctx, err) - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - if err = l.cleanupMultipartUpload(ctx, bucket, key, uploadID); err != nil { - return minio.ObjectInfo{}, gcsToObjectError(err, bucket, key) - } - return fromGCSAttrsToObjectInfo(attrs), nil -} - -// SetBucketPolicy - Set policy on bucket -func (l *gcsGateway) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { - policyInfo, err := minio.PolicyToBucketAccessPolicy(bucketPolicy) - if err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket) - } - - var policies []minio.BucketAccessPolicy - for prefix, policy := range miniogopolicy.GetPolicies(policyInfo.Statements, bucket, "") { - policies = append(policies, minio.BucketAccessPolicy{ - Prefix: prefix, - Policy: policy, - }) - } - - prefix := bucket + "/*" // For all objects inside the bucket. 
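-
-	// GCS bucket ACLs cannot express per-prefix rules, so anything other
-	// than a single policy covering the whole bucket is rejected as
-	// NotImplemented below.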
- - if len(policies) != 1 { - logger.LogIf(ctx, minio.NotImplemented{}) - return minio.NotImplemented{} - } - if policies[0].Prefix != prefix { - logger.LogIf(ctx, minio.NotImplemented{}) - return minio.NotImplemented{} - } - - acl := l.client.Bucket(bucket).ACL() - if policies[0].Policy == miniogopolicy.BucketPolicyNone { - if err := acl.Delete(ctx, storage.AllUsers); err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket) - } - return nil - } - - var role storage.ACLRole - switch policies[0].Policy { - case miniogopolicy.BucketPolicyReadOnly: - role = storage.RoleReader - case miniogopolicy.BucketPolicyWriteOnly: - role = storage.RoleWriter - default: - logger.LogIf(ctx, minio.NotImplemented{}) - return minio.NotImplemented{} - } - - if err := acl.Set(ctx, storage.AllUsers, role); err != nil { - logger.LogIf(ctx, err) - return gcsToObjectError(err, bucket) - } - - return nil -} - -// GetBucketPolicy - Get policy on bucket -func (l *gcsGateway) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { - rules, err := l.client.Bucket(bucket).ACL().List(ctx) - if err != nil { - return nil, gcsToObjectError(err, bucket) - } - - var readOnly, writeOnly bool - for _, r := range rules { - if r.Entity != storage.AllUsers || r.Role == storage.RoleOwner { - continue - } - - switch r.Role { - case storage.RoleReader: - readOnly = true - case storage.RoleWriter: - writeOnly = true - } - } - - actionSet := policy.NewActionSet() - if readOnly { - actionSet.Add(policy.GetBucketLocationAction) - actionSet.Add(policy.ListBucketAction) - actionSet.Add(policy.GetObjectAction) - } - if writeOnly { - actionSet.Add(policy.GetBucketLocationAction) - actionSet.Add(policy.ListBucketMultipartUploadsAction) - actionSet.Add(policy.AbortMultipartUploadAction) - actionSet.Add(policy.DeleteObjectAction) - actionSet.Add(policy.ListMultipartUploadPartsAction) - actionSet.Add(policy.PutObjectAction) - } - - // Return NoSuchBucketPolicy error, when policy is not set - if len(actionSet) == 0 { - return nil, gcsToObjectError(minio.BucketPolicyNotFound{}, bucket) - } - - return &policy.Policy{ - Version: policy.DefaultVersion, - Statements: []policy.Statement{ - policy.NewStatement( - policy.Allow, - policy.NewPrincipal("*"), - actionSet, - policy.NewResourceSet( - policy.NewResource(bucket, ""), - policy.NewResource(bucket, "*"), - ), - condition.NewFunctions(), - ), - }, - }, nil -} - -// DeleteBucketPolicy - Delete all policies on bucket -func (l *gcsGateway) DeleteBucketPolicy(ctx context.Context, bucket string) error { - // This only removes the storage.AllUsers policies - if err := l.client.Bucket(bucket).ACL().Delete(ctx, storage.AllUsers); err != nil { - return gcsToObjectError(err, bucket) - } - - return nil -} - -// IsCompressionSupported returns whether compression is applicable for this layer. -func (l *gcsGateway) IsCompressionSupported() bool { - return false -} - -// IsReady returns whether the layer is ready to take requests. -func (l *gcsGateway) IsReady(ctx context.Context) bool { - return minio.IsBackendOnline(ctx, l.httpClient, "https://storage.googleapis.com") -} diff --git a/cmd/gateway/gcs/gateway-gcs_test.go b/cmd/gateway/gcs/gateway-gcs_test.go deleted file mode 100644 index 8dc008f..0000000 --- a/cmd/gateway/gcs/gateway-gcs_test.go +++ /dev/null @@ -1,490 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package gcs - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "reflect" - "testing" - "time" - - "cloud.google.com/go/storage" - "google.golang.org/api/googleapi" - - miniogo "github.com/minio/minio-go/v6" - minio "github.com/minio/minio/cmd" -) - -func TestToGCSPageToken(t *testing.T) { - testCases := []struct { - Name string - Token string - }{ - { - Name: "A", - Token: "CgFB", - }, - { - Name: "AAAAAAAAAA", - Token: "CgpBQUFBQUFBQUFB", - }, - { - Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: "CmRBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB", - }, - { - Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: "CpEDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=", - }, - { - Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: "CpIDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFB", - }, - { - Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: 
"CpMDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQQ==", - }, - { - Name: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA", - Token: "CvQDQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUFBQUE=", - }, - } - - for i, testCase := range testCases { - if toGCSPageToken(testCase.Name) != testCase.Token { - t.Errorf("Test %d: Expected %s, got %s", i+1, toGCSPageToken(testCase.Name), testCase.Token) - } - } - -} - -// TestIsValidGCSProjectIDFormat tests isValidGCSProjectIDFormat -func TestValidGCSProjectIDFormat(t *testing.T) { - testCases := []struct { - ProjectID string - Valid bool - }{ - {"", false}, - {"a", false}, - {"Abc", false}, - {"1bcd", false}, - // 5 chars - {"abcdb", false}, - // 6 chars - {"abcdbz", true}, - // 30 chars - {"project-id-1-project-id-more-1", true}, - // 31 chars - {"project-id-1-project-id-more-11", false}, - {"storage.googleapis.com", false}, - {"http://storage.googleapis.com", false}, - {"http://localhost:9000", false}, - {"project-id-1", true}, - {"project-id-1988832", true}, - {"projectid1414", true}, - } - - for i, testCase := range testCases { - valid := isValidGCSProjectIDFormat(testCase.ProjectID) - if valid != testCase.Valid { - t.Errorf("Test %d: Expected %v, got %v", i+1, valid, testCase.Valid) - } - } -} - -// Test for isGCSMarker. -func TestIsGCSMarker(t *testing.T) { - testCases := []struct { - marker string - expected bool - }{ - { - marker: "{minio}gcs123", - expected: true, - }, - { - marker: "{mini_no}tgcs123", - expected: false, - }, - { - marker: "{minioagainnotgcs123", - expected: false, - }, - { - marker: "obj1", - expected: false, - }, - } - - for i, tc := range testCases { - if actual := isGCSMarker(tc.marker); actual != tc.expected { - t.Errorf("Test %d: marker is %s, expected %v but got %v", - i+1, tc.marker, tc.expected, actual) - } - } -} - -// Test for gcsMultipartMetaName. 
-func TestGCSMultipartMetaName(t *testing.T) { - uploadID := "a" - expected := path.Join(gcsMinioMultipartPathV1, uploadID, gcsMinioMultipartMeta) - got := gcsMultipartMetaName(uploadID) - if expected != got { - t.Errorf("expected: %s, got: %s", expected, got) - } -} - -// Test for gcsMultipartDataName. -func TestGCSMultipartDataName(t *testing.T) { - var ( - uploadID = "a" - etag = "b" - partNumber = 1 - ) - expected := path.Join(gcsMinioMultipartPathV1, uploadID, fmt.Sprintf("%05d.%s", partNumber, etag)) - got := gcsMultipartDataName(uploadID, partNumber, etag) - if expected != got { - t.Errorf("expected: %s, got: %s", expected, got) - } -} - -func TestFromMinioClientListBucketResultToV2Info(t *testing.T) { - - listBucketResult := miniogo.ListBucketResult{ - IsTruncated: false, - Marker: "testMarker", - NextMarker: "testMarker2", - CommonPrefixes: []miniogo.CommonPrefix{{Prefix: "one"}, {Prefix: "two"}}, - Contents: []miniogo.ObjectInfo{{Key: "testobj", ContentType: ""}}, - } - - listBucketV2Info := minio.ListObjectsV2Info{ - Prefixes: []string{"one", "two"}, - Objects: []minio.ObjectInfo{{Name: "testobj", Bucket: "testbucket", UserDefined: map[string]string{"Content-Type": ""}}}, - IsTruncated: false, - ContinuationToken: "testMarker", - NextContinuationToken: "testMarker2", - } - - if got := minio.FromMinioClientListBucketResultToV2Info("testbucket", listBucketResult); !reflect.DeepEqual(got, listBucketV2Info) { - t.Errorf("fromMinioClientListBucketResultToV2Info() = %v, want %v", got, listBucketV2Info) - } -} - -// Test for gcsParseProjectID -func TestGCSParseProjectID(t *testing.T) { - f, err := ioutil.TempFile("", "") - if err != nil { - t.Error(err) - return - } - defer os.Remove(f.Name()) - defer f.Close() - - contents := ` -{ - "type": "service_account", - "project_id": "miniotesting" -} -` - f.WriteString(contents) - projectID, err := gcsParseProjectID(f.Name()) - if err != nil { - t.Fatal(err) - } - if projectID != "miniotesting" { - t.Errorf(`Expected projectID value to be "miniotesting"`) - } - - if _, err = gcsParseProjectID("non-existent"); err == nil { - t.Errorf(`Expected to fail but succeeded reading "non-existent"`) - } - - f.WriteString(`,}`) - - if _, err := gcsParseProjectID(f.Name()); err == nil { - t.Errorf(`Expected to fail reading corrupted credentials file`) - } -} - -func TestGCSToObjectError(t *testing.T) { - testCases := []struct { - params []string - gcsErr error - expectedErr error - }{ - { - []string{}, nil, nil, - }, - { - []string{}, fmt.Errorf("Not *Error"), fmt.Errorf("Not *Error"), - }, - { - []string{"bucket"}, - fmt.Errorf("storage: bucket doesn't exist"), - minio.BucketNotFound{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - fmt.Errorf("storage: object doesn't exist"), - minio.ObjectNotFound{ - Bucket: "bucket", - Object: "object", - }, - }, - { - []string{"bucket", "object", "uploadID"}, - fmt.Errorf("storage: object doesn't exist"), - minio.InvalidUploadID{ - UploadID: "uploadID", - }, - }, - { - []string{}, - fmt.Errorf("Unknown error"), - fmt.Errorf("Unknown error"), - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Message: "No list of errors", - }, - &googleapi.Error{ - Message: "No list of errors", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "conflict", - Message: "You already own this bucket. 
Please select another name.", - }}, - }, - minio.BucketAlreadyOwnedByYou{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "conflict", - Message: "Sorry, that name is not available. Please try a different one.", - }}, - }, - minio.BucketAlreadyExists{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "conflict", - }}, - }, - minio.BucketNotEmpty{Bucket: "bucket"}, - }, - { - []string{"bucket"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "notFound", - }}, - }, - minio.BucketNotFound{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "notFound", - }}, - }, - minio.ObjectNotFound{ - Bucket: "bucket", - Object: "object", - }, - }, - { - []string{"bucket"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "invalid", - }}, - }, - minio.BucketNameInvalid{ - Bucket: "bucket", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "forbidden", - }}, - }, - minio.PrefixAccessDenied{ - Bucket: "bucket", - Object: "object", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "keyInvalid", - }}, - }, - minio.PrefixAccessDenied{ - Bucket: "bucket", - Object: "object", - }, - }, - { - []string{"bucket", "object"}, - &googleapi.Error{ - Errors: []googleapi.ErrorItem{{ - Reason: "required", - }}, - }, - minio.PrefixAccessDenied{ - Bucket: "bucket", - Object: "object", - }, - }, - } - - for i, testCase := range testCases { - actualErr := gcsToObjectError(testCase.gcsErr, testCase.params...) - if actualErr != nil { - if actualErr.Error() != testCase.expectedErr.Error() { - t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.expectedErr, actualErr) - } - } - } -} - -func TestS3MetaToGCSAttributes(t *testing.T) { - headers := map[string]string{ - "accept-encoding": "gzip", - "content-encoding": "gzip", - "cache-control": "age: 3600", - "content-disposition": "dummy", - "content-type": "application/javascript", - "Content-Language": "en", - "X-Amz-Meta-Hdr": "value", - "X-Amz-Meta-X-Amz-Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=", - "X-Amz-Meta-X-Amz-Matdesc": "{}", - "X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==", - } - // Only X-Amz-Meta- prefixed entries will be returned in - // Metadata (without the prefix!) 
- expectedHeaders := map[string]string{
- "x-goog-meta-Hdr": "value",
- "x-goog-meta-X-Amz-Key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
- "x-goog-meta-X-Amz-Matdesc": "{}",
- "x-goog-meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==",
- }
-
- attrs := storage.ObjectAttrs{}
- applyMetadataToGCSAttrs(headers, &attrs)
-
- if !reflect.DeepEqual(attrs.Metadata, expectedHeaders) {
- t.Fatalf("Test failed, expected %#v, got %#v", expectedHeaders, attrs.Metadata)
- }
-
- if attrs.CacheControl != headers["cache-control"] {
- t.Fatalf("Test failed with Cache-Control mismatch, expected %s, got %s", headers["cache-control"], attrs.CacheControl)
- }
- if attrs.ContentDisposition != headers["content-disposition"] {
- t.Fatalf("Test failed with Content-Disposition mismatch, expected %s, got %s", headers["content-disposition"], attrs.ContentDisposition)
- }
- if attrs.ContentEncoding != headers["content-encoding"] {
- t.Fatalf("Test failed with Content-Encoding mismatch, expected %s, got %s", headers["content-encoding"], attrs.ContentEncoding)
- }
- if attrs.ContentLanguage != headers["Content-Language"] {
- t.Fatalf("Test failed with Content-Language mismatch, expected %s, got %s", headers["Content-Language"], attrs.ContentLanguage)
- }
- if attrs.ContentType != headers["content-type"] {
- t.Fatalf("Test failed with Content-Type mismatch, expected %s, got %s", headers["content-type"], attrs.ContentType)
- }
-}
-
-func TestGCSAttrsToObjectInfo(t *testing.T) {
- metadata := map[string]string{
- "x-goog-meta-Hdr": "value",
- "x-goog-meta-x_amz_key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
- "x-goog-meta-x-amz-matdesc": "{}",
- "x-goog-meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==",
- }
- expectedMeta := map[string]string{
- "X-Amz-Meta-Hdr": "value",
- "X-Amz-Meta-X_amz_key": "hu3ZSqtqwn+aL4V2VhAeov4i+bG3KyCtRMSXQFRHXOk=",
- "X-Amz-Meta-X-Amz-Matdesc": "{}",
- "X-Amz-Meta-X-Amz-Iv": "eWmyryl8kq+EVnnsE7jpOg==",
- "Cache-Control": "max-age: 3600",
- "Content-Disposition": "dummy",
- "Content-Encoding": "gzip",
- "Content-Language": "en",
- "Content-Type": "application/javascript",
- }
-
- attrs := storage.ObjectAttrs{
- Name: "test-obj",
- Bucket: "test-bucket",
- Updated: time.Now(),
- Size: 123,
- CRC32C: 45312398,
- CacheControl: "max-age: 3600",
- ContentDisposition: "dummy",
- ContentEncoding: "gzip",
- ContentLanguage: "en",
- ContentType: "application/javascript",
- Metadata: metadata,
- }
- expectedETag := minio.ToS3ETag(fmt.Sprintf("%d", attrs.CRC32C))
-
- objInfo := fromGCSAttrsToObjectInfo(&attrs)
- if !reflect.DeepEqual(objInfo.UserDefined, expectedMeta) {
- t.Fatalf("Test failed, expected %#v, got %#v", expectedMeta, objInfo.UserDefined)
- }
-
- if objInfo.Name != attrs.Name {
- t.Fatalf("Test failed with Name mismatch, expected %s, got %s", attrs.Name, objInfo.Name)
- }
- if objInfo.Bucket != attrs.Bucket {
- t.Fatalf("Test failed with Bucket mismatch, expected %s, got %s", attrs.Bucket, objInfo.Bucket)
- }
- if objInfo.ModTime != attrs.Updated {
- t.Fatalf("Test failed with ModTime mismatch, expected %s, got %s", attrs.Updated, objInfo.ModTime)
- }
- if objInfo.Size != attrs.Size {
- t.Fatalf("Test failed with Size mismatch, expected %d, got %d", attrs.Size, objInfo.Size)
- }
- if objInfo.ETag != expectedETag {
- t.Fatalf("Test failed with ETag mismatch, expected %s, got %s", expectedETag, objInfo.ETag)
- }
-}
diff --git a/cmd/gateway/hdfs/gateway-hdfs-utils.go b/cmd/gateway/hdfs/gateway-hdfs-utils.go
deleted file mode 100644
index e366419..0000000
---
a/cmd/gateway/hdfs/gateway-hdfs-utils.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2019 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package hdfs - -import ( - "strings" - - "github.com/minio/minio-go/v6/pkg/s3utils" - minio "github.com/minio/minio/cmd" -) - -const ( - // Minio meta bucket. - minioMetaBucket = ".minio.sys" - - // Minio Tmp meta prefix. - minioMetaTmpBucket = minioMetaBucket + "/tmp" - - // Minio reserved bucket name. - minioReservedBucket = "minio" -) - -// Ignores all reserved bucket names or invalid bucket names. -func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool { - bucketEntry = strings.TrimSuffix(bucketEntry, minio.SlashSeparator) - if strict { - if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil { - return true - } - } else { - if err := s3utils.CheckValidBucketName(bucketEntry); err != nil { - return true - } - } - return isMinioMetaBucket(bucketEntry) || isMinioReservedBucket(bucketEntry) -} - -// Returns true if input bucket is a reserved minio meta bucket '.minio.sys'. -func isMinioMetaBucket(bucketName string) bool { - return bucketName == minioMetaBucket -} - -// Returns true if input bucket is a reserved minio bucket 'minio'. -func isMinioReservedBucket(bucketName string) bool { - return bucketName == minioReservedBucket -} - -// byBucketName is a collection satisfying sort.Interface. -type byBucketName []minio.BucketInfo - -func (d byBucketName) Len() int { return len(d) } -func (d byBucketName) Swap(i, j int) { d[i], d[j] = d[j], d[i] } -func (d byBucketName) Less(i, j int) bool { return d[i].Name < d[j].Name } diff --git a/cmd/gateway/hdfs/gateway-hdfs.go b/cmd/gateway/hdfs/gateway-hdfs.go deleted file mode 100644 index d7c7473..0000000 --- a/cmd/gateway/hdfs/gateway-hdfs.go +++ /dev/null @@ -1,766 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2019 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package hdfs - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "os" - "os/user" - "path" - "sort" - "strings" - "syscall" - "time" - - "github.com/colinmarc/hdfs/v2" - "github.com/colinmarc/hdfs/v2/hadoopconf" - "github.com/minio/cli" - "github.com/minio/minio-go/v6/pkg/s3utils" - minio "github.com/minio/minio/cmd" - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/auth" - "github.com/minio/minio/pkg/env" - xnet "github.com/minio/minio/pkg/net" - krb "gopkg.in/jcmturner/gokrb5.v7/client" - "gopkg.in/jcmturner/gokrb5.v7/config" - "gopkg.in/jcmturner/gokrb5.v7/credentials" -) - -const ( - hdfsBackend = "hdfs" - - hdfsSeparator = minio.SlashSeparator -) - -func init() { - const hdfsGatewayTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} HDFS-NAMENODE [HDFS-NAMENODE...] -{{if .VisibleFlags}} -FLAGS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -HDFS-NAMENODE: - HDFS namenode URI - -EXAMPLES: - 1. Start minio gateway server for HDFS backend - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.HelpName}} hdfs://namenode:8200 - - 2. Start minio gateway server for HDFS with edge caching enabled - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85 - {{.Prompt}} {{.HelpName}} hdfs://namenode:8200 -` - - minio.RegisterGatewayCommand(cli.Command{ - Name: hdfsBackend, - Usage: "Hadoop Distributed File System (HDFS)", - Action: hdfsGatewayMain, - CustomHelpTemplate: hdfsGatewayTemplate, - HideHelpCommand: true, - }) -} - -// Handler for 'minio gateway hdfs' command line. -func hdfsGatewayMain(ctx *cli.Context) { - // Validate gateway arguments. - if ctx.Args().First() == "help" { - cli.ShowCommandHelpAndExit(ctx, hdfsBackend, 1) - } - - minio.StartGateway(ctx, &HDFS{args: ctx.Args()}) -} - -// HDFS implements Gateway. -type HDFS struct { - args []string -} - -// Name implements Gateway interface. -func (g *HDFS) Name() string { - return hdfsBackend -} - -func getKerberosClient() (*krb.Client, error) { - cfg, err := config.Load(env.Get("KRB5_CONFIG", "/etc/krb5.conf")) - if err != nil { - return nil, err - } - - u, err := user.Current() - if err != nil { - return nil, err - } - - // Determine the ccache location from the environment, falling back to the default location. 
- ccachePath := env.Get("KRB5CCNAME", fmt.Sprintf("/tmp/krb5cc_%s", u.Uid))
- if strings.Contains(ccachePath, ":") {
- if strings.HasPrefix(ccachePath, "FILE:") {
- ccachePath = strings.TrimPrefix(ccachePath, "FILE:")
- } else {
- return nil, fmt.Errorf("unable to use kerberos ccache: %s", ccachePath)
- }
- }
-
- ccache, err := credentials.LoadCCache(ccachePath)
- if err != nil {
- return nil, err
- }
-
- return krb.NewClientFromCCache(ccache, cfg)
-}
-
-// NewGatewayLayer returns hdfs gatewaylayer.
-func (g *HDFS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
- dialFunc := (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- DualStack: true,
- }).DialContext
-
- hconfig, err := hadoopconf.LoadFromEnvironment()
- if err != nil {
- return nil, err
- }
-
- opts := hdfs.ClientOptionsFromConf(hconfig)
- opts.NamenodeDialFunc = dialFunc
- opts.DatanodeDialFunc = dialFunc
-
- // No addresses found in the Hadoop configuration; load them from the command line.
- if len(opts.Addresses) == 0 {
- var addresses []string
- for _, s := range g.args {
- u, err := xnet.ParseURL(s)
- if err != nil {
- return nil, err
- }
- addresses = append(addresses, u.Host)
- }
- opts.Addresses = addresses
- }
-
- u, err := user.Current()
- if err != nil {
- return nil, fmt.Errorf("Unable to look up local user: %s", err)
- }
-
- // A non-nil KerberosClient here means the Hadoop configuration enables
- // Kerberos; it is only a stub, so replace it with a client initialized
- // from the local credential cache.
- if opts.KerberosClient != nil {
- opts.KerberosClient, err = getKerberosClient()
- if err != nil {
- return nil, fmt.Errorf("Unable to initialize kerberos client: %s", err)
- }
- } else {
- opts.User = env.Get("HADOOP_USER_NAME", u.Username)
- }
-
- clnt, err := hdfs.NewClient(opts)
- if err != nil {
- return nil, err
- }
-
- if err = clnt.MkdirAll(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket), os.FileMode(0755)); err != nil {
- return nil, err
- }
-
- return &hdfsObjects{clnt: clnt, listPool: minio.NewTreeWalkPool(time.Minute * 30)}, nil
-}
-
-// Production - hdfs gateway is production ready.
-func (g *HDFS) Production() bool {
- return true
-}
-
-func (n *hdfsObjects) Shutdown(ctx context.Context) error {
- return n.clnt.Close()
-}
-
-func (n *hdfsObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, errs []error) {
- fsInfo, err := n.clnt.StatFs()
- if err != nil {
- return minio.StorageInfo{}, []error{err}
- }
- si.Used = []uint64{fsInfo.Used}
- si.Backend.Type = minio.BackendGateway
- si.Backend.GatewayOnline = true
- return si, nil
-}
-
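getKerberosClient above accepts only file-backed credential caches. Here is a minimal sketch isolating that KRB5CCNAME rule; `ccachePathFromSpec` is an illustrative name rather than a helper in this file, and it assumes `fmt` and `strings` are imported:

```go
// Only file-backed caches are usable: a bare path or a FILE: spec passes,
// any other cache type (DIR:, KCM:, KEYRING:, ...) is rejected.
func ccachePathFromSpec(spec string) (string, error) {
	if !strings.Contains(spec, ":") {
		return spec, nil // bare path, e.g. /tmp/krb5cc_1000
	}
	if strings.HasPrefix(spec, "FILE:") {
		return strings.TrimPrefix(spec, "FILE:"), nil
	}
	return "", fmt.Errorf("unable to use kerberos ccache: %s", spec)
}
```

-// hdfsObjects implements gateway for Minio and S3 compatible object storage servers.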
-type hdfsObjects struct {
- minio.GatewayUnsupported
- clnt *hdfs.Client
- listPool *minio.TreeWalkPool
-}
-
-func hdfsToObjectErr(ctx context.Context, err error, params ...string) error {
- if err == nil {
- return nil
- }
- bucket := ""
- object := ""
- uploadID := ""
- // params are positional: bucket, then object, then uploadID.
- switch len(params) {
- case 3:
- uploadID = params[2]
- fallthrough
- case 2:
- object = params[1]
- fallthrough
- case 1:
- bucket = params[0]
- }
-
- switch {
- case os.IsNotExist(err):
- if uploadID != "" {
- return minio.InvalidUploadID{
- UploadID: uploadID,
- }
- }
- if object != "" {
- return minio.ObjectNotFound{Bucket: bucket, Object: object}
- }
- return minio.BucketNotFound{Bucket: bucket}
- case os.IsExist(err):
- if object != "" {
- return minio.PrefixAccessDenied{Bucket: bucket, Object: object}
- }
- return minio.BucketAlreadyOwnedByYou{Bucket: bucket}
- case errors.Is(err, syscall.ENOTEMPTY):
- if object != "" {
- return minio.PrefixAccessDenied{Bucket: bucket, Object: object}
- }
- return minio.BucketNotEmpty{Bucket: bucket}
- default:
- logger.LogIf(ctx, err)
- return err
- }
-}
-
-// hdfsIsValidBucketName verifies whether a bucket name is valid.
-func hdfsIsValidBucketName(bucket string) bool {
- return s3utils.CheckValidBucketNameStrict(bucket) == nil
-}
-
-func (n *hdfsObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
- if !hdfsIsValidBucketName(bucket) {
- return minio.BucketNameInvalid{Bucket: bucket}
- }
- if forceDelete {
- return hdfsToObjectErr(ctx, n.clnt.RemoveAll(minio.PathJoin(hdfsSeparator, bucket)), bucket)
- }
- return hdfsToObjectErr(ctx, n.clnt.Remove(minio.PathJoin(hdfsSeparator, bucket)), bucket)
-}
-
-func (n *hdfsObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error {
- if lockEnabled {
- return minio.NotImplemented{}
- }
-
- if !hdfsIsValidBucketName(bucket) {
- return minio.BucketNameInvalid{Bucket: bucket}
- }
- return hdfsToObjectErr(ctx, n.clnt.Mkdir(minio.PathJoin(hdfsSeparator, bucket), os.FileMode(0755)), bucket)
-}
-
-func (n *hdfsObjects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, err error) {
- fi, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
- if err != nil {
- return bi, hdfsToObjectErr(ctx, err, bucket)
- }
- // As hdfs.Stat() doesn't carry anything other than ModTime(), use ModTime() as CreatedTime.
- return minio.BucketInfo{
- Name: bucket,
- Created: fi.ModTime(),
- }, nil
-}
-
-func (n *hdfsObjects) ListBuckets(ctx context.Context) (buckets []minio.BucketInfo, err error) {
- entries, err := n.clnt.ReadDir(hdfsSeparator)
- if err != nil {
- logger.LogIf(ctx, err)
- return nil, hdfsToObjectErr(ctx, err)
- }
-
- for _, entry := range entries {
- // Ignore all reserved bucket names and invalid bucket names.
- if isReservedOrInvalidBucket(entry.Name(), false) {
- continue
- }
- buckets = append(buckets, minio.BucketInfo{
- Name: entry.Name(),
- // As hdfs.Stat() doesn't carry CreatedTime, use ModTime() as CreatedTime.
- Created: entry.ModTime(),
- })
- }
-
- // Sort bucket infos by bucket name.
- sort.Sort(byBucketName(buckets))
- return buckets, nil
-}
-
-func (n *hdfsObjects) listDirFactory() minio.ListDirFunc {
- // listDir - lists all the entries at a given prefix and given entry in the prefix.
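// Editor's note: the listDir closure defined below feeds the generic
// minio.ListObjects tree walk. Its contract, as used here: report
// (emptyDir=true, nil) for an empty directory, suffix sub-directories with
// "/" so the walker can recurse into them, and narrow the result with
// minio.FilterMatchingPrefix. For a bucket holding a/1.txt and ab/2.txt,
// listDir(bucket, "", "a") would return (false, ["a/", "ab/"]).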
- listDir := func(bucket, prefixDir, prefixEntry string) (emptyDir bool, entries []string) {
- f, err := n.clnt.Open(minio.PathJoin(hdfsSeparator, bucket, prefixDir))
- if err != nil {
- if os.IsNotExist(err) {
- err = nil
- }
- logger.LogIf(minio.GlobalContext, err)
- return
- }
- defer f.Close()
- fis, err := f.Readdir(0)
- if err != nil {
- logger.LogIf(minio.GlobalContext, err)
- return
- }
- if len(fis) == 0 {
- return true, nil
- }
- for _, fi := range fis {
- if fi.IsDir() {
- entries = append(entries, fi.Name()+hdfsSeparator)
- } else {
- entries = append(entries, fi.Name())
- }
- }
- return false, minio.FilterMatchingPrefix(entries, prefixEntry)
- }
-
- // Return list factory instance.
- return listDir
-}
-
-// ListObjects lists all blobs in HDFS bucket filtered by prefix.
-func (n *hdfsObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, err error) {
- if _, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)); err != nil {
- return loi, hdfsToObjectErr(ctx, err, bucket)
- }
-
- getObjectInfo := func(ctx context.Context, bucket, entry string) (minio.ObjectInfo, error) {
- fi, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket, entry))
- if err != nil {
- return minio.ObjectInfo{}, hdfsToObjectErr(ctx, err, bucket, entry)
- }
- return minio.ObjectInfo{
- Bucket: bucket,
- Name: entry,
- ModTime: fi.ModTime(),
- Size: fi.Size(),
- IsDir: fi.IsDir(),
- AccTime: fi.(*hdfs.FileInfo).AccessTime(),
- }, nil
- }
-
- return minio.ListObjects(ctx, n, bucket, prefix, marker, delimiter, maxKeys, n.listPool, n.listDirFactory(), getObjectInfo, getObjectInfo)
-}
-
-// deleteObject deletes a file path if it's empty. If it's successfully deleted,
-// it will recursively move up the tree, deleting empty parent directories
-// until it finds one with files in it. Returns nil for a non-empty directory.
-func (n *hdfsObjects) deleteObject(basePath, deletePath string) error {
- if basePath == deletePath {
- return nil
- }
-
- // Attempt to remove path.
- if err := n.clnt.Remove(deletePath); err != nil {
- if errors.Is(err, syscall.ENOTEMPTY) {
- // Ignore errors if the directory is not empty. The server relies on
- // this functionality, and sometimes uses recursion that should not
- // error on parent directories.
- return nil
- }
- return err
- }
-
- // Trailing slash is removed when found so that
- // path.Dir() works as intended.
- deletePath = strings.TrimSuffix(deletePath, hdfsSeparator)
- deletePath = path.Dir(deletePath)
-
- // Delete parent directory. Errors for parent directories shouldn't trickle down.
- n.deleteObject(basePath, deletePath)
-
- return nil
-}
-
-// ListObjectsV2 lists all blobs in HDFS bucket filtered by prefix
-func (n *hdfsObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int,
- fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, err error) {
- // fetchOwner is not supported and unused.
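// The shim below folds the two V2 resume parameters into the single V1
// marker: an explicit continuation token always wins, and startAfter only
// matters on the first page. For example, (continuationToken="",
// startAfter="logs/2020") resumes listing at "logs/2020", while
// (continuationToken="logs/2020/x", startAfter="logs/2020") resumes at
// "logs/2020/x".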
- marker := continuationToken - if marker == "" { - marker = startAfter - } - resultV1, err := n.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys) - if err != nil { - return loi, err - } - return minio.ListObjectsV2Info{ - Objects: resultV1.Objects, - Prefixes: resultV1.Prefixes, - ContinuationToken: continuationToken, - NextContinuationToken: resultV1.NextMarker, - IsTruncated: resultV1.IsTruncated, - }, nil -} - -func (n *hdfsObjects) DeleteObject(ctx context.Context, bucket, object string) error { - return hdfsToObjectErr(ctx, n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), minio.PathJoin(hdfsSeparator, bucket, object)), bucket, object) -} - -func (n *hdfsObjects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { - errs := make([]error, len(objects)) - for idx, object := range objects { - errs[idx] = n.DeleteObject(ctx, bucket, object) - } - return errs, nil -} - -func (n *hdfsObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) { - objInfo, err := n.GetObjectInfo(ctx, bucket, object, opts) - if err != nil { - return nil, err - } - - var startOffset, length int64 - startOffset, length, err = rs.GetOffsetLength(objInfo.Size) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - nerr := n.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts) - pw.CloseWithError(nerr) - }() - - // Setup cleanup function to cause the above go-routine to - // exit in case of partial read - pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser) - -} - -func (n *hdfsObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) { - cpSrcDstSame := minio.IsStringEqual(minio.PathJoin(hdfsSeparator, srcBucket, srcObject), minio.PathJoin(hdfsSeparator, dstBucket, dstObject)) - if cpSrcDstSame { - return n.GetObjectInfo(ctx, srcBucket, srcObject, minio.ObjectOptions{}) - } - - return n.PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, minio.ObjectOptions{ - ServerSideEncryption: dstOpts.ServerSideEncryption, - UserDefined: srcInfo.UserDefined, - }) -} - -func (n *hdfsObjects) GetObject(ctx context.Context, bucket, key string, startOffset, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error { - if _, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)); err != nil { - return hdfsToObjectErr(ctx, err, bucket) - } - rd, err := n.clnt.Open(minio.PathJoin(hdfsSeparator, bucket, key)) - if err != nil { - return hdfsToObjectErr(ctx, err, bucket, key) - } - defer rd.Close() - _, err = io.Copy(writer, io.NewSectionReader(rd, startOffset, length)) - if err == io.ErrClosedPipe { - // hdfs library doesn't send EOF correctly, so io.Copy attempts - // to write which returns io.ErrClosedPipe - just ignore - // this for now. 
- err = nil
- }
- return hdfsToObjectErr(ctx, err, bucket, key)
-}
-
-func (n *hdfsObjects) isObjectDir(ctx context.Context, bucket, object string) bool {
- f, err := n.clnt.Open(minio.PathJoin(hdfsSeparator, bucket, object))
- if err != nil {
- if os.IsNotExist(err) {
- return false
- }
- logger.LogIf(ctx, err)
- return false
- }
- defer f.Close()
- fis, err := f.Readdir(1)
- if err != nil && err != io.EOF {
- logger.LogIf(ctx, err)
- return false
- }
- // Readdir returns io.EOF when the directory is empty.
- return len(fis) == 0
-}
-
-// GetObjectInfo reads object info and replies back ObjectInfo.
-func (n *hdfsObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
- _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
- if err != nil {
- return objInfo, hdfsToObjectErr(ctx, err, bucket)
- }
- if strings.HasSuffix(object, hdfsSeparator) && !n.isObjectDir(ctx, bucket, object) {
- return objInfo, hdfsToObjectErr(ctx, os.ErrNotExist, bucket, object)
- }
-
- fi, err := n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket, object))
- if err != nil {
- return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
- }
- return minio.ObjectInfo{
- Bucket: bucket,
- Name: object,
- ModTime: fi.ModTime(),
- Size: fi.Size(),
- IsDir: fi.IsDir(),
- AccTime: fi.(*hdfs.FileInfo).AccessTime(),
- }, nil
-}
-
-func (n *hdfsObjects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
- _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
- if err != nil {
- return objInfo, hdfsToObjectErr(ctx, err, bucket)
- }
-
- name := minio.PathJoin(hdfsSeparator, bucket, object)
-
- // If it's a directory, create a prefix entry.
- if strings.HasSuffix(object, hdfsSeparator) && r.Size() == 0 {
- if err = n.clnt.MkdirAll(name, os.FileMode(0755)); err != nil {
- n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), name)
- return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
- }
- } else {
- tmpname := minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, minio.MustGetUUID())
- var w *hdfs.FileWriter
- w, err = n.clnt.Create(tmpname)
- if err != nil {
- return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
- }
- defer n.deleteObject(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket), tmpname)
- if _, err = io.Copy(w, r); err != nil {
- w.Close()
- return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
- }
- dir := path.Dir(name)
- if dir != "" {
- if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil {
- w.Close()
- n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), dir)
- return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
- }
- }
- w.Close()
- if err = n.clnt.Rename(tmpname, name); err != nil {
- return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
- }
- }
- fi, err := n.clnt.Stat(name)
- if err != nil {
- return objInfo, hdfsToObjectErr(ctx, err, bucket, object)
- }
- return minio.ObjectInfo{
- Bucket: bucket,
- Name: object,
- ETag: r.MD5CurrentHexString(),
- ModTime: fi.ModTime(),
- Size: fi.Size(),
- IsDir: fi.IsDir(),
- AccTime: fi.(*hdfs.FileInfo).AccessTime(),
- }, nil
-}
-
-func (n *hdfsObjects) NewMultipartUpload(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (uploadID string, err error) {
- _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket))
- if err != nil {
- return uploadID, hdfsToObjectErr(ctx, err, bucket)
- }
-
- uploadID = minio.MustGetUUID()
- if err =
n.clnt.CreateEmptyFile(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID)); err != nil { - return uploadID, hdfsToObjectErr(ctx, err, bucket) - } - - return uploadID, nil -} - -func (n *hdfsObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, err error) { - _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)) - if err != nil { - return lmi, hdfsToObjectErr(ctx, err, bucket) - } - - // It's decided not to support List Multipart Uploads, hence returning empty result. - return lmi, nil -} - -func (n *hdfsObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) (err error) { - _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID)) - if err != nil { - return hdfsToObjectErr(ctx, err, bucket, object, uploadID) - } - return nil -} - -// GetMultipartInfo returns multipart info of the uploadId of the object -func (n *hdfsObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) { - _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)) - if err != nil { - return result, hdfsToObjectErr(ctx, err, bucket) - } - - if err = n.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return result, err - } - - result.Bucket = bucket - result.Object = object - result.UploadID = uploadID - return result, nil -} - -func (n *hdfsObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (result minio.ListPartsInfo, err error) { - _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)) - if err != nil { - return result, hdfsToObjectErr(ctx, err, bucket) - } - - if err = n.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return result, err - } - - // It's decided not to support List parts, hence returning empty result. 
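// An empty result is coherent here because of how parts are stored:
// PutObjectPart below appends every part to the single temporary file named
// after the upload ID, so there is no per-part state on the backend left to
// enumerate. The same append-only layout is why parts effectively must be
// uploaded in order.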
- return result, nil -} - -func (n *hdfsObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, - startOffset int64, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.PartInfo, error) { - return n.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts) -} - -func (n *hdfsObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (info minio.PartInfo, err error) { - _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)) - if err != nil { - return info, hdfsToObjectErr(ctx, err, bucket) - } - - var w *hdfs.FileWriter - w, err = n.clnt.Append(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID)) - if err != nil { - return info, hdfsToObjectErr(ctx, err, bucket, object, uploadID) - } - defer w.Close() - _, err = io.Copy(w, r.Reader) - if err != nil { - return info, hdfsToObjectErr(ctx, err, bucket, object, uploadID) - } - - info.PartNumber = partID - info.ETag = r.MD5CurrentHexString() - info.LastModified = minio.UTCNow() - info.Size = r.Reader.Size() - - return info, nil -} - -func (n *hdfsObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, parts []minio.CompletePart, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)) - if err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket) - } - - if err = n.checkUploadIDExists(ctx, bucket, object, uploadID); err != nil { - return objInfo, err - } - - name := minio.PathJoin(hdfsSeparator, bucket, object) - dir := path.Dir(name) - if dir != "" { - if err = n.clnt.MkdirAll(dir, os.FileMode(0755)); err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - } - - err = n.clnt.Rename(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID), name) - // Object already exists is an error on HDFS - // remove it and then create it again. - if os.IsExist(err) { - if err = n.clnt.Remove(name); err != nil { - if dir != "" { - n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), dir) - } - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - if err = n.clnt.Rename(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID), name); err != nil { - if dir != "" { - n.deleteObject(minio.PathJoin(hdfsSeparator, bucket), dir) - } - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - } - fi, err := n.clnt.Stat(name) - if err != nil { - return objInfo, hdfsToObjectErr(ctx, err, bucket, object) - } - - // Calculate s3 compatible md5sum for complete multipart. - s3MD5 := minio.ComputeCompleteMultipartMD5(parts) - - return minio.ObjectInfo{ - Bucket: bucket, - Name: object, - ETag: s3MD5, - ModTime: fi.ModTime(), - Size: fi.Size(), - IsDir: fi.IsDir(), - AccTime: fi.(*hdfs.FileInfo).AccessTime(), - }, nil -} - -func (n *hdfsObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string) (err error) { - _, err = n.clnt.Stat(minio.PathJoin(hdfsSeparator, bucket)) - if err != nil { - return hdfsToObjectErr(ctx, err, bucket) - } - return hdfsToObjectErr(ctx, n.clnt.Remove(minio.PathJoin(hdfsSeparator, minioMetaTmpBucket, uploadID)), bucket, object, uploadID) -} - -// IsReady returns whether the layer is ready to take requests. 
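CompleteMultipartUpload above derives its final ETag from minio.ComputeCompleteMultipartMD5. A hedged sketch of the standard S3 rule that helper is assumed to implement, with crypto/md5, encoding/hex, and strconv imported; `completeMultipartMD5` is an illustrative name:

```go
// completeMultipartMD5: MD5 of the concatenated binary part MD5s, suffixed
// with "-<part count>" (e.g. the "-2" suffix seen in two-part ETags).
func completeMultipartMD5(partETags []string) (string, error) {
	var all []byte
	for _, etag := range partETags {
		b, err := hex.DecodeString(etag)
		if err != nil {
			return "", err
		}
		all = append(all, b...)
	}
	sum := md5.Sum(all)
	return hex.EncodeToString(sum[:]) + "-" + strconv.Itoa(len(partETags)), nil
}
```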
-func (n *hdfsObjects) IsReady(ctx context.Context) bool { - si, _ := n.StorageInfo(ctx, false) - return si.Backend.GatewayOnline -} diff --git a/cmd/gateway/nas/gateway-nas.go b/cmd/gateway/nas/gateway-nas.go deleted file mode 100644 index e368b97..0000000 --- a/cmd/gateway/nas/gateway-nas.go +++ /dev/null @@ -1,132 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package nas - -import ( - "context" - - "github.com/minio/cli" - minio "github.com/minio/minio/cmd" - "github.com/minio/minio/pkg/auth" -) - -const ( - nasBackend = "nas" -) - -func init() { - const nasGatewayTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} PATH -{{if .VisibleFlags}} -FLAGS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -PATH: - path to NAS mount point - -EXAMPLES: - 1. Start minio gateway server for NAS backend - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.HelpName}} /shared/nasvol - - 2. Start minio gateway server for NAS with edge caching enabled - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png" - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75 - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85 - - {{.Prompt}} {{.HelpName}} /shared/nasvol -` - - minio.RegisterGatewayCommand(cli.Command{ - Name: nasBackend, - Usage: "Network-attached storage (NAS)", - Action: nasGatewayMain, - CustomHelpTemplate: nasGatewayTemplate, - HideHelpCommand: true, - }) -} - -// Handler for 'minio gateway nas' command line. -func nasGatewayMain(ctx *cli.Context) { - // Validate gateway arguments. - if !ctx.Args().Present() || ctx.Args().First() == "help" { - cli.ShowCommandHelpAndExit(ctx, nasBackend, 1) - } - - minio.StartGateway(ctx, &NAS{ctx.Args().First()}) -} - -// NAS implements Gateway. -type NAS struct { - path string -} - -// Name implements Gateway interface. -func (g *NAS) Name() string { - return nasBackend -} - -// NewGatewayLayer returns nas gatewaylayer. -func (g *NAS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) { - var err error - newObject, err := minio.NewFSObjectLayer(g.path) - if err != nil { - return nil, err - } - return &nasObjects{newObject}, nil -} - -// Production - nas gateway is production ready. 
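The nasObjects type declared just below is a thin wrapper over the regular FS object layer, and it leans on Go struct embedding: everything not overridden is promoted from the embedded minio.ObjectLayer. A hypothetical sketch of the same pattern (`auditedLayer` is not from this file; assumes context, log, and the minio package are imported):

```go
// auditedLayer promotes every ObjectLayer method from the embedded value
// and overrides only Shutdown.
type auditedLayer struct {
	minio.ObjectLayer
}

func (a auditedLayer) Shutdown(ctx context.Context) error {
	log.Println("gateway layer shutting down") // added behaviour
	return a.ObjectLayer.Shutdown(ctx)         // delegate to the wrapped layer
}
```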
-func (g *NAS) Production() bool {
- return true
-}
-
-// IsListenBucketSupported returns whether listen bucket notification is applicable for this gateway.
-func (n *nasObjects) IsListenBucketSupported() bool {
- return false
-}
-
-func (n *nasObjects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) {
- si, errs := n.ObjectLayer.StorageInfo(ctx, false)
- si.Backend.GatewayOnline = si.Backend.Type == minio.BackendFS
- si.Backend.Type = minio.BackendGateway
- return si, errs
-}
-
-// nasObjects implements gateway for MinIO and S3 compatible object storage servers.
-type nasObjects struct {
- minio.ObjectLayer
-}
-
-// IsReady returns whether the layer is ready to take requests.
-func (n *nasObjects) IsReady(ctx context.Context) bool {
- si, _ := n.StorageInfo(ctx, false)
- return si.Backend.GatewayOnline
-}
-
-func (n *nasObjects) IsTaggingSupported() bool {
- return true
-}
diff --git a/cmd/gateway/s3/gateway-s3-metadata.go b/cmd/gateway/s3/gateway-s3-metadata.go
deleted file mode 100644
index 0a53439..0000000
--- a/cmd/gateway/s3/gateway-s3-metadata.go
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * MinIO Cloud Storage, (C) 2018 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package s3
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "errors"
- "net/http"
- "time"
-
- jsoniter "github.com/json-iterator/go"
- minio "github.com/minio/minio/cmd"
- "github.com/minio/minio/cmd/logger"
- "github.com/minio/minio/pkg/hash"
-)
-
-var (
- errGWMetaNotFound = errors.New("dare.meta file not found")
- errGWMetaInvalidFormat = errors.New("dare.meta format is invalid")
-)
-
-// A gwMetaV1 represents `gw.json` metadata header.
-type gwMetaV1 struct {
- Version string `json:"version"` // Version of the current `gw.json`.
- Format string `json:"format"` // Format of the current `gw.json`.
- Stat minio.StatInfo `json:"stat"` // Stat of the current object `gw.json`.
- ETag string `json:"etag"` // ETag of the current object.
-
- // Metadata map for current object `gw.json`.
- Meta map[string]string `json:"meta,omitempty"`
- // Captures info on each individual part of the object.
- Parts []minio.ObjectPartInfo `json:"parts,omitempty"`
-}
-
-// Gateway metadata constants.
-const (
- // Gateway meta version.
- gwMetaVersion = "1.0.0"
-
- // Gateway meta version 1.0.0.
- gwMetaVersion100 = "1.0.0"
-
- // Gateway meta format string.
- gwMetaFormat = "gw"
-
- // Add new constants here.
-)
-
-// newGWMetaV1 - initializes new gwMetaV1, adds version.
-func newGWMetaV1() (gwMeta gwMetaV1) {
- gwMeta = gwMetaV1{}
- gwMeta.Version = gwMetaVersion
- gwMeta.Format = gwMetaFormat
- return gwMeta
-}
-
-// IsValid - tells if the format is sane by validating the version
-// string, format fields.
-func (m gwMetaV1) IsValid() bool {
- return ((m.Version == gwMetaVersion || m.Version == gwMetaVersion100) &&
- m.Format == gwMetaFormat)
-}
-
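To make the format concrete, here is a hypothetical dare.meta document as gwMetaV1 would serialize it. The field names follow the struct tags above; the sizes and ETags are borrowed from the multipart fixture in the metadata tests further below:

```go
var sampleDareMeta = []byte(`{
  "version": "1.0.0",
  "format": "gw",
  "stat": {"size": 68190720, "modTime": "2018-08-31T22:25:39.23626461Z"},
  "etag": "2b137fa4ab80126af54623b010c98de6-2",
  "meta": {"content-type": "text/plain; charset=utf-8"},
  "parts": [
    {"number": 1, "name": "part.1", "etag": "c5cac075eefdab801a5198812f51b36e", "size": 67141632},
    {"number": 2, "name": "part.2", "etag": "ccdf4b774bc3be8eef9a8987309e8171", "size": 1049088}
  ]
}`)
```

-// Converts metadata to object info.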
-func (m gwMetaV1) ToObjectInfo(bucket, object string) minio.ObjectInfo {
- filterKeys := append([]string{
- "ETag",
- "Content-Length",
- "Last-Modified",
- "Content-Type",
- "Expires",
- }, defaultFilterKeys...)
- objInfo := minio.ObjectInfo{
- IsDir: false,
- Bucket: bucket,
- Name: object,
- Size: m.Stat.Size,
- ModTime: m.Stat.ModTime,
- ContentType: m.Meta["content-type"],
- ContentEncoding: m.Meta["content-encoding"],
- ETag: minio.CanonicalizeETag(m.ETag),
- UserDefined: minio.CleanMinioInternalMetadataKeys(minio.CleanMetadataKeys(m.Meta, filterKeys...)),
- Parts: m.Parts,
- }
-
- if sc, ok := m.Meta["x-amz-storage-class"]; ok {
- objInfo.StorageClass = sc
- }
- var (
- t time.Time
- e error
- )
- if exp, ok := m.Meta["expires"]; ok {
- if t, e = time.Parse(http.TimeFormat, exp); e == nil {
- objInfo.Expires = t.UTC()
- }
- }
- // Success.
- return objInfo
-}
-
-// ObjectToPartOffset - translate offset of an object to offset of its individual part.
-func (m gwMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
- if offset == 0 {
- // Special case - if offset is 0, then partIndex and partOffset are always 0.
- return 0, 0, nil
- }
- partOffset = offset
- // Seek until object offset maps to a particular part offset.
- for i, part := range m.Parts {
- partIndex = i
- // Offset is smaller than the part size; we have reached the proper part offset.
- if partOffset < part.Size {
- return partIndex, partOffset, nil
- }
- // Continue towards the next part.
- partOffset -= part.Size
- }
- logger.LogIf(ctx, minio.InvalidRange{})
- // Offset beyond the size of the object: return InvalidRange.
- return 0, 0, minio.InvalidRange{}
-}
-
-// Constructs GWMetaV1 using `jsoniter` lib to retrieve each field.
-func gwMetaUnmarshalJSON(ctx context.Context, gwMetaBuf []byte) (gwMeta gwMetaV1, err error) {
- var json = jsoniter.ConfigCompatibleWithStandardLibrary
- err = json.Unmarshal(gwMetaBuf, &gwMeta)
- return gwMeta, err
-}
-
-// readGWMetadata reads `dare.meta` and returns the GW metadata structure.
-func readGWMetadata(ctx context.Context, buf bytes.Buffer) (gwMeta gwMetaV1, err error) {
- if buf.Len() == 0 {
- return gwMetaV1{}, errGWMetaNotFound
- }
- gwMeta, err = gwMetaUnmarshalJSON(ctx, buf.Bytes())
- if err != nil {
- return gwMetaV1{}, err
- }
- if !gwMeta.IsValid() {
- return gwMetaV1{}, errGWMetaInvalidFormat
- }
- // Return structured `dare.meta`.
- return gwMeta, nil
-}
-
-// getGWMetadata - marshals the gateway metadata into a *minio.PutObjReader.
-func getGWMetadata(ctx context.Context, bucket, prefix string, gwMeta gwMetaV1) (*minio.PutObjReader, error) {
- // Marshal json.
- metadataBytes, err := json.Marshal(&gwMeta)
- if err != nil {
- logger.LogIf(ctx, err)
- return nil, err
- }
- hashReader, err := hash.NewReader(bytes.NewReader(metadataBytes), int64(len(metadataBytes)), "", "", int64(len(metadataBytes)), false)
- if err != nil {
- return nil, err
- }
- return minio.NewPutObjReader(hashReader, nil, nil), nil
-}
diff --git a/cmd/gateway/s3/gateway-s3-metadata_test.go b/cmd/gateway/s3/gateway-s3-metadata_test.go
deleted file mode 100644
index ba1f237..0000000
--- a/cmd/gateway/s3/gateway-s3-metadata_test.go
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * MinIO Cloud Storage, (C) 2018 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package s3
-
-import (
- "bytes"
- "testing"
-
- minio "github.com/minio/minio/cmd"
-)
-
-// Tests for GW metadata format validity.
-func TestGWMetaFormatValid(t *testing.T) {
- tests := []struct {
- name int
- version string
- format string
- want bool
- }{
- {1, "123", "fs", false},
- {2, "123", gwMetaFormat, false},
- {3, gwMetaVersion, "test", false},
- {4, gwMetaVersion100, "hello", false},
- {5, gwMetaVersion, gwMetaFormat, true},
- {6, gwMetaVersion100, gwMetaFormat, true},
- }
- for _, tt := range tests {
- m := newGWMetaV1()
- m.Version = tt.version
- m.Format = tt.format
- if got := m.IsValid(); got != tt.want {
- t.Errorf("Test %d: Expected %v but received %v", tt.name, tt.want, got)
- }
- }
-}
-
-// Tests for reading GW metadata info.
-func TestReadGWMetadata(t *testing.T) {
- tests := []struct {
- metaStr string
- pass bool
- }{
- {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 132, "modTime": "2018-08-31T22:25:39.23626461Z" }}`, true},
- {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 132, "modTime": "0000-00-00T00:00:00.00000000Z" }}`, false},
- {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 5242880, "modTime": "2018-08-31T22:25:39.23626461Z" },"meta":{"content-type":"application/octet-stream","etag":"57c743902b2fc8eea6ba3bb4fc58c8e8"},"parts":[{"number":1,"name":"part.1","etag":"","size":5242880}]}`, true},
- {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 68190720, "modTime": "2018-08-31T22:25:39.23626461Z" },"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":[{"number":1,"name":"part.1","etag":"c5cac075eefdab801a5198812f51b36e","size":67141632},{"number":2,"name":"part.2","etag":"ccdf4b774bc3be8eef9a8987309e8171","size":1049088}]}`, true},
- {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": "68190720", "modTime": "2018-08-31T22:25:39.23626461Z" },"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":"123"}`, false},
- }
-
- for i, tt := range tests {
- buf := bytes.NewBufferString(tt.metaStr)
- m, err := readGWMetadata(minio.GlobalContext, *buf)
- if err != nil && tt.pass {
- t.Errorf("Test %d: Expected parse gw metadata to succeed, but failed, %s", i+1, err)
%s", i+1, err) - } - if err == nil && !tt.pass { - t.Errorf("Test %d: Expected parse gw metadata to succeed, but failed", i+1) - } - if err == nil { - if m.Version != gwMetaVersion { - t.Errorf("Test %d: Expected version %s, but failed with %s", i+1, gwMetaVersion, m.Version) - } - } - } -} diff --git a/cmd/gateway/s3/gateway-s3-sse.go b/cmd/gateway/s3/gateway-s3-sse.go deleted file mode 100644 index 7cb172d..0000000 --- a/cmd/gateway/s3/gateway-s3-sse.go +++ /dev/null @@ -1,793 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3 - -import ( - "bytes" - "context" - "io" - "net/http" - "path" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v6/pkg/encrypt" - minio "github.com/minio/minio/cmd" - - "github.com/minio/minio/cmd/logger" -) - -const ( - // name of custom multipart metadata file for s3 backend. - gwdareMetaJSON string = "dare.meta" - - // name of temporary per part metadata file - gwpartMetaJSON string = "part.meta" - // custom multipart files are stored under the defaultMinioGWPrefix - defaultMinioGWPrefix = ".minio" - defaultGWContentFileName = "data" -) - -// s3EncObjects is a wrapper around s3Objects and implements gateway calls for -// custom large objects encrypted at the gateway -type s3EncObjects struct { - s3Objects -} - -/* - NOTE: - Custom gateway encrypted objects are stored on backend as follows: - obj/.minio/data <= encrypted content - obj/.minio/dare.meta <= metadata - - When a multipart upload operation is in progress, the metadata set during - NewMultipartUpload is stored in obj/.minio/uploadID/dare.meta and each - UploadPart operation saves additional state of the part's encrypted ETag and - encrypted size in obj/.minio/uploadID/part1/part.meta - - All the part metadata and temp dare.meta are cleaned up when upload completes -*/ - -// ListObjects lists all blobs in S3 bucket filtered by prefix -func (l *s3EncObjects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) { - var continuationToken, startAfter string - res, err := l.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, maxKeys, false, startAfter) - if err != nil { - return loi, err - } - loi.IsTruncated = res.IsTruncated - loi.NextMarker = res.NextContinuationToken - loi.Objects = res.Objects - loi.Prefixes = res.Prefixes - return loi, nil - -} - -// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix -func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) { - - var objects []minio.ObjectInfo - var prefixes []string - var isTruncated bool - - // filter out objects that contain a .minio prefix, but is not a dare.meta metadata file. 
- for {
- loi, e = l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, fetchOwner, startAfter)
- if e != nil {
- return loi, minio.ErrorRespToObjectError(e, bucket)
- }
- for _, obj := range loi.Objects {
- startAfter = obj.Name
- continuationToken = loi.NextContinuationToken
- isTruncated = loi.IsTruncated
-
- if !isGWObject(obj.Name) {
- continue
- }
- // get the object name and ObjectInfo from the custom metadata file
- if strings.HasSuffix(obj.Name, gwdareMetaJSON) {
- objSlice := strings.Split(obj.Name, minio.SlashSeparator+defaultMinioGWPrefix)
- gwMeta, e := l.getGWMetadata(ctx, bucket, getDareMetaPath(objSlice[0]))
- if e != nil {
- continue
- }
- oInfo := gwMeta.ToObjectInfo(bucket, objSlice[0])
- objects = append(objects, oInfo)
- } else {
- objects = append(objects, obj)
- }
- if len(objects) > maxKeys {
- break
- }
- }
- for _, p := range loi.Prefixes {
- objName := strings.TrimSuffix(p, minio.SlashSeparator)
- gm, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(objName))
- // if the prefix is actually a custom multi-part object, append it to objects
- if err == nil {
- objects = append(objects, gm.ToObjectInfo(bucket, objName))
- continue
- }
- isPrefix := l.isPrefix(ctx, bucket, p, fetchOwner, startAfter)
- if isPrefix {
- prefixes = append(prefixes, p)
- }
- }
- if (len(objects) > maxKeys) || !loi.IsTruncated {
- break
- }
- }
-
- loi.IsTruncated = isTruncated
- loi.ContinuationToken = continuationToken
- loi.Objects = make([]minio.ObjectInfo, 0)
- loi.Prefixes = make([]string, 0)
- loi.Objects = append(loi.Objects, objects...)
-
- for _, pfx := range prefixes {
- if pfx != prefix {
- loi.Prefixes = append(loi.Prefixes, pfx)
- }
- }
- // Set the continuation token if s3 returned a truncated list
- if isTruncated {
- if len(objects) > 0 {
- loi.NextContinuationToken = objects[len(objects)-1].Name
- }
- }
- return loi, nil
-}
-
-// isGWObject returns true if it is a custom object
-func isGWObject(objName string) bool {
- isEncrypted := strings.Contains(objName, defaultMinioGWPrefix)
- if !isEncrypted {
- return true
- }
- // ignore temp part.meta files
- if strings.Contains(objName, gwpartMetaJSON) {
- return false
- }
-
- pfxSlice := strings.Split(objName, minio.SlashSeparator)
- var i1, i2 int
- for i := len(pfxSlice) - 1; i >= 0; i-- {
- p := pfxSlice[i]
- if p == defaultMinioGWPrefix {
- i1 = i
- }
- if p == gwdareMetaJSON {
- i2 = i
- }
- if i1 > 0 && i2 > 0 {
- break
- }
- }
- // incomplete uploads would have an uploadID between defaultMinioGWPrefix and gwdareMetaJSON
- return i2 > 0 && i1 > 0 && i2-i1 == 1
-}
-
-// isPrefix returns true if prefix exists and is not an incomplete multipart upload entry
-func (l *s3EncObjects) isPrefix(ctx context.Context, bucket, prefix string, fetchOwner bool, startAfter string) bool {
- var continuationToken, delimiter string
-
- for {
- loi, e := l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, fetchOwner, startAfter)
- if e != nil {
- return false
- }
- for _, obj := range loi.Objects {
- if isGWObject(obj.Name) {
- return true
- }
- }
-
- continuationToken = loi.NextContinuationToken
- if !loi.IsTruncated {
- break
- }
- }
- return false
-}
-
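The GetObjectNInfo implementations here and in the HDFS gateway above stream data through the same io.Pipe pattern: a producer goroutine writes the object into the pipe and hands its terminal error to the consumer via CloseWithError. A minimal sketch of the pattern (`streamObject` is an illustrative name; assumes the io package is imported):

```go
// streamObject turns a push-style writer function into a pull-style reader:
// the producer's error (or nil, meaning EOF) reaches the consumer through
// CloseWithError, and closing the returned reader early unblocks the
// producer with io.ErrClosedPipe.
func streamObject(get func(w io.Writer) error) io.ReadCloser {
	pr, pw := io.Pipe()
	go func() {
		pw.CloseWithError(get(pw))
	}()
	return pr
}
```

-// GetObject reads an object from S3. Supports additional
-// parameters like offset and length which are synonymous with
-// HTTP Range requests.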
-func (l *s3EncObjects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
-	return l.getObject(ctx, bucket, key, startOffset, length, writer, etag, opts)
-}
-
-func (l *s3EncObjects) isGWEncrypted(ctx context.Context, bucket, object string) bool {
-	_, err := l.s3Objects.GetObjectInfo(ctx, bucket, getDareMetaPath(object), minio.ObjectOptions{})
-	return err == nil
-}
-
-// getGWMetadata fetches dare.meta from the s3 backend and unmarshals it into a structured format.
-func (l *s3EncObjects) getGWMetadata(ctx context.Context, bucket, metaFileName string) (m gwMetaV1, err error) {
-	oi, err1 := l.s3Objects.GetObjectInfo(ctx, bucket, metaFileName, minio.ObjectOptions{})
-	if err1 != nil {
-		return m, err1
-	}
-	var buffer bytes.Buffer
-	err = l.s3Objects.GetObject(ctx, bucket, metaFileName, 0, oi.Size, &buffer, oi.ETag, minio.ObjectOptions{})
-	if err != nil {
-		return m, err
-	}
-	return readGWMetadata(ctx, buffer)
-}
-
-// writeGWMetadata writes dare metadata to the s3 backend
-func (l *s3EncObjects) writeGWMetadata(ctx context.Context, bucket, metaFileName string, m gwMetaV1, o minio.ObjectOptions) error {
-	reader, err := getGWMetadata(ctx, bucket, metaFileName, m)
-	if err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-	_, err = l.s3Objects.PutObject(ctx, bucket, metaFileName, reader, o)
-	return err
-}
-
-// getTmpDareMetaPath returns the path of the temporary metadata json file for the upload
-func getTmpDareMetaPath(object, uploadID string) string {
-	return path.Join(getGWMetaPath(object), uploadID, gwdareMetaJSON)
-}
-
-// getDareMetaPath returns the path of the metadata json file for encrypted objects
-func getDareMetaPath(object string) string {
-	return path.Join(getGWMetaPath(object), gwdareMetaJSON)
-}
-
-// getPartMetaPath returns the path of the temporary part metadata file for multipart uploads
-func getPartMetaPath(object, uploadID string, partID int) string {
-	return path.Join(object, defaultMinioGWPrefix, uploadID, strconv.Itoa(partID), gwpartMetaJSON)
-}
-
-// deleteGWMetadata deletes the custom dare metadata file saved at the backend
-func (l *s3EncObjects) deleteGWMetadata(ctx context.Context, bucket, metaFileName string) error {
-	return l.s3Objects.DeleteObject(ctx, bucket, metaFileName)
-}
-
-func (l *s3EncObjects) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
-	var o minio.ObjectOptions
-	if minio.GlobalGatewaySSE.SSEC() {
-		o = opts
-	}
-	dmeta, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(key))
-	if err != nil {
-		// unencrypted content
-		return l.s3Objects.GetObject(ctx, bucket, key, startOffset, length, writer, etag, o)
-	}
-	if startOffset < 0 {
-		logger.LogIf(ctx, minio.InvalidRange{})
-	}
-
-	// For negative length read everything.
-	if length < 0 {
-		length = dmeta.Stat.Size - startOffset
-	}
-	// Reply back invalid range if the input offset and length fall out of range.
-	if startOffset > dmeta.Stat.Size || startOffset+length > dmeta.Stat.Size {
-		logger.LogIf(ctx, minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size})
-		return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size}
-	}
-	// Get start part index and offset.
- _, partOffset, err := dmeta.ObjectToPartOffset(ctx, startOffset) - if err != nil { - return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size} - } - - // Calculate endOffset according to length - endOffset := startOffset - if length > 0 { - endOffset += length - 1 - } - - // Get last part index to read given length. - if _, _, err := dmeta.ObjectToPartOffset(ctx, endOffset); err != nil { - return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size} - } - return l.s3Objects.GetObject(ctx, bucket, key, partOffset, endOffset, writer, dmeta.ETag, o) -} - -// GetObjectNInfo - returns object info and locked object ReadCloser -func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, o minio.ObjectOptions) (gr *minio.GetObjectReader, err error) { - var opts minio.ObjectOptions - if minio.GlobalGatewaySSE.SSEC() { - opts = o - } - objInfo, err := l.GetObjectInfo(ctx, bucket, object, opts) - if err != nil { - return l.s3Objects.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts) - } - fn, off, length, err := minio.NewGetObjectReader(rs, objInfo, o) - if err != nil { - return nil, minio.ErrorRespToObjectError(err) - } - if l.isGWEncrypted(ctx, bucket, object) { - object = getGWContentPath(object) - } - pr, pw := io.Pipe() - go func() { - err := l.getObject(ctx, bucket, object, off, length, pw, objInfo.ETag, opts) - pw.CloseWithError(err) - }() - - // Setup cleanup function to cause the above go-routine to - // exit in case of partial read - pipeCloser := func() { pr.Close() } - return fn(pr, h, o.CheckCopyPrecondFn, pipeCloser) -} - -// GetObjectInfo reads object info and replies back ObjectInfo -// For custom gateway encrypted large objects, the ObjectInfo is retrieved from the dare.meta file. -func (l *s3EncObjects) GetObjectInfo(ctx context.Context, bucket string, object string, o minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - var opts minio.ObjectOptions - if minio.GlobalGatewaySSE.SSEC() { - opts = o - } - - gwMeta, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(object)) - if err != nil { - return l.s3Objects.GetObjectInfo(ctx, bucket, object, opts) - } - return gwMeta.ToObjectInfo(bucket, object), nil -} - -// CopyObject copies an object from source bucket to a destination bucket. 
-func (l *s3EncObjects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, s, d minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - cpSrcDstSame := strings.EqualFold(path.Join(srcBucket, srcObject), path.Join(dstBucket, dstObject)) - if cpSrcDstSame { - var gwMeta gwMetaV1 - if s.ServerSideEncryption != nil && d.ServerSideEncryption != nil && - ((s.ServerSideEncryption.Type() == encrypt.SSEC && d.ServerSideEncryption.Type() == encrypt.SSEC) || - (s.ServerSideEncryption.Type() == encrypt.S3 && d.ServerSideEncryption.Type() == encrypt.S3)) { - gwMeta, err = l.getGWMetadata(ctx, srcBucket, getDareMetaPath(srcObject)) - if err != nil { - return - } - header := make(http.Header) - if d.ServerSideEncryption != nil { - d.ServerSideEncryption.Marshal(header) - } - for k, v := range header { - srcInfo.UserDefined[k] = v[0] - } - gwMeta.Meta = srcInfo.UserDefined - if err = l.writeGWMetadata(ctx, dstBucket, getDareMetaPath(dstObject), gwMeta, minio.ObjectOptions{}); err != nil { - return objInfo, minio.ErrorRespToObjectError(err) - } - return gwMeta.ToObjectInfo(dstBucket, dstObject), nil - } - } - dstOpts := minio.ObjectOptions{ServerSideEncryption: d.ServerSideEncryption, UserDefined: srcInfo.UserDefined} - return l.PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, dstOpts) -} - -// DeleteObject deletes a blob in bucket -// For custom gateway encrypted large objects, cleans up encrypted content and metadata files -// from the backend. -func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object string) error { - - // Get dare meta json - if _, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(object)); err != nil { - return l.s3Objects.DeleteObject(ctx, bucket, object) - } - // delete encrypted object - l.s3Objects.DeleteObject(ctx, bucket, getGWContentPath(object)) - return l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) -} - -// ListMultipartUploads lists all multipart uploads. 
-func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) { - - lmi, e = l.s3Objects.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) - if e != nil { - return - } - lmi.KeyMarker = strings.TrimSuffix(lmi.KeyMarker, getGWContentPath(minio.SlashSeparator)) - lmi.NextKeyMarker = strings.TrimSuffix(lmi.NextKeyMarker, getGWContentPath(minio.SlashSeparator)) - for i := range lmi.Uploads { - lmi.Uploads[i].Object = strings.TrimSuffix(lmi.Uploads[i].Object, getGWContentPath(minio.SlashSeparator)) - } - return -} - -// NewMultipartUpload uploads object in multiple parts -func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket string, object string, o minio.ObjectOptions) (uploadID string, err error) { - var sseOpts encrypt.ServerSide - if o.ServerSideEncryption == nil { - return l.s3Objects.NewMultipartUpload(ctx, bucket, object, minio.ObjectOptions{UserDefined: o.UserDefined}) - } - // Decide if sse options needed to be passed to backend - if (minio.GlobalGatewaySSE.SSEC() && o.ServerSideEncryption.Type() == encrypt.SSEC) || - (minio.GlobalGatewaySSE.SSES3() && o.ServerSideEncryption.Type() == encrypt.S3) { - sseOpts = o.ServerSideEncryption - } - - uploadID, err = l.s3Objects.NewMultipartUpload(ctx, bucket, getGWContentPath(object), minio.ObjectOptions{ServerSideEncryption: sseOpts}) - if err != nil { - return - } - // Create uploadID and write a temporary dare.meta object under object/uploadID prefix - gwmeta := newGWMetaV1() - gwmeta.Meta = o.UserDefined - gwmeta.Stat.ModTime = time.Now().UTC() - err = l.writeGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID), gwmeta, minio.ObjectOptions{}) - if err != nil { - return uploadID, minio.ErrorRespToObjectError(err) - } - return uploadID, nil -} - -// PutObject creates a new object with the incoming data, -func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object string, data *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - var sseOpts encrypt.ServerSide - // Decide if sse options needed to be passed to backend - if opts.ServerSideEncryption != nil && - ((minio.GlobalGatewaySSE.SSEC() && opts.ServerSideEncryption.Type() == encrypt.SSEC) || - (minio.GlobalGatewaySSE.SSES3() && opts.ServerSideEncryption.Type() == encrypt.S3) || - opts.ServerSideEncryption.Type() == encrypt.KMS) { - sseOpts = opts.ServerSideEncryption - } - if opts.ServerSideEncryption == nil { - defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) - defer l.DeleteObject(ctx, bucket, getGWContentPath(object)) - return l.s3Objects.PutObject(ctx, bucket, object, data, minio.ObjectOptions{UserDefined: opts.UserDefined}) - } - - oi, err := l.s3Objects.PutObject(ctx, bucket, getGWContentPath(object), data, minio.ObjectOptions{ServerSideEncryption: sseOpts}) - if err != nil { - return objInfo, minio.ErrorRespToObjectError(err) - } - - gwMeta := newGWMetaV1() - gwMeta.Meta = make(map[string]string) - for k, v := range opts.UserDefined { - gwMeta.Meta[k] = v - } - encMD5 := data.MD5CurrentHexString() - - gwMeta.ETag = encMD5 - gwMeta.Stat.Size = oi.Size - gwMeta.Stat.ModTime = time.Now().UTC() - if err = l.writeGWMetadata(ctx, bucket, getDareMetaPath(object), gwMeta, minio.ObjectOptions{}); err != nil { - return objInfo, minio.ErrorRespToObjectError(err) - } - objInfo = gwMeta.ToObjectInfo(bucket, object) - // 
delete any unencrypted content of the same name created previously
-	l.s3Objects.DeleteObject(ctx, bucket, object)
-	return objInfo, nil
-}
-
-// PutObjectPart puts a part of object in bucket
-func (l *s3EncObjects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) {
-
-	if opts.ServerSideEncryption == nil {
-		return l.s3Objects.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts)
-	}
-
-	var s3Opts minio.ObjectOptions
-	// for sse-s3, encryption options should not be passed to the backend
-	if opts.ServerSideEncryption != nil && opts.ServerSideEncryption.Type() == encrypt.SSEC && minio.GlobalGatewaySSE.SSEC() {
-		s3Opts = opts
-	}
-
-	uploadPath := getTmpGWMetaPath(object, uploadID)
-	tmpDareMeta := path.Join(uploadPath, gwdareMetaJSON)
-	_, err := l.s3Objects.GetObjectInfo(ctx, bucket, tmpDareMeta, minio.ObjectOptions{})
-	if err != nil {
-		return pi, minio.InvalidUploadID{UploadID: uploadID}
-	}
-
-	pi, e = l.s3Objects.PutObjectPart(ctx, bucket, getGWContentPath(object), uploadID, partID, data, s3Opts)
-	if e != nil {
-		return
-	}
-	gwMeta := newGWMetaV1()
-	gwMeta.Parts = make([]minio.ObjectPartInfo, 1)
-	// Add incoming part.
-	gwMeta.Parts[0] = minio.ObjectPartInfo{
-		Number: partID,
-		ETag:   pi.ETag,
-		Size:   pi.Size,
-	}
-	gwMeta.ETag = data.MD5CurrentHexString() // encrypted ETag
-	gwMeta.Stat.Size = pi.Size
-	gwMeta.Stat.ModTime = pi.LastModified
-
-	if err = l.writeGWMetadata(ctx, bucket, getPartMetaPath(object, uploadID, partID), gwMeta, minio.ObjectOptions{}); err != nil {
-		return pi, minio.ErrorRespToObjectError(err)
-	}
-	return minio.PartInfo{
-		Size:         gwMeta.Stat.Size,
-		ETag:         minio.CanonicalizeETag(gwMeta.ETag),
-		LastModified: gwMeta.Stat.ModTime,
-		PartNumber:   partID,
-	}, nil
-}
-
-// CopyObjectPart creates a part in a multipart upload by copying
-// existing object or a part of it.
-func (l *s3EncObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
-	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) {
-	return l.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, srcInfo.PutObjReader, dstOpts)
-}
-
-// GetMultipartInfo returns multipart info of the uploadId of the object
-func (l *s3EncObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) {
-	result.Bucket = bucket
-	result.Object = object
-	result.UploadID = uploadID
-	// We do not store the parts uploaded so far in dare.meta; only
-	// CompleteMultipartUpload finalizes the parts under the upload prefix.
-	// Otherwise dare.meta could be corrupted by competing upload parts.
-	dm, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
-	if err != nil {
-		return l.s3Objects.GetMultipartInfo(ctx, bucket, object, uploadID, opts)
-	}
-	result.UserDefined = dm.ToObjectInfo(bucket, object).UserDefined
-	return result, nil
-}
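The part bookkeeping above is easiest to follow on a concrete name. Below is a minimal, self-contained sketch of the part-metadata path scheme; the helper is reimplemented locally for illustration, and the object name, uploadID and part number are made up:

    package main

    import (
    	"fmt"
    	"path"
    	"strconv"
    )

    // partMetaPath mirrors getPartMetaPath above:
    // object/.minio/<uploadID>/<partID>/part.meta
    func partMetaPath(object, uploadID string, partID int) string {
    	return path.Join(object, ".minio", uploadID, strconv.Itoa(partID), "part.meta")
    }

    func main() {
    	// Hypothetical names, for illustration only.
    	fmt.Println(partMetaPath("movies/clip.mp4", "b7f3c2", 7))
    	// Output: movies/clip.mp4/.minio/b7f3c2/7/part.meta
    }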
-// ListObjectParts returns all object parts for specified object in specified bucket
-func (l *s3EncObjects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, e error) {
-	// We do not store the parts uploaded so far in dare.meta; only
-	// CompleteMultipartUpload finalizes the parts under the upload prefix.
-	// Otherwise dare.meta could be corrupted by competing upload parts.
-	dm, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
-	if err != nil {
-		return l.s3Objects.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
-	}
-
-	lpi, err = l.s3Objects.ListObjectParts(ctx, bucket, getGWContentPath(object), uploadID, partNumberMarker, maxParts, opts)
-	if err != nil {
-		return lpi, err
-	}
-	for i, part := range lpi.Parts {
-		partMeta, err := l.getGWMetadata(ctx, bucket, getPartMetaPath(object, uploadID, part.PartNumber))
-		if err != nil || len(partMeta.Parts) == 0 {
-			return lpi, minio.InvalidPart{}
-		}
-		lpi.Parts[i].ETag = partMeta.ETag
-	}
-	lpi.UserDefined = dm.ToObjectInfo(bucket, object).UserDefined
-	lpi.Object = object
-	return lpi, nil
-}
-
-// AbortMultipartUpload aborts an ongoing multipart upload
-func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error {
-	if _, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID)); err != nil {
-		return l.s3Objects.AbortMultipartUpload(ctx, bucket, object, uploadID)
-	}
-
-	if err := l.s3Objects.AbortMultipartUpload(ctx, bucket, getGWContentPath(object), uploadID); err != nil {
-		return err
-	}
-
-	uploadPrefix := getTmpGWMetaPath(object, uploadID)
-	var continuationToken, startAfter, delimiter string
-	for {
-		loi, err := l.s3Objects.ListObjectsV2(ctx, bucket, uploadPrefix, continuationToken, delimiter, 1000, false, startAfter)
-		if err != nil {
-			return minio.InvalidUploadID{UploadID: uploadID}
-		}
-		for _, obj := range loi.Objects {
-			if err := l.s3Objects.DeleteObject(ctx, bucket, obj.Name); err != nil {
-				return minio.ErrorRespToObjectError(err)
-			}
-			startAfter = obj.Name
-		}
-		continuationToken = loi.NextContinuationToken
-		if !loi.IsTruncated {
-			break
-		}
-	}
-	return nil
-}
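The abort path drains a V2 listing page by page; the same continuation-token loop recurs throughout this file. A distilled, runnable version of the pattern follows, with a hypothetical pageLister interface standing in for l.s3Objects.ListObjectsV2:

    package main

    import "fmt"

    // pageLister abstracts the single call the loop needs (hypothetical
    // interface; the real code calls l.s3Objects.ListObjectsV2 directly).
    type pageLister interface {
    	List(prefix, token string) (names []string, next string, truncated bool, err error)
    }

    // forEachObject visits every object under prefix, following continuation
    // tokens until the backend reports the listing is no longer truncated.
    func forEachObject(l pageLister, prefix string, visit func(string) error) error {
    	var token string
    	for {
    		names, next, truncated, err := l.List(prefix, token)
    		if err != nil {
    			return err
    		}
    		for _, n := range names {
    			if err := visit(n); err != nil {
    				return err
    			}
    		}
    		if !truncated {
    			return nil
    		}
    		token = next
    	}
    }

    // fakeLister serves two fixed pages, standing in for a real backend.
    type fakeLister struct{ page int }

    func (f *fakeLister) List(prefix, token string) ([]string, string, bool, error) {
    	f.page++
    	if f.page == 1 {
    		return []string{prefix + "/a", prefix + "/b"}, "t1", true, nil
    	}
    	return []string{prefix + "/c"}, "", false, nil
    }

    func main() {
    	_ = forEachObject(&fakeLister{}, "obj/.minio/upload1", func(n string) error {
    		fmt.Println("delete", n)
    		return nil
    	})
    }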
-// CompleteMultipartUpload completes an ongoing multipart upload and finalizes the object
-func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) {
-
-	tmpMeta, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
-	if err != nil {
-		oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
-		if e == nil {
-			// delete any encrypted version of the object that might exist
-			defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object))
-			defer l.DeleteObject(ctx, bucket, getGWContentPath(object))
-		}
-		return oi, e
-	}
-	gwMeta := newGWMetaV1()
-	gwMeta.Meta = make(map[string]string)
-	for k, v := range tmpMeta.Meta {
-		gwMeta.Meta[k] = v
-	}
-	// Allocate parts similar to incoming slice.
-	gwMeta.Parts = make([]minio.ObjectPartInfo, len(uploadedParts))
-
-	bkUploadedParts := make([]minio.CompletePart, len(uploadedParts))
-	// Calculate full object size.
-	var objectSize int64
-
-	// Validate each part and then commit to disk.
-	for i, part := range uploadedParts {
-		partMeta, err := l.getGWMetadata(ctx, bucket, getPartMetaPath(object, uploadID, part.PartNumber))
-		if err != nil || len(partMeta.Parts) == 0 {
-			return oi, minio.InvalidPart{}
-		}
-		bkUploadedParts[i] = minio.CompletePart{PartNumber: part.PartNumber, ETag: partMeta.Parts[0].ETag}
-		gwMeta.Parts[i] = partMeta.Parts[0]
-		objectSize += partMeta.Parts[0].Size
-	}
-	oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, getGWContentPath(object), uploadID, bkUploadedParts, opts)
-	if e != nil {
-		return oi, e
-	}
-
-	// delete any unencrypted version of the object that might be on the backend
-	defer l.s3Objects.DeleteObject(ctx, bucket, object)
-
-	// Save the final object size and modtime.
-	gwMeta.Stat.Size = objectSize
-	gwMeta.Stat.ModTime = time.Now().UTC()
-	gwMeta.ETag = oi.ETag
-
-	if err = l.writeGWMetadata(ctx, bucket, getDareMetaPath(object), gwMeta, minio.ObjectOptions{}); err != nil {
-		return oi, minio.ErrorRespToObjectError(err)
-	}
-	// Clean up any uploaded parts that are not being committed by this CompleteMultipartUpload operation
-	var continuationToken, startAfter, delimiter string
-	uploadPrefix := getTmpGWMetaPath(object, uploadID)
-	done := false
-	for {
-		loi, lerr := l.s3Objects.ListObjectsV2(ctx, bucket, uploadPrefix, continuationToken, delimiter, 1000, false, startAfter)
-		if lerr != nil {
-			break
-		}
-		for _, obj := range loi.Objects {
-			if !strings.HasPrefix(obj.Name, uploadPrefix) {
-				done = true
-				break
-			}
-			startAfter = obj.Name
-			l.s3Objects.DeleteObject(ctx, bucket, obj.Name)
-		}
-		continuationToken = loi.NextContinuationToken
-		if !loi.IsTruncated || done {
-			break
-		}
-	}
-
-	return gwMeta.ToObjectInfo(bucket, object), nil
-}
-
-// getTmpGWMetaPath returns the prefix under which uploads in progress are stored on the backend
-func getTmpGWMetaPath(object, uploadID string) string {
-	return path.Join(object, defaultMinioGWPrefix, uploadID)
-}
-
-// getGWMetaPath returns the prefix under which custom object metadata and content are stored on the backend after upload completes
-func getGWMetaPath(object string) string {
-	return path.Join(object, defaultMinioGWPrefix)
-}
-
-// getGWContentPath returns the path under which the custom object content is stored on the backend after upload completes
-func getGWContentPath(object string) string {
-	return path.Join(object, defaultMinioGWPrefix, defaultGWContentFileName)
-}
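Put together, the three helpers above lay an object out as follows; the object name "docs/report.pdf" and uploadID "u1" are hypothetical:

    getGWMetaPath("docs/report.pdf")          -> docs/report.pdf/.minio
    getGWContentPath("docs/report.pdf")       -> docs/report.pdf/.minio/data
    getTmpGWMetaPath("docs/report.pdf", "u1") -> docs/report.pdf/.minio/u1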
-// cleanupStaleEncMultipartUploads cleans up stale incomplete encrypted multipart uploads. Should be run in a goroutine.
-func (l *s3EncObjects) cleanupStaleEncMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration) {
-	ticker := time.NewTicker(cleanupInterval)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-ticker.C:
-			l.cleanupStaleEncMultipartUploadsOnGW(ctx, expiry)
-		}
-	}
-}
-
-// cleanupStaleEncMultipartUploadsOnGW removes old custom encryption multipart
-// uploads on the backend; one sweep over all buckets per invocation, the
-// caller's ticker drives the cadence.
-func (l *s3EncObjects) cleanupStaleEncMultipartUploadsOnGW(ctx context.Context, expiry time.Duration) {
-	buckets, err := l.s3Objects.ListBuckets(ctx)
-	if err != nil {
-		return
-	}
-	for _, b := range buckets {
-		expParts := l.getStalePartsForBucket(ctx, b.Name, expiry)
-		for k := range expParts {
-			l.s3Objects.DeleteObject(ctx, b.Name, k)
-		}
-	}
-}
-
-func (l *s3EncObjects) getStalePartsForBucket(ctx context.Context, bucket string, expiry time.Duration) (expParts map[string]string) {
-	var prefix, continuationToken, delimiter, startAfter string
-	expParts = make(map[string]string)
-	now := time.Now()
-	for {
-		loi, err := l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, false, startAfter)
-		if err != nil {
-			break
-		}
-		for _, obj := range loi.Objects {
-			startAfter = obj.Name
-			if !strings.Contains(obj.Name, defaultMinioGWPrefix) {
-				continue
-			}
-
-			if isGWObject(obj.Name) {
-				continue
-			}
-
-			// delete temporary part.meta or dare.meta files for incomplete uploads that are past expiry
-			if (strings.HasSuffix(obj.Name, gwpartMetaJSON) || strings.HasSuffix(obj.Name, gwdareMetaJSON)) &&
-				now.Sub(obj.ModTime) > expiry {
-				expParts[obj.Name] = ""
-			}
-		}
-		continuationToken = loi.NextContinuationToken
-		if !loi.IsTruncated {
-			break
-		}
-	}
-	return
-}
-
-// DeleteBucket removes the bucket on the backend, refusing if it still holds
-// anything other than temporary metadata of incomplete uploads.
-func (l *s3EncObjects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
-	var prefix, continuationToken, delimiter, startAfter string
-	expParts := make(map[string]string)
-
-	for {
-		loi, err := l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, false, startAfter)
-		if err != nil {
-			break
-		}
-		for _, obj := range loi.Objects {
-			startAfter = obj.Name
-			if !strings.Contains(obj.Name, defaultMinioGWPrefix) {
-				return minio.BucketNotEmpty{}
-			}
-			if isGWObject(obj.Name) {
-				return minio.BucketNotEmpty{}
-			}
-			// temporary part.meta or dare.meta files of incomplete uploads may be removed
-			if strings.HasSuffix(obj.Name, gwpartMetaJSON) || strings.HasSuffix(obj.Name, gwdareMetaJSON) {
-				expParts[obj.Name] = ""
-			}
-		}
-		continuationToken = loi.NextContinuationToken
-		if !loi.IsTruncated {
-			break
-		}
-	}
-	for k := range expParts {
-		l.s3Objects.DeleteObject(ctx, bucket, k)
-	}
-	err := l.Client.RemoveBucket(bucket)
-	if err != nil {
-		return minio.ErrorRespToObjectError(err, bucket)
-	}
-	return nil
-}
diff --git a/cmd/gateway/s3/gateway-s3-utils.go b/cmd/gateway/s3/gateway-s3-utils.go
deleted file mode 100644
index 784f244..0000000
--- a/cmd/gateway/s3/gateway-s3-utils.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * MinIO Cloud Storage, (C) 2018 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3 - -import ( - minio "github.com/minio/minio/cmd" -) - -// List of header keys to be filtered, usually -// from all S3 API http responses. -var defaultFilterKeys = []string{ - "Connection", - "Transfer-Encoding", - "Accept-Ranges", - "Date", - "Server", - "Vary", - "x-amz-bucket-region", - "x-amz-request-id", - "x-amz-id-2", - "Content-Security-Policy", - "X-Xss-Protection", - - // Add new headers to be ignored. -} - -// FromGatewayObjectPart converts ObjectInfo for custom part stored as object to PartInfo -func FromGatewayObjectPart(partID int, oi minio.ObjectInfo) (pi minio.PartInfo) { - return minio.PartInfo{ - Size: oi.Size, - ETag: minio.CanonicalizeETag(oi.ETag), - LastModified: oi.ModTime, - PartNumber: partID, - } -} diff --git a/cmd/gateway/s3/gateway-s3.go b/cmd/gateway/s3/gateway-s3.go deleted file mode 100644 index 61ab1ee..0000000 --- a/cmd/gateway/s3/gateway-s3.go +++ /dev/null @@ -1,760 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3 - -import ( - "context" - "encoding/json" - "io" - "math/rand" - "net/http" - "net/url" - "strings" - "time" - - "github.com/minio/cli" - miniogo "github.com/minio/minio-go/v6" - "github.com/minio/minio-go/v6/pkg/credentials" - "github.com/minio/minio-go/v6/pkg/tags" - minio "github.com/minio/minio/cmd" - - "github.com/minio/minio-go/v6/pkg/encrypt" - "github.com/minio/minio-go/v6/pkg/s3utils" - xhttp "github.com/minio/minio/cmd/http" - "github.com/minio/minio/cmd/logger" - "github.com/minio/minio/pkg/auth" - "github.com/minio/minio/pkg/bucket/policy" -) - -const ( - s3Backend = "s3" -) - -func init() { - const s3GatewayTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT] -{{if .VisibleFlags}} -FLAGS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -ENDPOINT: - s3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com - -EXAMPLES: - 1. Start minio gateway server for AWS S3 backend - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.HelpName}} - - 2. 
Start minio gateway server for AWS S3 backend with edge caching enabled
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ACCESS_KEY{{.AssignmentOperator}}accesskey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_SECRET_KEY{{.AssignmentOperator}}secretkey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
-     {{.Prompt}} {{.HelpName}}
-`
-
-	minio.RegisterGatewayCommand(cli.Command{
-		Name:               s3Backend,
-		Usage:              "Amazon Simple Storage Service (S3)",
-		Action:             s3GatewayMain,
-		CustomHelpTemplate: s3GatewayTemplate,
-		HideHelpCommand:    true,
-	})
-}
-
-// Handler for 'minio gateway s3' command line.
-func s3GatewayMain(ctx *cli.Context) {
-	args := ctx.Args()
-	if !ctx.Args().Present() {
-		args = cli.Args{"https://s3.amazonaws.com"}
-	}
-
-	serverAddr := ctx.GlobalString("address")
-	if serverAddr == "" || serverAddr == ":"+minio.GlobalMinioDefaultPort {
-		serverAddr = ctx.String("address")
-	}
-	// Validate gateway arguments.
-	logger.FatalIf(minio.ValidateGatewayArguments(serverAddr, args.First()), "Invalid argument")
-
-	// Start the gateway.
-	minio.StartGateway(ctx, &S3{args.First()})
-}
-
-// S3 implements Gateway.
-type S3 struct {
-	host string
-}
-
-// Name implements Gateway interface.
-func (g *S3) Name() string {
-	return s3Backend
-}
-
-const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
-const (
-	letterIdxBits = 6                    // 6 bits to represent a letter index
-	letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
-	letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
-)
-
-// randString generates random names, prepended with a known prefix.
-func randString(n int, src rand.Source, prefix string) string {
-	b := make([]byte, n)
-	// A src.Int63() generates 63 random bits, enough for letterIdxMax letters!
-	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
-		if remain == 0 {
-			cache, remain = src.Int63(), letterIdxMax
-		}
-		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
-			b[i] = letterBytes[idx]
-			i--
-		}
-		cache >>= letterIdxBits
-		remain--
-	}
-	return prefix + string(b[0:30-len(prefix)])
-}
-
-// Chains all credential types, in the following order:
-//  - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
-//  - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
-//  - Static credentials provided by user (i.e. MINIO_ACCESS_KEY)
-var defaultProviders = []credentials.Provider{
-	&credentials.EnvAWS{},
-	&credentials.FileAWSCredentials{},
-	&credentials.EnvMinio{},
-}
-
-// Chains all credential types, in the following order:
-//  - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
-//  - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
-//  - IAM profile based credentials. (performs an HTTP
-//    call to a pre-defined endpoint, only valid inside
-//    configured ec2 instances)
-var defaultAWSCredProviders = []credentials.Provider{
-	&credentials.EnvAWS{},
-	&credentials.FileAWSCredentials{},
-	&credentials.IAM{
-		Client: &http.Client{
-			Transport: minio.NewGatewayHTTPTransport(),
-		},
-	},
-	&credentials.EnvMinio{},
}
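A chain credential provider hands out the first non-empty credentials in the slice. A small, self-contained sketch of how such a chain is consumed, assuming only the minio-go/v6 credentials package already imported above:

    package main

    import (
    	"fmt"

    	"github.com/minio/minio-go/v6/pkg/credentials"
    )

    func main() {
    	// First provider that yields a non-empty key wins: AWS env vars,
    	// then ~/.aws/credentials, then MINIO_ACCESS_KEY/MINIO_SECRET_KEY.
    	chain := credentials.NewChainCredentials([]credentials.Provider{
    		&credentials.EnvAWS{},
    		&credentials.FileAWSCredentials{},
    		&credentials.EnvMinio{},
    	})
    	v, err := chain.Get()
    	if err != nil {
    		fmt.Println("no credentials found:", err)
    		return
    	}
    	fmt.Println("using access key:", v.AccessKeyID)
    }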
-// newS3 - Initializes a new client by auto probing S3 server signature.
-func newS3(urlStr string) (*miniogo.Core, error) {
-	if urlStr == "" {
-		urlStr = "https://s3.amazonaws.com"
-	}
-
-	u, err := url.Parse(urlStr)
-	if err != nil {
-		return nil, err
-	}
-
-	// Override default params if the host is provided
-	endpoint, secure, err := minio.ParseGatewayEndpoint(urlStr)
-	if err != nil {
-		return nil, err
-	}
-
-	var creds *credentials.Credentials
-	if s3utils.IsAmazonEndpoint(*u) {
-		// If we see an Amazon S3 endpoint, then we use more ways to fetch backend credentials.
-		// Specifically, IAM style rotating credentials are only supported with the AWS S3 endpoint.
-		creds = credentials.NewChainCredentials(defaultAWSCredProviders)
-	} else {
-		creds = credentials.NewChainCredentials(defaultProviders)
-	}
-
-	options := miniogo.Options{
-		Creds:        creds,
-		Secure:       secure,
-		Region:       s3utils.GetRegionFromURL(*u),
-		BucketLookup: miniogo.BucketLookupAuto,
-	}
-
-	clnt, err := miniogo.NewWithOptions(endpoint, &options)
-	if err != nil {
-		return nil, err
-	}
-
-	return &miniogo.Core{Client: clnt}, nil
-}
-
-// NewGatewayLayer returns s3 ObjectLayer.
-func (g *S3) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
-	// creds are ignored here, since the S3 gateway implements chaining of
-	// all credential providers.
-	clnt, err := newS3(g.host)
-	if err != nil {
-		return nil, err
-	}
-
-	metrics := minio.NewMetrics()
-
-	t := &minio.MetricsTransport{
-		Transport: minio.NewGatewayHTTPTransport(),
-		Metrics:   metrics,
-	}
-
-	// Set custom transport
-	clnt.SetCustomTransport(t)
-
-	probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-bucket-sign-")
-
-	// Check if the provided keys are valid.
-	if _, err = clnt.BucketExists(probeBucketName); err != nil {
-		if miniogo.ToErrorResponse(err).Code != "AccessDenied" {
-			return nil, err
-		}
-	}
-
-	s := s3Objects{
-		Client:  clnt,
-		Metrics: metrics,
-		HTTPClient: &http.Client{
-			Transport: t,
-		},
-	}
-
-	// Enable single encryption if KMS is configured.
-	if minio.GlobalKMS != nil {
-		encS := s3EncObjects{s}
-
-		// Start stale enc multipart uploads cleanup routine.
-		go encS.cleanupStaleEncMultipartUploads(minio.GlobalContext,
-			minio.GlobalMultipartCleanupInterval, minio.GlobalMultipartExpiry)
-
-		return &encS, nil
-	}
-	return &s, nil
-}
-
-// Production - s3 gateway is production ready.
-func (g *S3) Production() bool {
-	return true
-}
-
-// s3Objects implements gateway for MinIO and S3 compatible object storage servers.
-type s3Objects struct {
-	minio.GatewayUnsupported
-	Client     *miniogo.Core
-	HTTPClient *http.Client
-	Metrics    *minio.Metrics
-}
-
-// GetMetrics returns this gateway's metrics
-func (l *s3Objects) GetMetrics(ctx context.Context) (*minio.Metrics, error) {
-	return l.Metrics, nil
-}
-
-// Shutdown saves any gateway metadata to disk
-// if necessary and reloads it upon next restart.
-func (l *s3Objects) Shutdown(ctx context.Context) error {
-	return nil
-}
-
-// StorageInfo is not relevant to S3 backend.
-func (l *s3Objects) StorageInfo(ctx context.Context, _ bool) (si minio.StorageInfo, _ []error) {
-	si.Backend.Type = minio.BackendGateway
-	si.Backend.GatewayOnline = minio.IsBackendOnline(ctx, l.HTTPClient, l.Client.EndpointURL().String())
-	return si, nil
-}
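The probe-bucket check in NewGatewayLayer above is worth spelling out: it stats a random, almost certainly nonexistent bucket purely to force one signed round-trip. A distilled sketch with hypothetical function parameters (the real code inspects miniogo.ToErrorResponse(err).Code rather than a sentinel error):

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errAccessDenied = errors.New("AccessDenied")

    // validateBackendKeys forces one authenticated request. "Bucket not found"
    // still proves the signature was accepted; AccessDenied is tolerated (the
    // keys are valid but may not stat foreign buckets); anything else, e.g. a
    // signature mismatch or a transport failure, is fatal.
    func validateBackendKeys(bucketExists func(string) (bool, error), probe string) error {
    	if _, err := bucketExists(probe); err != nil && !errors.Is(err, errAccessDenied) {
    		return err
    	}
    	return nil
    }

    func main() {
    	// Fake backend: the probe bucket does not exist, but the request signs fine.
    	exists := func(name string) (bool, error) { return false, nil }
    	fmt.Println(validateBackendKeys(exists, "probe-bucket-sign-xyz")) // <nil>
    }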
-// MakeBucketWithLocation creates a new container on the S3 backend.
-func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error {
-	if lockEnabled {
-		return minio.NotImplemented{}
-	}
-
-	// Verify if bucket name is valid.
-	// We are using a separate helper function here to validate bucket
-	// names instead of IsValidBucketName() because there is a possibility
-	// that certain users might have buckets which are non-DNS compliant
-	// in us-east-1 and we might severely restrict them by not allowing
-	// access to these buckets.
-	// Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
-	if s3utils.CheckValidBucketName(bucket) != nil {
-		return minio.BucketNameInvalid{Bucket: bucket}
-	}
-	err := l.Client.MakeBucket(bucket, location)
-	if err != nil {
-		return minio.ErrorRespToObjectError(err, bucket)
-	}
-	return err
-}
-
-// GetBucketInfo gets bucket metadata.
-func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string) (bi minio.BucketInfo, e error) {
-	buckets, err := l.Client.ListBuckets()
-	if err != nil {
-		// ListBuckets may be disallowed, proceed to check if the
-		// bucket indeed exists; if yes, return success.
-		var ok bool
-		if ok, err = l.Client.BucketExists(bucket); err != nil {
-			return bi, minio.ErrorRespToObjectError(err, bucket)
-		}
-		if !ok {
-			return bi, minio.BucketNotFound{Bucket: bucket}
-		}
-		return minio.BucketInfo{
-			Name:    bucket,
-			Created: time.Now().UTC(),
-		}, nil
-	}
-
-	for _, bi := range buckets {
-		if bi.Name != bucket {
-			continue
-		}
-
-		return minio.BucketInfo{
-			Name:    bi.Name,
-			Created: bi.CreationDate,
-		}, nil
-	}
-
-	return bi, minio.BucketNotFound{Bucket: bucket}
-}
-
-// ListBuckets lists all S3 buckets
-func (l *s3Objects) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
-	buckets, err := l.Client.ListBuckets()
-	if err != nil {
-		return nil, minio.ErrorRespToObjectError(err)
-	}
-
-	b := make([]minio.BucketInfo, len(buckets))
-	for i, bi := range buckets {
-		b[i] = minio.BucketInfo{
-			Name:    bi.Name,
-			Created: bi.CreationDate,
-		}
-	}
-
-	return b, err
-}
-
-// DeleteBucket deletes a bucket on S3
-func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string, forceDelete bool) error {
-	err := l.Client.RemoveBucket(bucket)
-	if err != nil {
-		return minio.ErrorRespToObjectError(err, bucket)
-	}
-	return nil
-}
-
-// ListObjects lists all blobs in S3 bucket filtered by prefix
-func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
-	result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
-	if err != nil {
-		return loi, minio.ErrorRespToObjectError(err, bucket)
-	}
-
-	return minio.FromMinioClientListBucketResult(bucket, result), nil
-}
-
-// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix
-func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
-	result, err := l.Client.ListObjectsV2(bucket, prefix, continuationToken, fetchOwner, delimiter, maxKeys, startAfter)
-	if err != nil {
-		return loi, minio.ErrorRespToObjectError(err, bucket)
-	}
-
-	return minio.FromMinioClientListBucketV2Result(bucket, result), nil
-}
-
-// GetObjectNInfo - returns object info and locked object ReadCloser
-func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
-	var objInfo minio.ObjectInfo
-	objInfo, err = l.GetObjectInfo(ctx, bucket, object, opts)
-	if err != nil {
-		return nil,
minio.ErrorRespToObjectError(err, bucket, object) - } - - var startOffset, length int64 - startOffset, length, err = rs.GetOffsetLength(objInfo.Size) - if err != nil { - return nil, minio.ErrorRespToObjectError(err, bucket, object) - } - - pr, pw := io.Pipe() - go func() { - err := l.GetObject(ctx, bucket, object, startOffset, length, pw, objInfo.ETag, opts) - pw.CloseWithError(err) - }() - // Setup cleanup function to cause the above go-routine to - // exit in case of partial read - pipeCloser := func() { pr.Close() } - return minio.NewGetObjectReaderFromReader(pr, objInfo, opts, pipeCloser) -} - -// GetObject reads an object from S3. Supports additional -// parameters like offset and length which are synonymous with -// HTTP Range requests. -// -// startOffset indicates the starting read location of the object. -// length indicates the total length of the object. -func (l *s3Objects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, o minio.ObjectOptions) error { - if length < 0 && length != -1 { - return minio.ErrorRespToObjectError(minio.InvalidRange{}, bucket, key) - } - - opts := miniogo.GetObjectOptions{} - opts.ServerSideEncryption = o.ServerSideEncryption - - if startOffset >= 0 && length >= 0 { - if err := opts.SetRange(startOffset, startOffset+length-1); err != nil { - return minio.ErrorRespToObjectError(err, bucket, key) - } - } - object, _, _, err := l.Client.GetObject(bucket, key, opts) - if err != nil { - return minio.ErrorRespToObjectError(err, bucket, key) - } - defer object.Close() - if _, err := io.Copy(writer, object); err != nil { - return minio.ErrorRespToObjectError(err, bucket, key) - } - return nil -} - -// GetObjectInfo reads object info and replies back ObjectInfo -func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - oi, err := l.Client.StatObject(bucket, object, miniogo.StatObjectOptions{ - GetObjectOptions: miniogo.GetObjectOptions{ - ServerSideEncryption: opts.ServerSideEncryption, - }, - }) - if err != nil { - return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object) - } - - return minio.FromMinioClientObjectInfo(bucket, oi), nil -} - -// PutObject creates a new object with the incoming data, -func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - data := r.Reader - var tagMap map[string]string - if tagstr, ok := opts.UserDefined[xhttp.AmzObjectTagging]; ok && tagstr != "" { - tagObj, err := tags.ParseObjectTags(tagstr) - if err != nil { - return objInfo, minio.ErrorRespToObjectError(err, bucket, object) - } - tagMap = tagObj.ToMap() - delete(opts.UserDefined, xhttp.AmzObjectTagging) - } - putOpts := miniogo.PutObjectOptions{ - UserMetadata: opts.UserDefined, - ServerSideEncryption: opts.ServerSideEncryption, - UserTags: tagMap, - } - oi, err := l.Client.PutObject(bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), putOpts) - if err != nil { - return objInfo, minio.ErrorRespToObjectError(err, bucket, object) - } - - // On success, populate the key & metadata so they are present in the notification - oi.Key = object - oi.Metadata = minio.ToMinioClientObjectInfoMetadata(opts.UserDefined) - - return minio.FromMinioClientObjectInfo(bucket, oi), nil -} - -// CopyObject copies an object from source bucket to a destination bucket. 
-func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { - return minio.ObjectInfo{}, minio.PreConditionFailed{} - } - // Set this header such that following CopyObject() always sets the right metadata on the destination. - // metadata input is already a trickled down value from interpreting x-amz-metadata-directive at - // handler layer. So what we have right now is supposed to be applied on the destination object anyways. - // So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API. - srcInfo.UserDefined["x-amz-metadata-directive"] = "REPLACE" - srcInfo.UserDefined["x-amz-copy-source-if-match"] = srcInfo.ETag - header := make(http.Header) - if srcOpts.ServerSideEncryption != nil { - encrypt.SSECopy(srcOpts.ServerSideEncryption).Marshal(header) - } - - if dstOpts.ServerSideEncryption != nil { - dstOpts.ServerSideEncryption.Marshal(header) - } - - for k, v := range header { - srcInfo.UserDefined[k] = v[0] - } - - if _, err = l.Client.CopyObject(srcBucket, srcObject, dstBucket, dstObject, srcInfo.UserDefined); err != nil { - return objInfo, minio.ErrorRespToObjectError(err, srcBucket, srcObject) - } - return l.GetObjectInfo(ctx, dstBucket, dstObject, dstOpts) -} - -// DeleteObject deletes a blob in bucket -func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string) error { - err := l.Client.RemoveObject(bucket, object) - if err != nil { - return minio.ErrorRespToObjectError(err, bucket, object) - } - - return nil -} - -func (l *s3Objects) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) { - errs := make([]error, len(objects)) - for idx, object := range objects { - errs[idx] = l.DeleteObject(ctx, bucket, object) - } - return errs, nil -} - -// ListMultipartUploads lists all multipart uploads. 
-func (l *s3Objects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) { - result, err := l.Client.ListMultipartUploads(bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) - if err != nil { - return lmi, err - } - - return minio.FromMinioClientListMultipartsInfo(result), nil -} - -// NewMultipartUpload upload object in multiple parts -func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket string, object string, o minio.ObjectOptions) (uploadID string, err error) { - var tagMap map[string]string - if tagStr, ok := o.UserDefined[xhttp.AmzObjectTagging]; ok { - tagObj, err := tags.Parse(tagStr, true) - if err != nil { - return uploadID, minio.ErrorRespToObjectError(err, bucket, object) - } - tagMap = tagObj.ToMap() - delete(o.UserDefined, xhttp.AmzObjectTagging) - } - // Create PutObject options - opts := miniogo.PutObjectOptions{ - UserMetadata: o.UserDefined, - ServerSideEncryption: o.ServerSideEncryption, - UserTags: tagMap, - } - uploadID, err = l.Client.NewMultipartUpload(bucket, object, opts) - if err != nil { - return uploadID, minio.ErrorRespToObjectError(err, bucket, object) - } - return uploadID, nil -} - -// PutObjectPart puts a part of object in bucket -func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) { - data := r.Reader - info, err := l.Client.PutObjectPart(bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), opts.ServerSideEncryption) - if err != nil { - return pi, minio.ErrorRespToObjectError(err, bucket, object) - } - - return minio.FromMinioClientObjectPart(info), nil -} - -// CopyObjectPart creates a part in a multipart upload by copying -// existing object or a part of it. 
-func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, - partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (p minio.PartInfo, err error) { - if srcOpts.CheckCopyPrecondFn != nil && srcOpts.CheckCopyPrecondFn(srcInfo, "") { - return minio.PartInfo{}, minio.PreConditionFailed{} - } - srcInfo.UserDefined = map[string]string{ - "x-amz-copy-source-if-match": srcInfo.ETag, - } - header := make(http.Header) - if srcOpts.ServerSideEncryption != nil { - encrypt.SSECopy(srcOpts.ServerSideEncryption).Marshal(header) - } - - if dstOpts.ServerSideEncryption != nil { - dstOpts.ServerSideEncryption.Marshal(header) - } - for k, v := range header { - srcInfo.UserDefined[k] = v[0] - } - - completePart, err := l.Client.CopyObjectPart(srcBucket, srcObject, destBucket, destObject, - uploadID, partID, startOffset, length, srcInfo.UserDefined) - if err != nil { - return p, minio.ErrorRespToObjectError(err, srcBucket, srcObject) - } - p.PartNumber = completePart.PartNumber - p.ETag = completePart.ETag - return p, nil -} - -// GetMultipartInfo returns multipart info of the uploadId of the object -func (l *s3Objects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) { - result.Bucket = bucket - result.Object = object - result.UploadID = uploadID - return result, nil -} - -// ListObjectParts returns all object parts for specified object in specified bucket -func (l *s3Objects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, e error) { - result, err := l.Client.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts) - if err != nil { - return lpi, err - } - lpi = minio.FromMinioClientListPartsInfo(result) - if lpi.IsTruncated && maxParts > len(lpi.Parts) { - partNumberMarker = lpi.NextPartNumberMarker - for { - result, err = l.Client.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts) - if err != nil { - return lpi, err - } - - nlpi := minio.FromMinioClientListPartsInfo(result) - - partNumberMarker = nlpi.NextPartNumberMarker - - lpi.Parts = append(lpi.Parts, nlpi.Parts...) 
- if !nlpi.IsTruncated { - break - } - } - } - return lpi, nil -} - -// AbortMultipartUpload aborts a ongoing multipart upload -func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string) error { - err := l.Client.AbortMultipartUpload(bucket, object, uploadID) - return minio.ErrorRespToObjectError(err, bucket, object) -} - -// CompleteMultipartUpload completes ongoing multipart upload and finalizes object -func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) { - etag, err := l.Client.CompleteMultipartUpload(bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts)) - if err != nil { - return oi, minio.ErrorRespToObjectError(err, bucket, object) - } - - return minio.ObjectInfo{Bucket: bucket, Name: object, ETag: strings.Trim(etag, "\"")}, nil -} - -// SetBucketPolicy sets policy on bucket -func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error { - data, err := json.Marshal(bucketPolicy) - if err != nil { - // This should not happen. - logger.LogIf(ctx, err) - return minio.ErrorRespToObjectError(err, bucket) - } - - if err := l.Client.SetBucketPolicy(bucket, string(data)); err != nil { - return minio.ErrorRespToObjectError(err, bucket) - } - - return nil -} - -// GetBucketPolicy will get policy on bucket -func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { - data, err := l.Client.GetBucketPolicy(bucket) - if err != nil { - return nil, minio.ErrorRespToObjectError(err, bucket) - } - - bucketPolicy, err := policy.ParseConfig(strings.NewReader(data), bucket) - return bucketPolicy, minio.ErrorRespToObjectError(err, bucket) -} - -// DeleteBucketPolicy deletes all policies on bucket -func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error { - if err := l.Client.SetBucketPolicy(bucket, ""); err != nil { - return minio.ErrorRespToObjectError(err, bucket, "") - } - return nil -} - -// GetObjectTags gets the tags set on the object -func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object string) (*tags.Tags, error) { - var err error - var tagObj *tags.Tags - var tagStr string - var opts minio.ObjectOptions - - if _, err = l.GetObjectInfo(ctx, bucket, object, opts); err != nil { - return nil, minio.ErrorRespToObjectError(err, bucket, object) - } - - if tagStr, err = l.Client.GetObjectTagging(bucket, object); err != nil { - return nil, minio.ErrorRespToObjectError(err, bucket, object) - } - - if tagObj, err = tags.ParseObjectXML(strings.NewReader(tagStr)); err != nil { - return nil, minio.ErrorRespToObjectError(err, bucket, object) - } - return tagObj, err -} - -// PutObjectTags attaches the tags to the object -func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, tagStr string) error { - tagObj, err := tags.Parse(tagStr, true) - if err != nil { - return minio.ErrorRespToObjectError(err, bucket, object) - } - if err = l.Client.PutObjectTagging(bucket, object, tagObj.ToMap()); err != nil { - return minio.ErrorRespToObjectError(err, bucket, object) - } - return nil -} - -// DeleteObjectTags removes the tags attached to the object -func (l *s3Objects) DeleteObjectTags(ctx context.Context, bucket, object string) error { - if err := l.Client.RemoveObjectTagging(bucket, object); err != nil { - return 
minio.ErrorRespToObjectError(err, bucket, object) - } - return nil -} - -// IsCompressionSupported returns whether compression is applicable for this layer. -func (l *s3Objects) IsCompressionSupported() bool { - return false -} - -// IsEncryptionSupported returns whether server side encryption is implemented for this layer. -func (l *s3Objects) IsEncryptionSupported() bool { - return minio.GlobalKMS != nil || len(minio.GlobalGatewaySSE) > 0 -} - -// IsReady returns whether the layer is ready to take requests. -func (l *s3Objects) IsReady(ctx context.Context) bool { - return minio.IsBackendOnline(ctx, l.HTTPClient, l.Client.EndpointURL().String()) -} - -func (l *s3Objects) IsTaggingSupported() bool { - return true -} diff --git a/cmd/gateway/s3/gateway-s3_test.go b/cmd/gateway/s3/gateway-s3_test.go deleted file mode 100644 index c597299..0000000 --- a/cmd/gateway/s3/gateway-s3_test.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3 - -import ( - "fmt" - "testing" - - miniogo "github.com/minio/minio-go/v6" - "github.com/minio/minio/pkg/hash" - - minio "github.com/minio/minio/cmd" -) - -func errResponse(code string) miniogo.ErrorResponse { - return miniogo.ErrorResponse{ - Code: code, - } -} - -func TestS3ToObjectError(t *testing.T) { - testCases := []struct { - inputErr error - expectedErr error - bucket, object string - }{ - { - inputErr: errResponse("BucketAlreadyOwnedByYou"), - expectedErr: minio.BucketAlreadyOwnedByYou{}, - }, - { - inputErr: errResponse("BucketNotEmpty"), - expectedErr: minio.BucketNotEmpty{}, - }, - { - inputErr: errResponse("InvalidBucketName"), - expectedErr: minio.BucketNameInvalid{}, - }, - { - inputErr: errResponse("InvalidPart"), - expectedErr: minio.InvalidPart{}, - }, - { - inputErr: errResponse("NoSuchBucketPolicy"), - expectedErr: minio.BucketPolicyNotFound{}, - }, - { - inputErr: errResponse("NoSuchBucket"), - expectedErr: minio.BucketNotFound{}, - }, - // with empty Object in miniogo.ErrorRepsonse, NoSuchKey - // is interpreted as BucketNotFound - { - inputErr: errResponse("NoSuchKey"), - expectedErr: minio.BucketNotFound{}, - }, - { - inputErr: errResponse("NoSuchUpload"), - expectedErr: minio.InvalidUploadID{}, - }, - { - inputErr: errResponse("XMinioInvalidObjectName"), - expectedErr: minio.ObjectNameInvalid{}, - }, - { - inputErr: errResponse("AccessDenied"), - expectedErr: minio.PrefixAccessDenied{}, - }, - { - inputErr: errResponse("XAmzContentSHA256Mismatch"), - expectedErr: hash.SHA256Mismatch{}, - }, - { - inputErr: errResponse("EntityTooSmall"), - expectedErr: minio.PartTooSmall{}, - }, - { - inputErr: nil, - expectedErr: nil, - }, - // Special test case for NoSuchKey with object name - { - inputErr: miniogo.ErrorResponse{ - Code: "NoSuchKey", - }, - expectedErr: minio.ObjectNotFound{ - Bucket: "bucket", - Object: "object", - }, - bucket: "bucket", - object: "object", - }, - - // N B error values that aren't of expected types - // should be 
left untouched. - // Special test case for error that is not of type - // miniogo.ErrorResponse - { - inputErr: fmt.Errorf("not a ErrorResponse"), - expectedErr: fmt.Errorf("not a ErrorResponse"), - }, - } - - for i, tc := range testCases { - actualErr := minio.ErrorRespToObjectError(tc.inputErr, tc.bucket, tc.object) - if actualErr != nil && tc.expectedErr != nil && actualErr.Error() != tc.expectedErr.Error() { - t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, actualErr) - } - } -} diff --git a/go.mod b/go.mod index fc2c6e0..faa2891 100644 --- a/go.mod +++ b/go.mod @@ -3,9 +3,7 @@ module github.com/minio/minio go 1.13 require ( - cloud.google.com/go/storage v1.0.0 contrib.go.opencensus.io/exporter/ocagent v0.5.0 // indirect - github.com/Azure/azure-pipeline-go v0.2.1 github.com/Azure/azure-storage-blob-go v0.8.0 github.com/Azure/go-autorest v11.7.1+incompatible // indirect github.com/Shopify/sarama v1.24.1 @@ -15,7 +13,6 @@ require ( github.com/beevik/ntp v0.2.0 github.com/cespare/xxhash/v2 v2.1.1 github.com/cheggaaa/pb v1.0.28 - github.com/colinmarc/hdfs/v2 v2.1.1 github.com/coredns/coredns v1.4.0 github.com/coreos/bbolt v1.3.3 // indirect github.com/coreos/etcd v3.3.13+incompatible @@ -109,7 +106,7 @@ require ( gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect gopkg.in/ini.v1 v1.57.0 // indirect - gopkg.in/jcmturner/gokrb5.v7 v7.3.0 + gopkg.in/jcmturner/gokrb5.v7 v7.3.0 // indirect gopkg.in/ldap.v3 v3.0.3 gopkg.in/olivere/elastic.v5 v5.0.80 gopkg.in/yaml.v2 v2.2.8 diff --git a/go.sum b/go.sum index c3a0832..8482000 100644 --- a/go.sum +++ b/go.sum @@ -81,8 +81,6 @@ github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6D github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/colinmarc/hdfs/v2 v2.1.1 h1:x0hw/m+o3UE20Scso/KCkvYNc9Di39TBlCfGMkJ1/a0= -github.com/colinmarc/hdfs/v2 v2.1.1/go.mod h1:M3x+k8UKKmxtFu++uAZ0OtDU8jR3jnaZIAc6yK4Ue0c= github.com/coredns/coredns v1.4.0 h1:RubBkYmkByUqZWWkjRHvNLnUHgkRVqAWgSMmRFvpE1A= github.com/coredns/coredns v1.4.0/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -164,7 +162,6 @@ github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4er github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= @@ -253,7 +250,6 @@ github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerX github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= github.com/hashicorp/go-syslog v1.0.0/go.mod 
h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v0.0.0-20180228145832-27454136f036/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -279,7 +275,6 @@ github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKe github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8= github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg= -github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= @@ -440,7 +435,6 @@ github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= @@ -587,7 +581,6 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5PaJvn9wGP0agmIOqjrtsKGRguv4= -golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
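The go.mod and go.sum shrinkage above is the mechanical fallout of the package deletions: with the gateway code removed, cloud.google.com/go/storage, github.com/Azure/azure-pipeline-go and github.com/colinmarc/hdfs/v2 lose their last importers and drop out, and gopkg.in/jcmturner/gokrb5.v7 is demoted to an "// indirect" requirement, since nothing in this repository imports it directly anymore but a remaining dependency presumably still pulls it in transitively.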