[#264] Adopt minio code distributed with AGPLv3 license in gateway components
Signed-off-by: Alex Vanin <alexey@nspcc.ru>
Parent: 15023b9371
Commit: 16296490ee
10 changed files with 2322 additions and 3542 deletions
api/errors/errors.go (1916 changes)
File diff suppressed because it is too large.
@ -1 +1,111 @@
package handler

import (
    "strings"
)

type encoding int

const (
    encodePathSegment encoding = iota
    encodeQueryComponent
)

const (
    urlEncodingType = "url"
    upperhex        = "0123456789ABCDEF"
)

func shouldEscape(c byte) bool {
    if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
        return false
    }

    switch c {
    case '-', '_', '.', '/', '*':
        return false
    }
    return true
}

// s3URLEncode is based on url.QueryEscape() code,
// while considering some S3 exceptions.
func s3URLEncode(s string, mode encoding) string {
    spaceCount, hexCount := 0, 0
    for i := 0; i < len(s); i++ {
        c := s[i]
        if shouldEscape(c) {
            if c == ' ' && mode == encodeQueryComponent {
                spaceCount++
            } else {
                hexCount++
            }
        }
    }

    if spaceCount == 0 && hexCount == 0 {
        return s
    }

    var buf [64]byte
    var t []byte

    required := len(s) + 2*hexCount
    if required <= len(buf) {
        t = buf[:required]
    } else {
        t = make([]byte, required)
    }

    if hexCount == 0 {
        copy(t, s)
        for i := 0; i < len(s); i++ {
            if s[i] == ' ' {
                t[i] = '+'
            }
        }
        return string(t)
    }

    j := 0
    for i := 0; i < len(s); i++ {
        switch c := s[i]; {
        case c == ' ' && mode == encodeQueryComponent:
            t[j] = '+'
            j++
        case shouldEscape(c):
            t[j] = '%'
            t[j+1] = upperhex[c>>4]
            t[j+2] = upperhex[c&15]
            j += 3
        default:
            t[j] = s[i]
            j++
        }
    }
    return string(t)
}

func s3QueryEncode(name string, encodingType string) (result string) {
    if encodingType == "" {
        return name
    }
    encodingType = strings.ToLower(encodingType)
    switch encodingType {
    case urlEncodingType:
        return s3URLEncode(name, encodeQueryComponent)
    }
    return name
}

func s3PathEncode(name string, encodingType string) (result string) {
    if encodingType == "" {
        return name
    }
    encodingType = strings.ToLower(encodingType)
    switch encodingType {
    case urlEncodingType:
        return s3URLEncode(name, encodePathSegment)
    }
    return name
}
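A minimal usage sketch, not part of the commit: how a listing handler in the same package might apply these helpers when the request carries encoding-type=url. The function and variable names below are invented for illustration.

// encodeListEntry is a hypothetical helper: object keys are path-encoded
// (spaces become "%20", '/' and '*' stay literal), while query-derived
// values such as the delimiter are query-encoded (spaces become "+").
func encodeListEntry(objectKey, delimiter, encodingType string) (string, string) {
    key := s3PathEncode(objectKey, encodingType)
    del := s3QueryEncode(delimiter, encodingType)
    return key, del
}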
File diff suppressed because it is too large
@ -1,842 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
    "context"
    "encoding/base64"
    "encoding/xml"
    "fmt"
    "net/http"
    "net/url"
    "path"
    "strconv"
    "strings"
    "time"

    "github.com/minio/minio/internal/crypto"
    "github.com/minio/minio/internal/handlers"
    xhttp "github.com/minio/minio/internal/http"
    "github.com/minio/minio/internal/logger"
)

const (
    // RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
    iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision.
    maxObjectList     = 1000  // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
    maxDeleteList     = 10000 // Limit number of objects deleted in a delete call.
    maxUploadsList    = 10000 // Limit number of uploads in a listUploadsResponse.
    maxPartsList      = 10000 // Limit number of parts in a listPartsResponse.
)

// LocationResponse - format for location response.
type LocationResponse struct {
    XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"`
    Location string   `xml:",chardata"`
}

// PolicyStatus captures information returned by GetBucketPolicyStatusHandler
type PolicyStatus struct {
    XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ PolicyStatus" json:"-"`
    IsPublic string
}

// ListVersionsResponse - format for list bucket versions response.
type ListVersionsResponse struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`

    Name      string
    Prefix    string
    KeyMarker string

    // When response is truncated (the IsTruncated element value in the response
    // is true), you can use the key name in this field as marker in the subsequent
    // request to get next set of objects. Server lists objects in alphabetical
    // order Note: This element is returned only if you have delimiter request parameter
    // specified. If response does not include the NextMaker and it is truncated,
    // you can use the value of the last Key in the response as the marker in the
    // subsequent request to get the next set of object keys.
    NextKeyMarker string `xml:"NextKeyMarker,omitempty"`

    // When the number of responses exceeds the value of MaxKeys,
    // NextVersionIdMarker specifies the first object version not
    // returned that satisfies the search criteria. Use this value
    // for the version-id-marker request parameter in a subsequent request.
    NextVersionIDMarker string `xml:"NextVersionIdMarker"`

    // Marks the last version of the Key returned in a truncated response.
    VersionIDMarker string `xml:"VersionIdMarker"`

    MaxKeys   int
    Delimiter string
    // A flag that indicates whether or not ListObjects returned all of the results
    // that satisfied the search criteria.
    IsTruncated bool

    CommonPrefixes []CommonPrefix
    Versions       []ObjectVersion

    // Encoding type used to encode object keys in the response.
    EncodingType string `xml:"EncodingType,omitempty"`
}

// ListObjectsResponse - format for list objects response.
type ListObjectsResponse struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`

    Name   string
    Prefix string
    Marker string

    // When response is truncated (the IsTruncated element value in the response
    // is true), you can use the key name in this field as marker in the subsequent
    // request to get next set of objects. Server lists objects in alphabetical
    // order Note: This element is returned only if you have delimiter request parameter
    // specified. If response does not include the NextMaker and it is truncated,
    // you can use the value of the last Key in the response as the marker in the
    // subsequent request to get the next set of object keys.
    NextMarker string `xml:"NextMarker,omitempty"`

    MaxKeys   int
    Delimiter string
    // A flag that indicates whether or not ListObjects returned all of the results
    // that satisfied the search criteria.
    IsTruncated bool

    Contents       []Object
    CommonPrefixes []CommonPrefix

    // Encoding type used to encode object keys in the response.
    EncodingType string `xml:"EncodingType,omitempty"`
}

// ListObjectsV2Response - format for list objects response.
type ListObjectsV2Response struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`

    Name       string
    Prefix     string
    StartAfter string `xml:"StartAfter,omitempty"`
    // When response is truncated (the IsTruncated element value in the response
    // is true), you can use the key name in this field as marker in the subsequent
    // request to get next set of objects. Server lists objects in alphabetical
    // order Note: This element is returned only if you have delimiter request parameter
    // specified. If response does not include the NextMaker and it is truncated,
    // you can use the value of the last Key in the response as the marker in the
    // subsequent request to get the next set of object keys.
    ContinuationToken     string `xml:"ContinuationToken,omitempty"`
    NextContinuationToken string `xml:"NextContinuationToken,omitempty"`

    KeyCount  int
    MaxKeys   int
    Delimiter string
    // A flag that indicates whether or not ListObjects returned all of the results
    // that satisfied the search criteria.
    IsTruncated bool

    Contents       []Object
    CommonPrefixes []CommonPrefix

    // Encoding type used to encode object keys in the response.
    EncodingType string `xml:"EncodingType,omitempty"`
}

// Part container for part metadata.
type Part struct {
    PartNumber   int
    LastModified string
    ETag         string
    Size         int64
}

// ListPartsResponse - format for list parts response.
type ListPartsResponse struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`

    Bucket   string
    Key      string
    UploadID string `xml:"UploadId"`

    Initiator Initiator
    Owner     Owner

    // The class of storage used to store the object.
    StorageClass string

    PartNumberMarker     int
    NextPartNumberMarker int
    MaxParts             int
    IsTruncated          bool

    // List of parts.
    Parts []Part `xml:"Part"`
}

// ListMultipartUploadsResponse - format for list multipart uploads response.
type ListMultipartUploadsResponse struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`

    Bucket             string
    KeyMarker          string
    UploadIDMarker     string `xml:"UploadIdMarker"`
    NextKeyMarker      string
    NextUploadIDMarker string `xml:"NextUploadIdMarker"`
    Delimiter          string
    Prefix             string
    EncodingType       string `xml:"EncodingType,omitempty"`
    MaxUploads         int
    IsTruncated        bool

    // List of pending uploads.
    Uploads []Upload `xml:"Upload"`

    // Delimed common prefixes.
    CommonPrefixes []CommonPrefix
}

// ListBucketsResponse - format for list buckets response
type ListBucketsResponse struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult" json:"-"`

    Owner Owner

    // Container for one or more buckets.
    Buckets struct {
        Buckets []Bucket `xml:"Bucket"`
    } // Buckets are nested
}

// Upload container for in progress multipart upload
type Upload struct {
    Key          string
    UploadID     string `xml:"UploadId"`
    Initiator    Initiator
    Owner        Owner
    StorageClass string
    Initiated    string
}

// CommonPrefix container for prefix response in ListObjectsResponse
type CommonPrefix struct {
    Prefix string
}

// Bucket container for bucket metadata
type Bucket struct {
    Name         string
    CreationDate string // time string of format "2006-01-02T15:04:05.000Z"
}

// ObjectVersion container for object version metadata
type ObjectVersion struct {
    Object
    IsLatest  bool
    VersionID string `xml:"VersionId"`

    isDeleteMarker bool
}

// MarshalXML - marshal ObjectVersion
func (o ObjectVersion) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
    if o.isDeleteMarker {
        start.Name.Local = "DeleteMarker"
    } else {
        start.Name.Local = "Version"
    }

    type objectVersionWrapper ObjectVersion
    return e.EncodeElement(objectVersionWrapper(o), start)
}

// StringMap is a map[string]string.
type StringMap map[string]string

// MarshalXML - StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {

    tokens := []xml.Token{start}

    for key, value := range s {
        t := xml.StartElement{}
        t.Name = xml.Name{
            Space: "",
            Local: key,
        }
        tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name})
    }

    tokens = append(tokens, xml.EndElement{
        Name: start.Name,
    })

    for _, t := range tokens {
        if err := e.EncodeToken(t); err != nil {
            return err
        }
    }

    // flush to ensure tokens are written
    return e.Flush()
}

// Object container for object metadata
type Object struct {
    Key          string
    LastModified string // time string of format "2006-01-02T15:04:05.000Z"
    ETag         string
    Size         int64

    // Owner of the object.
    Owner Owner

    // The class of storage used to store the object.
    StorageClass string

    // UserMetadata user-defined metadata
    UserMetadata StringMap `xml:"UserMetadata,omitempty"`
}

// CopyObjectResponse container returns ETag and LastModified of the successfully copied object
type CopyObjectResponse struct {
    XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
    LastModified string   // time string of format "2006-01-02T15:04:05.000Z"
    ETag         string   // md5sum of the copied object.
}

// CopyObjectPartResponse container returns ETag and LastModified of the successfully copied object
type CopyObjectPartResponse struct {
    XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyPartResult" json:"-"`
    LastModified string   // time string of format "2006-01-02T15:04:05.000Z"
    ETag         string   // md5sum of the copied object part.
}

// Initiator inherit from Owner struct, fields are same
type Initiator Owner

// Owner - bucket owner/principal
type Owner struct {
    ID          string
    DisplayName string
}

// InitiateMultipartUploadResponse container for InitiateMultiPartUpload response, provides uploadID to start MultiPart upload
type InitiateMultipartUploadResponse struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult" json:"-"`

    Bucket   string
    Key      string
    UploadID string `xml:"UploadId"`
}

// CompleteMultipartUploadResponse container for completed multipart upload response
type CompleteMultipartUploadResponse struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"`

    Location string
    Bucket   string
    Key      string
    ETag     string
}

// DeleteError structure.
type DeleteError struct {
    Code      string
    Message   string
    Key       string
    VersionID string `xml:"VersionId"`
}

// DeleteObjectsResponse container for multiple object deletes.
type DeleteObjectsResponse struct {
    XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`

    // Collection of all deleted objects
    DeletedObjects []DeletedObject `xml:"Deleted,omitempty"`

    // Collection of errors deleting certain objects.
    Errors []DeleteError `xml:"Error,omitempty"`
}

// PostResponse container for POST object request when success_action_status is set to 201
type PostResponse struct {
    Bucket   string
    Key      string
    ETag     string
    Location string
}

// returns "https" if the tls boolean is true, "http" otherwise.
func getURLScheme(tls bool) string {
    if tls {
        return httpsScheme
    }
    return httpScheme
}

// getObjectLocation gets the fully qualified URL of an object.
func getObjectLocation(r *http.Request, domains []string, bucket, object string) string {
    // unit tests do not have host set.
    if r.Host == "" {
        return path.Clean(r.URL.Path)
    }
    proto := handlers.GetSourceScheme(r)
    if proto == "" {
        proto = getURLScheme(globalIsTLS)
    }
    u := &url.URL{
        Host:   r.Host,
        Path:   path.Join(SlashSeparator, bucket, object),
        Scheme: proto,
    }
    // If domain is set then we need to use bucket DNS style.
    for _, domain := range domains {
        if strings.HasPrefix(r.Host, bucket+"."+domain) {
            u.Path = path.Join(SlashSeparator, object)
            break
        }
    }
    return u.String()
}

// generates ListBucketsResponse from array of BucketInfo which can be
// serialized to match XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
    listbuckets := make([]Bucket, 0, len(buckets))
    var data = ListBucketsResponse{}
    var owner = Owner{
        ID:          globalMinioDefaultOwnerID,
        DisplayName: "minio",
    }

    for _, bucket := range buckets {
        var listbucket = Bucket{}
        listbucket.Name = bucket.Name
        listbucket.CreationDate = bucket.Created.UTC().Format(iso8601TimeFormat)
        listbuckets = append(listbuckets, listbucket)
    }

    data.Owner = owner
    data.Buckets.Buckets = listbuckets

    return data
}

// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo) ListVersionsResponse {
    versions := make([]ObjectVersion, 0, len(resp.Objects))
    var owner = Owner{
        ID:          globalMinioDefaultOwnerID,
        DisplayName: "minio",
    }
    var data = ListVersionsResponse{}

    for _, object := range resp.Objects {
        var content = ObjectVersion{}
        if object.Name == "" {
            continue
        }
        content.Key = s3EncodeName(object.Name, encodingType)
        content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
        if object.ETag != "" {
            content.ETag = "\"" + object.ETag + "\""
        }
        content.Size = object.Size
        if object.StorageClass != "" {
            content.StorageClass = object.StorageClass
        } else {
            content.StorageClass = globalMinioDefaultStorageClass
        }
        content.Owner = owner
        content.VersionID = object.VersionID
        if content.VersionID == "" {
            content.VersionID = nullVersionID
        }
        content.IsLatest = object.IsLatest
        content.isDeleteMarker = object.DeleteMarker
        versions = append(versions, content)
    }

    data.Name = bucket
    data.Versions = versions
    data.EncodingType = encodingType
    data.Prefix = s3EncodeName(prefix, encodingType)
    data.KeyMarker = s3EncodeName(marker, encodingType)
    data.Delimiter = s3EncodeName(delimiter, encodingType)
    data.MaxKeys = maxKeys

    data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
    data.NextVersionIDMarker = resp.NextVersionIDMarker
    data.VersionIDMarker = versionIDMarker
    data.IsTruncated = resp.IsTruncated

    prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
    for _, prefix := range resp.Prefixes {
        var prefixItem = CommonPrefix{}
        prefixItem.Prefix = s3EncodeName(prefix, encodingType)
        prefixes = append(prefixes, prefixItem)
    }
    data.CommonPrefixes = prefixes
    return data
}

// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
    contents := make([]Object, 0, len(resp.Objects))
    var owner = Owner{
        ID:          globalMinioDefaultOwnerID,
        DisplayName: "minio",
    }
    var data = ListObjectsResponse{}

    for _, object := range resp.Objects {
        var content = Object{}
        if object.Name == "" {
            continue
        }
        content.Key = s3EncodeName(object.Name, encodingType)
        content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
        if object.ETag != "" {
            content.ETag = "\"" + object.ETag + "\""
        }
        content.Size = object.Size
        if object.StorageClass != "" {
            content.StorageClass = object.StorageClass
        } else {
            content.StorageClass = globalMinioDefaultStorageClass
        }
        content.Owner = owner
        contents = append(contents, content)
    }
    data.Name = bucket
    data.Contents = contents

    data.EncodingType = encodingType
    data.Prefix = s3EncodeName(prefix, encodingType)
    data.Marker = s3EncodeName(marker, encodingType)
    data.Delimiter = s3EncodeName(delimiter, encodingType)
    data.MaxKeys = maxKeys
    data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
    data.IsTruncated = resp.IsTruncated

    prefixes := make([]CommonPrefix, 0, len(resp.Prefixes))
    for _, prefix := range resp.Prefixes {
        var prefixItem = CommonPrefix{}
        prefixItem.Prefix = s3EncodeName(prefix, encodingType)
        prefixes = append(prefixes, prefixItem)
    }
    data.CommonPrefixes = prefixes
    return data
}

// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
    contents := make([]Object, 0, len(objects))
    var owner = Owner{
        ID:          globalMinioDefaultOwnerID,
        DisplayName: "minio",
    }
    var data = ListObjectsV2Response{}

    for _, object := range objects {
        var content = Object{}
        if object.Name == "" {
            continue
        }
        content.Key = s3EncodeName(object.Name, encodingType)
        content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
        if object.ETag != "" {
            content.ETag = "\"" + object.ETag + "\""
        }
        content.Size = object.Size
        if object.StorageClass != "" {
            content.StorageClass = object.StorageClass
        } else {
            content.StorageClass = globalMinioDefaultStorageClass
        }
        content.Owner = owner
        if metadata {
            content.UserMetadata = make(StringMap)
            switch kind, _ := crypto.IsEncrypted(object.UserDefined); kind {
            case crypto.S3:
                content.UserMetadata[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionAES
            case crypto.S3KMS:
                content.UserMetadata[xhttp.AmzServerSideEncryption] = xhttp.AmzEncryptionKMS
            case crypto.SSEC:
                content.UserMetadata[xhttp.AmzServerSideEncryptionCustomerAlgorithm] = xhttp.AmzEncryptionAES
            }
            for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
                if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
                    // Do not need to send any internal metadata
                    // values to client.
                    continue
                }
                // https://github.com/google/security-research/security/advisories/GHSA-76wf-9vgp-pj7w
                if equals(k, xhttp.AmzMetaUnencryptedContentLength, xhttp.AmzMetaUnencryptedContentMD5) {
                    continue
                }
                content.UserMetadata[k] = v
            }
        }
        contents = append(contents, content)
    }
    data.Name = bucket
    data.Contents = contents

    data.EncodingType = encodingType
    data.StartAfter = s3EncodeName(startAfter, encodingType)
    data.Delimiter = s3EncodeName(delimiter, encodingType)
    data.Prefix = s3EncodeName(prefix, encodingType)
    data.MaxKeys = maxKeys
    data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
    data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
    data.IsTruncated = isTruncated

    commonPrefixes := make([]CommonPrefix, 0, len(prefixes))
    for _, prefix := range prefixes {
        var prefixItem = CommonPrefix{}
        prefixItem.Prefix = s3EncodeName(prefix, encodingType)
        commonPrefixes = append(commonPrefixes, prefixItem)
    }
    data.CommonPrefixes = commonPrefixes
    data.KeyCount = len(data.Contents) + len(data.CommonPrefixes)
    return data
}

// generates CopyObjectResponse from etag and lastModified time.
func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse {
    return CopyObjectResponse{
        ETag:         "\"" + etag + "\"",
        LastModified: lastModified.UTC().Format(iso8601TimeFormat),
    }
}

// generates CopyObjectPartResponse from etag and lastModified time.
func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse {
    return CopyObjectPartResponse{
        ETag:         "\"" + etag + "\"",
        LastModified: lastModified.UTC().Format(iso8601TimeFormat),
    }
}

// generates InitiateMultipartUploadResponse for given bucket, key and uploadID.
func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) InitiateMultipartUploadResponse {
    return InitiateMultipartUploadResponse{
        Bucket:   bucket,
        Key:      key,
        UploadID: uploadID,
    }
}

// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultpartUploadResponse(bucket, key, location, etag string) CompleteMultipartUploadResponse {
    return CompleteMultipartUploadResponse{
        Location: location,
        Bucket:   bucket,
        Key:      key,
        // AWS S3 quotes the ETag in XML, make sure we are compatible here.
        ETag: "\"" + etag + "\"",
    }
}

// generates ListPartsResponse from ListPartsInfo.
func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) ListPartsResponse {
    listPartsResponse := ListPartsResponse{}
    listPartsResponse.Bucket = partsInfo.Bucket
    listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
    listPartsResponse.UploadID = partsInfo.UploadID
    listPartsResponse.StorageClass = globalMinioDefaultStorageClass

    // Dumb values not meaningful
    listPartsResponse.Initiator = Initiator{
        ID:          globalMinioDefaultOwnerID,
        DisplayName: globalMinioDefaultOwnerID,
    }
    listPartsResponse.Owner = Owner{
        ID:          globalMinioDefaultOwnerID,
        DisplayName: globalMinioDefaultOwnerID,
    }

    listPartsResponse.MaxParts = partsInfo.MaxParts
    listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker
    listPartsResponse.IsTruncated = partsInfo.IsTruncated
    listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker

    listPartsResponse.Parts = make([]Part, len(partsInfo.Parts))
    for index, part := range partsInfo.Parts {
        newPart := Part{}
        newPart.PartNumber = part.PartNumber
        newPart.ETag = "\"" + part.ETag + "\""
        newPart.Size = part.Size
        newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
        listPartsResponse.Parts[index] = newPart
    }
    return listPartsResponse
}

// generates ListMultipartUploadsResponse for given bucket and ListMultipartsInfo.
func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMultipartsInfo, encodingType string) ListMultipartUploadsResponse {
    listMultipartUploadsResponse := ListMultipartUploadsResponse{}
    listMultipartUploadsResponse.Bucket = bucket
    listMultipartUploadsResponse.Delimiter = s3EncodeName(multipartsInfo.Delimiter, encodingType)
    listMultipartUploadsResponse.IsTruncated = multipartsInfo.IsTruncated
    listMultipartUploadsResponse.EncodingType = encodingType
    listMultipartUploadsResponse.Prefix = s3EncodeName(multipartsInfo.Prefix, encodingType)
    listMultipartUploadsResponse.KeyMarker = s3EncodeName(multipartsInfo.KeyMarker, encodingType)
    listMultipartUploadsResponse.NextKeyMarker = s3EncodeName(multipartsInfo.NextKeyMarker, encodingType)
    listMultipartUploadsResponse.MaxUploads = multipartsInfo.MaxUploads
    listMultipartUploadsResponse.NextUploadIDMarker = multipartsInfo.NextUploadIDMarker
    listMultipartUploadsResponse.UploadIDMarker = multipartsInfo.UploadIDMarker
    listMultipartUploadsResponse.CommonPrefixes = make([]CommonPrefix, len(multipartsInfo.CommonPrefixes))
    for index, commonPrefix := range multipartsInfo.CommonPrefixes {
        listMultipartUploadsResponse.CommonPrefixes[index] = CommonPrefix{
            Prefix: s3EncodeName(commonPrefix, encodingType),
        }
    }
    listMultipartUploadsResponse.Uploads = make([]Upload, len(multipartsInfo.Uploads))
    for index, upload := range multipartsInfo.Uploads {
        newUpload := Upload{}
        newUpload.UploadID = upload.UploadID
        newUpload.Key = s3EncodeName(upload.Object, encodingType)
        newUpload.Initiated = upload.Initiated.UTC().Format(iso8601TimeFormat)
        listMultipartUploadsResponse.Uploads[index] = newUpload
    }
    return listMultipartUploadsResponse
}

// generate multi objects delete response.
func generateMultiDeleteResponse(quiet bool, deletedObjects []DeletedObject, errs []DeleteError) DeleteObjectsResponse {
    deleteResp := DeleteObjectsResponse{}
    if !quiet {
        deleteResp.DeletedObjects = deletedObjects
    }
    if len(errs) == len(deletedObjects) {
        deleteResp.DeletedObjects = nil
    }
    deleteResp.Errors = errs
    return deleteResp
}

func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
    setCommonHeaders(w)
    if mType != mimeNone {
        w.Header().Set(xhttp.ContentType, string(mType))
    }
    w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(response)))
    w.WriteHeader(statusCode)
    if response != nil {
        w.Write(response)
        w.(http.Flusher).Flush()
    }
}

// mimeType represents various MIME type used API responses.
type mimeType string

const (
    // Means no response type.
    mimeNone mimeType = ""
    // Means response type is JSON.
    mimeJSON mimeType = "application/json"
    // Means response type is XML.
    mimeXML mimeType = "application/xml"
)

// writeSuccessResponseJSON writes success headers and response if any,
// with content-type set to `application/json`.
func writeSuccessResponseJSON(w http.ResponseWriter, response []byte) {
    writeResponse(w, http.StatusOK, response, mimeJSON)
}

// writeSuccessResponseXML writes success headers and response if any,
// with content-type set to `application/xml`.
func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {
    writeResponse(w, http.StatusOK, response, mimeXML)
}

// writeSuccessNoContent writes success headers with http status 204
func writeSuccessNoContent(w http.ResponseWriter) {
    writeResponse(w, http.StatusNoContent, nil, mimeNone)
}

// writeRedirectSeeOther writes Location header with http status 303
func writeRedirectSeeOther(w http.ResponseWriter, location string) {
    w.Header().Set(xhttp.Location, location)
    writeResponse(w, http.StatusSeeOther, nil, mimeNone)
}

func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
    writeResponse(w, http.StatusOK, nil, mimeNone)
}

// writeErrorRespone writes error headers
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
    switch err.Code {
    case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
        // Set retry-after header to indicate user-agents to retry request after 120secs.
        // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
        w.Header().Set(xhttp.RetryAfter, "120")
    case "InvalidRegion":
        err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalServerRegion)
    case "AuthorizationHeaderMalformed":
        err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalServerRegion)
    }

    // Generate error response.
    errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
        w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
    encodedErrorResponse := encodeResponse(errorResponse)
    writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}

func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
    writeResponse(w, err.HTTPStatusCode, nil, mimeNone)
}

func writeErrorResponseString(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
    // Generate string error response.
    writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone)
}

// writeErrorResponseJSON - writes error response in JSON format;
// useful for admin APIs.
func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
    // Generate error response.
    errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
    encodedErrorResponse := encodeResponseJSON(errorResponse)
    writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}

// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
    errBody string, reqURL *url.URL) {

    reqInfo := logger.GetReqInfo(ctx)
    errorResponse := APIErrorResponse{
        Code:       err.Code,
        Message:    errBody,
        Resource:   reqURL.Path,
        BucketName: reqInfo.BucketName,
        Key:        reqInfo.ObjectName,
        RequestID:  w.Header().Get(xhttp.AmzRequestID),
        HostID:     globalDeploymentID,
    }
    encodedErrorResponse := encodeResponseJSON(errorResponse)
    writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
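For context, a self-contained sketch of the XML shape these response structs produce; the struct definition is copied from the removed file, the ETag and timestamp values are made up, and the output shown in the comment is approximate.

package main

import (
    "encoding/xml"
    "fmt"
    "time"
)

type CopyObjectResponse struct {
    XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
    LastModified string
    ETag         string
}

func main() {
    resp := CopyObjectResponse{
        // AWS S3 quotes the ETag inside the XML body.
        ETag:         `"d41d8cd98f00b204e9800998ecf8427e"`,
        LastModified: time.Date(2021, 7, 1, 12, 0, 0, 0, time.UTC).Format("2006-01-02T15:04:05.000Z"),
    }
    out, _ := xml.MarshalIndent(resp, "", "  ")
    fmt.Println(string(out))
    // Roughly:
    // <CopyObjectResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
    //   <LastModified>2021-07-01T12:00:00.000Z</LastModified>
    //   <ETag>"d41d8cd98f00b204e9800998ecf8427e"</ETag>
    // </CopyObjectResult>
}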
@ -1,108 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package cmd

import (
    "strings"
)

func shouldEscape(c byte) bool {
    if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
        return false
    }

    switch c {
    case '-', '_', '.', '/', '*':
        return false
    }
    return true
}

// s3URLEncode is based on Golang's url.QueryEscape() code,
// while considering some S3 exceptions:
// - Avoid encoding '/' and '*'
// - Force encoding of '~'
func s3URLEncode(s string) string {
    spaceCount, hexCount := 0, 0
    for i := 0; i < len(s); i++ {
        c := s[i]
        if shouldEscape(c) {
            if c == ' ' {
                spaceCount++
            } else {
                hexCount++
            }
        }
    }

    if spaceCount == 0 && hexCount == 0 {
        return s
    }

    var buf [64]byte
    var t []byte

    required := len(s) + 2*hexCount
    if required <= len(buf) {
        t = buf[:required]
    } else {
        t = make([]byte, required)
    }

    if hexCount == 0 {
        copy(t, s)
        for i := 0; i < len(s); i++ {
            if s[i] == ' ' {
                t[i] = '+'
            }
        }
        return string(t)
    }

    j := 0
    for i := 0; i < len(s); i++ {
        switch c := s[i]; {
        case c == ' ':
            t[j] = '+'
            j++
        case shouldEscape(c):
            t[j] = '%'
            t[j+1] = "0123456789ABCDEF"[c>>4]
            t[j+2] = "0123456789ABCDEF"[c&15]
            j += 3
        default:
            t[j] = s[i]
            j++
        }
    }
    return string(t)
}

// s3EncodeName encodes string in response when encodingType is specified in AWS S3 requests.
func s3EncodeName(name string, encodingType string) (result string) {
    // Quick path to exit
    if encodingType == "" {
        return name
    }
    encodingType = strings.ToLower(encodingType)
    switch encodingType {
    case "url":
        return s3URLEncode(name)
    }
    return name
}
@ -1,86 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package crypto

import (
    "bytes"
    "crypto/md5"
    "encoding/base64"
    "net/http"

    xhttp "github.com/minio/minio/internal/http"
)

// RemoveSensitiveHeaders removes confidential encryption
// information - e.g. the SSE-C key - from the HTTP headers.
// It has the same semantics as RemoveSensitiveEntires.
func RemoveSensitiveHeaders(h http.Header) {
    h.Del(xhttp.AmzServerSideEncryptionCustomerKey)
    h.Del(xhttp.AmzServerSideEncryptionCopyCustomerKey)
    h.Del(xhttp.AmzMetaUnencryptedContentLength)
    h.Del(xhttp.AmzMetaUnencryptedContentMD5)
}

var (
    // SSECopy represents AWS SSE-C for copy requests. It provides
    // functionality to handle SSE-C copy requests.
    SSECopy = ssecCopy{}
)

type ssecCopy struct{}

// IsRequested returns true if the HTTP headers contains
// at least one SSE-C copy header. Regular SSE-C headers
// are ignored.
func (ssecCopy) IsRequested(h http.Header) bool {
    if _, ok := h[xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm]; ok {
        return true
    }
    if _, ok := h[xhttp.AmzServerSideEncryptionCopyCustomerKey]; ok {
        return true
    }
    if _, ok := h[xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5]; ok {
        return true
    }
    return false
}

// ParseHTTP parses the SSE-C copy headers and returns the SSE-C client key
// on success. Regular SSE-C headers are ignored.
func (ssecCopy) ParseHTTP(h http.Header) (key [32]byte, err error) {
    if h.Get(xhttp.AmzServerSideEncryptionCopyCustomerAlgorithm) != xhttp.AmzEncryptionAES {
        return key, ErrInvalidCustomerAlgorithm
    }
    if h.Get(xhttp.AmzServerSideEncryptionCopyCustomerKey) == "" {
        return key, ErrMissingCustomerKey
    }
    if h.Get(xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5) == "" {
        return key, ErrMissingCustomerKeyMD5
    }

    clientKey, err := base64.StdEncoding.DecodeString(h.Get(xhttp.AmzServerSideEncryptionCopyCustomerKey))
    if err != nil || len(clientKey) != 32 { // The client key must be 256 bits long
        return key, ErrInvalidCustomerKey
    }
    keyMD5, err := base64.StdEncoding.DecodeString(h.Get(xhttp.AmzServerSideEncryptionCopyCustomerKeyMD5))
    if md5Sum := md5.Sum(clientKey); err != nil || !bytes.Equal(md5Sum[:], keyMD5) {
        return key, ErrCustomerKeyMD5Mismatch
    }
    copy(key[:], clientKey)
    return key, nil
}
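A hedged, self-contained sketch (not part of the diff) of the request headers ParseHTTP above expects. The literal header names used here are the standard AWS SSE-C copy-source headers that the xhttp constants refer to, and the all-zero key is a throwaway example.

package main

import (
    "crypto/md5"
    "encoding/base64"
    "fmt"
    "net/http"
)

func main() {
    key := make([]byte, 32) // SSE-C keys must be exactly 256 bits long
    sum := md5.Sum(key)     // the key MD5 is sent alongside the key for an integrity check

    h := http.Header{}
    h.Set("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm", "AES256")
    h.Set("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key", base64.StdEncoding.EncodeToString(key))
    h.Set("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5", base64.StdEncoding.EncodeToString(sum[:]))

    fmt.Println(h)
}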
@ -1,127 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

// Originally from https://github.com/gorilla/handlers with following license
// https://raw.githubusercontent.com/gorilla/handlers/master/LICENSE, forked
// and heavily modified for MinIO's internal needs.

package handlers

import (
    "net"
    "net/http"
    "regexp"
    "strings"
)

var (
    // De-facto standard header keys.
    xForwardedFor    = http.CanonicalHeaderKey("X-Forwarded-For")
    xForwardedHost   = http.CanonicalHeaderKey("X-Forwarded-Host")
    xForwardedPort   = http.CanonicalHeaderKey("X-Forwarded-Port")
    xForwardedProto  = http.CanonicalHeaderKey("X-Forwarded-Proto")
    xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme")
    xRealIP          = http.CanonicalHeaderKey("X-Real-IP")
)

var (
    // RFC7239 defines a new "Forwarded: " header designed to replace the
    // existing use of X-Forwarded-* headers.
    // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43
    forwarded = http.CanonicalHeaderKey("Forwarded")
    // Allows for a sub-match of the first value after 'for=' to the next
    // comma, semi-colon or space. The match is case-insensitive.
    forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)(.*)`)
    // Allows for a sub-match for the first instance of scheme (http|https)
    // prefixed by 'proto='. The match is case-insensitive.
    protoRegex = regexp.MustCompile(`(?i)^(;|,| )+(?:proto=)(https|http)`)
)

// GetSourceScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239
// Forwarded headers (in that order).
func GetSourceScheme(r *http.Request) string {
    var scheme string

    // Retrieve the scheme from X-Forwarded-Proto.
    if proto := r.Header.Get(xForwardedProto); proto != "" {
        scheme = strings.ToLower(proto)
    } else if proto = r.Header.Get(xForwardedScheme); proto != "" {
        scheme = strings.ToLower(proto)
    } else if proto := r.Header.Get(forwarded); proto != "" {
        // match should contain at least two elements if the protocol was
        // specified in the Forwarded header. The first element will always be
        // the 'for=', which we ignore, subsequently we proceed to look for
        // 'proto=' which should precede right after `for=` if not
        // we simply ignore the values and return empty. This is in line
        // with the approach we took for returning first ip from multiple
        // params.
        if match := forRegex.FindStringSubmatch(proto); len(match) > 1 {
            if match = protoRegex.FindStringSubmatch(match[2]); len(match) > 1 {
                scheme = strings.ToLower(match[2])
            }
        }
    }

    return scheme
}

// GetSourceIPFromHeaders retrieves the IP from the X-Forwarded-For, X-Real-IP
// and RFC7239 Forwarded headers (in that order)
func GetSourceIPFromHeaders(r *http.Request) string {
    var addr string

    if fwd := r.Header.Get(xForwardedFor); fwd != "" {
        // Only grab the first (client) address. Note that '192.168.0.1,
        // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
        // the first may represent forwarding proxies earlier in the chain.
        s := strings.Index(fwd, ", ")
        if s == -1 {
            s = len(fwd)
        }
        addr = fwd[:s]
    } else if fwd := r.Header.Get(xRealIP); fwd != "" {
        // X-Real-IP should only contain one IP address (the client making the
        // request).
        addr = fwd
    } else if fwd := r.Header.Get(forwarded); fwd != "" {
        // match should contain at least two elements if the protocol was
        // specified in the Forwarded header. The first element will always be
        // the 'for=' capture, which we ignore. In the case of multiple IP
        // addresses (for=8.8.8.8, 8.8.4.4, 172.16.1.20 is valid) we only
        // extract the first, which should be the client IP.
        if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
            // IPv6 addresses in Forwarded headers are quoted-strings. We strip
            // these quotes.
            addr = strings.Trim(match[1], `"`)
        }
    }

    return addr
}

// GetSourceIP retrieves the IP from the request headers
// and falls back to r.RemoteAddr when necessary.
func GetSourceIP(r *http.Request) string {
    addr := GetSourceIPFromHeaders(r)
    if addr != "" {
        return addr
    }

    // Default to remote address if headers not set.
    addr, _, _ = net.SplitHostPort(r.RemoteAddr)
    return addr
}
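As an illustration of the client-address precedence the removed helpers implemented (X-Forwarded-For first, then X-Real-IP, then RemoteAddr), a small self-contained sketch; the addresses below are invented.

package main

import (
    "fmt"
    "net/http/httptest"
    "strings"
)

// firstForwardedFor mirrors the X-Forwarded-For handling shown above:
// only the first (client) address before ", " is taken.
func firstForwardedFor(fwd string) string {
    if i := strings.Index(fwd, ", "); i != -1 {
        return fwd[:i]
    }
    return fwd
}

func main() {
    r := httptest.NewRequest("GET", "/", nil)
    r.Header.Set("X-Forwarded-For", "192.168.0.1, 10.1.1.1")
    fmt.Println(firstForwardedFor(r.Header.Get("X-Forwarded-For"))) // 192.168.0.1
}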
@ -1,144 +0,0 @@
// Copyright (c) 2015-2021 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

package logger

import (
	"context"
	"fmt"
	"sync"
)

// Key used for Get/SetReqInfo
type contextKeyType string

const contextLogKey = contextKeyType("miniolog")

// KeyVal - appended to ReqInfo.Tags
type KeyVal struct {
	Key string
	Val interface{}
}

// ReqInfo stores the request info.
type ReqInfo struct {
	RemoteHost   string // Client Host/IP
	Host         string // Node Host/IP
	UserAgent    string // User Agent
	DeploymentID string // x-minio-deployment-id
	RequestID    string // x-amz-request-id
	API          string // API name - GetObject PutObject NewMultipartUpload etc.
	BucketName   string // Bucket name
	ObjectName   string // Object name
	AccessKey    string // Access Key
	tags         []KeyVal // Any additional info not accommodated by above fields
	sync.RWMutex
}

// NewReqInfo :
func NewReqInfo(remoteHost, userAgent, deploymentID, requestID, api, bucket, object string) *ReqInfo {
	req := ReqInfo{}
	req.RemoteHost = remoteHost
	req.UserAgent = userAgent
	req.API = api
	req.DeploymentID = deploymentID
	req.RequestID = requestID
	req.BucketName = bucket
	req.ObjectName = object
	return &req
}

// AppendTags - appends key/val to ReqInfo.tags
func (r *ReqInfo) AppendTags(key string, val interface{}) *ReqInfo {
	if r == nil {
		return nil
	}
	r.Lock()
	defer r.Unlock()
	r.tags = append(r.tags, KeyVal{key, val})
	return r
}

// SetTags - sets key/val to ReqInfo.tags
func (r *ReqInfo) SetTags(key string, val interface{}) *ReqInfo {
	if r == nil {
		return nil
	}
	r.Lock()
	defer r.Unlock()
	// Search of tag key already exists in tags
	var updated bool
	for _, tag := range r.tags {
		if tag.Key == key {
			tag.Val = val
			updated = true
			break
		}
	}
	if !updated {
		// Append to the end of tags list
		r.tags = append(r.tags, KeyVal{key, val})
	}
	return r
}

// GetTags - returns the user defined tags
func (r *ReqInfo) GetTags() []KeyVal {
	if r == nil {
		return nil
	}
	r.RLock()
	defer r.RUnlock()
	return append([]KeyVal(nil), r.tags...)
}

// GetTagsMap - returns the user defined tags in a map structure
func (r *ReqInfo) GetTagsMap() map[string]interface{} {
	if r == nil {
		return nil
	}
	r.RLock()
	defer r.RUnlock()
	m := make(map[string]interface{}, len(r.tags))
	for _, t := range r.tags {
		m[t.Key] = t.Val
	}
	return m
}

// SetReqInfo sets ReqInfo in the context.
func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context {
	if ctx == nil {
		LogIf(context.Background(), fmt.Errorf("context is nil"))
		return nil
	}
	return context.WithValue(ctx, contextLogKey, req)
}

// GetReqInfo returns ReqInfo if set.
func GetReqInfo(ctx context.Context) *ReqInfo {
	if ctx != nil {
		r, ok := ctx.Value(contextLogKey).(*ReqInfo)
		if ok {
			return r
		}
		r = &ReqInfo{}
		SetReqInfo(ctx, r)
		return r
	}
	return nil
}
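For context, the removed logger package stores per-request metadata in the context under a private typed key and reads it back with a type assertion. A self-contained sketch of that pattern (the names below are illustrative, not the gateway's):

package main

import (
	"context"
	"fmt"
)

// A private, typed key prevents collisions with other packages' context values.
type ctxKey string

const reqInfoKey = ctxKey("miniolog")

type reqInfo struct{ RequestID, API string }

func main() {
	// Attach the request info once...
	ctx := context.WithValue(context.Background(), reqInfoKey, &reqInfo{RequestID: "req-123", API: "GetObject"})

	// ...and retrieve the same value downstream with a checked type assertion.
	if info, ok := ctx.Value(reqInfoKey).(*reqInfo); ok {
		fmt.Println(info.API, info.RequestID) // GetObject req-123
	}
}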
189 api/reqinfo.go
@ -1,6 +1,39 @@
package api

import (
	"context"
	"net"
	"net/http"
	"net/url"
	"regexp"
	"strings"
	"sync"

	"github.com/gorilla/mux"
)

type (
	// KeyVal - appended to ReqInfo.Tags.
	KeyVal struct {
		Key string
		Val string
	}

	// ReqInfo stores the request info.
	ReqInfo struct {
		sync.RWMutex
		RemoteHost   string   // Client Host/IP
		Host         string   // Node Host/IP
		UserAgent    string   // User Agent
		DeploymentID string   // x-minio-deployment-id
		RequestID    string   // x-amz-request-id
		API          string   // API name - GetObject PutObject NewMultipartUpload etc.
		BucketName   string   // Bucket name
		ObjectName   string   // Object name
		URL          *url.URL // Request url
		tags         []KeyVal // Any additional info not accommodated by above fields
	}

	// ObjectRequest represents object request data.
	ObjectRequest struct {
		Bucket string
@ -13,3 +46,159 @@ type (
type contextKeyType string

const ctxRequestInfo = contextKeyType("NeoFS-S3-GW")

var (
	// De-facto standard header keys.
	xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For")
	xRealIP       = http.CanonicalHeaderKey("X-Real-IP")
)

var (
	// RFC7239 defines a new "Forwarded: " header designed to replace the
	// existing use of X-Forwarded-* headers.
	// e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43.
	forwarded = http.CanonicalHeaderKey("Forwarded")
	// Allows for a sub-match of the first value after 'for=' to the next
	// comma, semi-colon or space. The match is case-insensitive.
	forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|, )]+)(.*)`)
)

// GetSourceIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239
// Forwarded headers (in that order), falls back to r.RemoteAddr when all
// else fails.
func GetSourceIP(r *http.Request) string {
	var addr string

	if fwd := r.Header.Get(xForwardedFor); fwd != "" {
		// Only grab the first (client) address. Note that '192.168.0.1,
		// 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
		// the first may represent forwarding proxies earlier in the chain.
		s := strings.Index(fwd, ", ")
		if s == -1 {
			s = len(fwd)
		}
		addr = fwd[:s]
	} else if fwd := r.Header.Get(xRealIP); fwd != "" {
		// X-Real-IP should only contain one IP address (the client making the
		// request).
		addr = fwd
	} else if fwd := r.Header.Get(forwarded); fwd != "" {
		// match should contain at least two elements if the protocol was
		// specified in the Forwarded header. The first element will always be
		// the 'for=' capture, which we ignore. In the case of multiple IP
		// addresses (for=8.8.8.8, 8.8.4.4, 172.16.1.20 is valid) we only
		// extract the first, which should be the client IP.
		if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 {
			// IPv6 addresses in Forwarded headers are quoted-strings. We strip
			// these quotes.
			addr = strings.Trim(match[1], `"`)
		}
	}

	if addr != "" {
		return addr
	}

	// Default to remote address if headers not set.
	addr, _, _ = net.SplitHostPort(r.RemoteAddr)
	return addr
}
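GetSourceIP therefore prefers X-Forwarded-For, then X-Real-IP, then the RFC 7239 Forwarded header, and only then the socket address. A self-contained sketch of that precedence (the helper below mirrors the logic above in reduced form and omits the Forwarded branch; it is not the gateway's function):

package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"strings"
)

// sourceIP mirrors the header precedence shown above in reduced form.
func sourceIP(r *http.Request) string {
	if fwd := r.Header.Get("X-Forwarded-For"); fwd != "" {
		// Only the first (client) address matters; later entries are proxies.
		if i := strings.Index(fwd, ", "); i != -1 {
			return fwd[:i]
		}
		return fwd
	}
	if fwd := r.Header.Get("X-Real-IP"); fwd != "" {
		return fwd
	}
	host, _, _ := net.SplitHostPort(r.RemoteAddr)
	return host
}

func main() {
	r := httptest.NewRequest(http.MethodGet, "/bucket/object", nil)
	r.RemoteAddr = "203.0.113.7:54321"
	fmt.Println(sourceIP(r)) // 203.0.113.7 (falls back to RemoteAddr)

	r.Header.Set("X-Forwarded-For", "192.0.2.60, 10.1.1.1")
	fmt.Println(sourceIP(r)) // 192.0.2.60 (first client address wins)
}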
func prepareContext(w http.ResponseWriter, r *http.Request) context.Context {
	vars := mux.Vars(r)
	bucket := vars["bucket"]
	object, err := url.PathUnescape(vars["object"])
	if err != nil {
		object = vars["object"]
	}
	prefix, err := url.QueryUnescape(vars["prefix"])
	if err != nil {
		prefix = vars["prefix"]
	}
	if prefix != "" {
		object = prefix
	}
	return SetReqInfo(r.Context(),
		// prepare request info
		NewReqInfo(w, r, ObjectRequest{
			Bucket: bucket,
			Object: object,
			Method: mux.CurrentRoute(r).GetName(),
		}))
}

// NewReqInfo returns new ReqInfo based on parameters.
func NewReqInfo(w http.ResponseWriter, r *http.Request, req ObjectRequest) *ReqInfo {
	return &ReqInfo{
		API:          req.Method,
		BucketName:   req.Bucket,
		ObjectName:   req.Object,
		UserAgent:    r.UserAgent(),
		RemoteHost:   GetSourceIP(r),
		RequestID:    GetRequestID(w),
		DeploymentID: deploymentID.String(),
		URL:          r.URL,
	}
}

// AppendTags - appends key/val to ReqInfo.tags.
func (r *ReqInfo) AppendTags(key string, val string) *ReqInfo {
	if r == nil {
		return nil
	}
	r.Lock()
	defer r.Unlock()
	r.tags = append(r.tags, KeyVal{key, val})
	return r
}

// SetTags - sets key/val to ReqInfo.tags.
func (r *ReqInfo) SetTags(key string, val string) *ReqInfo {
	if r == nil {
		return nil
	}
	r.Lock()
	defer r.Unlock()
	// Search if the tag key already exists in tags and update it in place.
	// Indexing into the slice is required: assigning through a range copy
	// would not modify the stored value.
	var updated bool
	for i := range r.tags {
		if r.tags[i].Key == key {
			r.tags[i].Val = val
			updated = true
			break
		}
	}
	if !updated {
		// Append to the end of the tags list.
		r.tags = append(r.tags, KeyVal{key, val})
	}
	return r
}
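AppendTags always adds another KeyVal, while SetTags overwrites an existing key and only appends when the key is new. A small self-contained sketch of the difference (types reduced to the essentials, not the gateway's):

package main

import "fmt"

type kv struct{ Key, Val string }

// setTag mirrors SetTags above: update in place if the key exists, else append.
func setTag(tags []kv, key, val string) []kv {
	for i := range tags {
		if tags[i].Key == key {
			tags[i].Val = val
			return tags
		}
	}
	return append(tags, kv{key, val})
}

func main() {
	var tags []kv
	tags = append(tags, kv{"object", "a.txt"}, kv{"object", "b.txt"}) // append-style: duplicates allowed
	fmt.Println(tags)                                                 // [{object a.txt} {object b.txt}]

	tags = nil
	tags = setTag(tags, "object", "a.txt")
	tags = setTag(tags, "object", "b.txt") // set-style: same key overwritten
	fmt.Println(tags)                      // [{object b.txt}]
}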
// GetTags - returns the user defined tags.
func (r *ReqInfo) GetTags() []KeyVal {
	if r == nil {
		return nil
	}
	r.RLock()
	defer r.RUnlock()
	return append([]KeyVal(nil), r.tags...)
}

// SetReqInfo sets ReqInfo in the context.
func SetReqInfo(ctx context.Context, req *ReqInfo) context.Context {
	if ctx == nil {
		return nil
	}
	return context.WithValue(ctx, ctxRequestInfo, req)
}

// GetReqInfo returns ReqInfo if set.
func GetReqInfo(ctx context.Context) *ReqInfo {
	if ctx == nil {
		return &ReqInfo{}
	} else if r, ok := ctx.Value(ctxRequestInfo).(*ReqInfo); ok {
		return r
	}
	return &ReqInfo{}
}
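Taken together, the new api/reqinfo.go follows a middleware-plus-handler shape: prepareContext builds a ReqInfo from the mux route variables and stores it on the context, and handlers read it back with GetReqInfo. A self-contained sketch of that flow (names and the hard-coded values are illustrative, not the gateway's):

package main

import (
	"context"
	"fmt"
	"net/http"
	"net/http/httptest"
)

type ctxKey string

const reqInfoKey = ctxKey("NeoFS-S3-GW")

type reqInfo struct{ Bucket, Object, API string }

// withReqInfo plays the role of prepareContext: it attaches request info
// before the handler runs. In the gateway the values come from mux.Vars.
func withReqInfo(api string, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		info := &reqInfo{Bucket: "photos", Object: "cat.png", API: api}
		next(w, r.WithContext(context.WithValue(r.Context(), reqInfoKey, info)))
	}
}

// handler plays the role of an S3 API handler: it only reads the stored info.
func handler(w http.ResponseWriter, r *http.Request) {
	info, _ := r.Context().Value(reqInfoKey).(*reqInfo)
	fmt.Fprintf(w, "%s %s/%s\n", info.API, info.Bucket, info.Object)
}

func main() {
	rec := httptest.NewRecorder()
	withReqInfo("GetObject", handler)(rec, httptest.NewRequest(http.MethodGet, "/photos/cat.png", nil))
	fmt.Print(rec.Body.String()) // GetObject photos/cat.png
}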
107 api/response.go
@ -1,11 +1,15 @@
package api

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"net/http"
	"strconv"

	"github.com/google/uuid"
	"github.com/nspcc-dev/neofs-s3-gw/api/errors"
	"github.com/nspcc-dev/neofs-s3-gw/internal/version"
)

type (
@ -105,6 +109,109 @@ var s3ErrorResponseMap = map[string]string{
	// Add new API errors here.
}

// WriteErrorResponse writes error headers.
func WriteErrorResponse(w http.ResponseWriter, reqInfo *ReqInfo, err error) {
	code := http.StatusInternalServerError

	if e, ok := err.(errors.Error); ok {
		code = e.HTTPStatusCode

		switch e.Code {
		case "SlowDown", "XNeoFSServerNotInitialized", "XNeoFSReadQuorum", "XNeoFSWriteQuorum":
			// Set retry-after header to indicate user-agents to retry request after 120secs.
			// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
			w.Header().Set(hdrRetryAfter, "120")
		case "AccessDenied":
			// TODO process when the request is from browser and also if browser
		}
	}

	// Generate error response.
	errorResponse := getAPIErrorResponse(reqInfo, err)
	encodedErrorResponse := EncodeResponse(errorResponse)
	WriteResponse(w, code, encodedErrorResponse, MimeXML)
}

// If none of the http routes match respond with appropriate errors.
func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
	desc := fmt.Sprintf("Unknown API request at %s", r.URL.Path)
	WriteErrorResponse(w, GetReqInfo(r.Context()), errors.Error{
		Code:           "XMinioUnknownAPIRequest",
		Description:    desc,
		HTTPStatusCode: http.StatusBadRequest,
	})
}

// Write http common headers.
func setCommonHeaders(w http.ResponseWriter) {
	w.Header().Set(hdrServerInfo, version.Server)
	w.Header().Set(hdrAcceptRanges, "bytes")

	// Remove sensitive information
	removeSensitiveHeaders(w.Header())
}

// removeSensitiveHeaders removes confidential encryption
// information - e.g. the SSE-C key - from the HTTP headers.
// It has the same semantics as RemoveSensitiveEntries.
func removeSensitiveHeaders(h http.Header) {
	h.Del(hdrSSECustomerKey)
	h.Del(hdrSSECopyKey)
}

// WriteResponse writes given statusCode and response into w (with mType header if set).
func WriteResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
	setCommonHeaders(w)
	if mType != MimeNone {
		w.Header().Set(hdrContentType, string(mType))
	}
	w.Header().Set(hdrContentLength, strconv.Itoa(len(response)))
	w.WriteHeader(statusCode)
	if response == nil {
		return
	}

	_, _ = w.Write(response)
	if flusher, ok := w.(http.Flusher); ok {
		flusher.Flush()
	}
}

// EncodeResponse encodes the response headers into XML format.
func EncodeResponse(response interface{}) []byte {
	var bytesBuffer bytes.Buffer
	bytesBuffer.WriteString(xml.Header)
	_ = xml.
		NewEncoder(&bytesBuffer).
		Encode(response)
	return bytesBuffer.Bytes()
}

// EncodeToResponse encodes the response into ResponseWriter.
func EncodeToResponse(w http.ResponseWriter, response interface{}) error {
	w.WriteHeader(http.StatusOK)

	if _, err := w.Write(xmlHeader); err != nil {
		return err
	} else if err = xml.NewEncoder(w).Encode(response); err != nil {
		return err
	}

	return nil
}

// // WriteSuccessResponseXML writes success headers and response if any,
// // with content-type set to `application/xml`.
// func WriteSuccessResponseXML(w http.ResponseWriter, response []byte) {
// 	WriteResponse(w, http.StatusOK, response, MimeXML)
// }

// WriteSuccessResponseHeadersOnly writes HTTP (200) OK response with no data
// to the client.
func WriteSuccessResponseHeadersOnly(w http.ResponseWriter) {
	WriteResponse(w, http.StatusOK, nil, MimeNone)
}

// Error - Returns S3 error string.
func (e ErrorResponse) Error() string {
	if e.Message == "" {
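A handler built on these helpers typically replies with EncodeToResponse on success and WriteErrorResponse on failure. A self-contained sketch of the success path (the response type and helper below mirror EncodeToResponse in reduced form; they are illustrative, not the gateway's):

package main

import (
	"encoding/xml"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// listBucketsResult is an illustrative response struct; the gateway defines
// its own response types alongside these helpers.
type listBucketsResult struct {
	XMLName xml.Name `xml:"ListAllMyBucketsResult"`
	Buckets []string `xml:"Buckets>Bucket>Name"`
}

// encodeToResponse mirrors EncodeToResponse above: status 200, XML header,
// then the XML-encoded body.
func encodeToResponse(w http.ResponseWriter, response interface{}) error {
	w.Header().Set("Content-Type", "application/xml")
	w.WriteHeader(http.StatusOK)
	if _, err := w.Write([]byte(xml.Header)); err != nil {
		return err
	}
	return xml.NewEncoder(w).Encode(response)
}

func main() {
	rec := httptest.NewRecorder()
	_ = encodeToResponse(rec, listBucketsResult{Buckets: []string{"photos", "backups"}})
	fmt.Println(rec.Code)          // 200
	fmt.Println(rec.Body.String()) // XML header followed by <ListAllMyBucketsResult>...
}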