diff --git a/pkg/auth/credentials.go b/pkg/auth/credentials.go deleted file mode 100644 index ea3a3398..00000000 --- a/pkg/auth/credentials.go +++ /dev/null @@ -1,258 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package auth - -import ( - "crypto/rand" - "crypto/subtle" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" - "time" - - jwtgo "github.com/dgrijalva/jwt-go" -) - -const ( - // Minimum length for MinIO access key. - accessKeyMinLen = 3 - - // Maximum length for MinIO access key. - // There is no max length enforcement for access keys - accessKeyMaxLen = 20 - - // Minimum length for MinIO secret key for both server and gateway mode. - secretKeyMinLen = 8 - - // Maximum secret key length for MinIO, this - // is used when autogenerating new credentials. - // There is no max length enforcement for secret keys - secretKeyMaxLen = 40 - - // Alpha numeric table used for generating access keys. - alphaNumericTable = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" - - // Total length of the alpha numeric table. - alphaNumericTableLen = byte(len(alphaNumericTable)) -) - -// Common errors generated for access and secret key validation. -var ( - ErrInvalidAccessKeyLength = fmt.Errorf("access key must be minimum %v or more characters long", accessKeyMinLen) - ErrInvalidSecretKeyLength = fmt.Errorf("secret key must be minimum %v or more characters long", secretKeyMinLen) -) - -// IsAccessKeyValid - validate access key for right length. -func IsAccessKeyValid(accessKey string) bool { - return len(accessKey) >= accessKeyMinLen -} - -// IsSecretKeyValid - validate secret key for right length. -func IsSecretKeyValid(secretKey string) bool { - return len(secretKey) >= secretKeyMinLen -} - -// Default access and secret keys. -const ( - DefaultAccessKey = "minioadmin" - DefaultSecretKey = "minioadmin" -) - -// Default access credentials -var ( - DefaultCredentials = Credentials{ - AccessKey: DefaultAccessKey, - SecretKey: DefaultSecretKey, - } -) - -// Credentials holds access and secret keys. 
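For reference, a minimal usage sketch of the constructors this file provided (a sketch only, assuming the historical github.com/minio/minio/pkg/auth import path):

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/auth"
)

func main() {
	// Static credentials: access/secret keys are checked against the
	// minimum lengths (3 and 8 characters respectively).
	cred, err := auth.CreateCredentials("minioadmin", "minioadmin")
	if err != nil {
		panic(err)
	}
	fmt.Println(cred.IsValid()) // true: status "on", keys valid, no expiry
	fmt.Println(cred.IsTemp())  // false: no session token or expiration set

	// Randomly generated credentials: 20-character access key,
	// 40-character secret key.
	rnd, _ := auth.GetNewCredentials()
	fmt.Println(len(rnd.AccessKey), len(rnd.SecretKey)) // 20 40
}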
-type Credentials struct { - AccessKey string `xml:"AccessKeyId" json:"accessKey,omitempty"` - SecretKey string `xml:"SecretAccessKey" json:"secretKey,omitempty"` - Expiration time.Time `xml:"Expiration" json:"expiration,omitempty"` - SessionToken string `xml:"SessionToken" json:"sessionToken,omitempty"` - Status string `xml:"-" json:"status,omitempty"` - ParentUser string `xml:"-" json:"parentUser,omitempty"` - Groups []string `xml:"-" json:"groups,omitempty"` -} - -func (cred Credentials) String() string { - var s strings.Builder - s.WriteString(cred.AccessKey) - s.WriteString(":") - s.WriteString(cred.SecretKey) - if cred.SessionToken != "" { - s.WriteString("\n") - s.WriteString(cred.SessionToken) - } - if !cred.Expiration.IsZero() && !cred.Expiration.Equal(timeSentinel) { - s.WriteString("\n") - s.WriteString(cred.Expiration.String()) - } - return s.String() -} - -// IsExpired - returns whether Credential is expired or not. -func (cred Credentials) IsExpired() bool { - if cred.Expiration.IsZero() || cred.Expiration.Equal(timeSentinel) { - return false - } - - return cred.Expiration.Before(time.Now().UTC()) -} - -// IsTemp - returns whether credential is temporary or not. -func (cred Credentials) IsTemp() bool { - return cred.SessionToken != "" && !cred.Expiration.IsZero() && !cred.Expiration.Equal(timeSentinel) -} - -// IsServiceAccount - returns whether credential is a service account or not -func (cred Credentials) IsServiceAccount() bool { - return cred.ParentUser != "" && (cred.Expiration.IsZero() || cred.Expiration.Equal(timeSentinel)) -} - -// IsValid - returns whether credential is valid or not. -func (cred Credentials) IsValid() bool { - // Verify credentials if its enabled or not set. - if cred.Status == "off" { - return false - } - return IsAccessKeyValid(cred.AccessKey) && IsSecretKeyValid(cred.SecretKey) && !cred.IsExpired() -} - -// Equal - returns whether two credentials are equal or not. -func (cred Credentials) Equal(ccred Credentials) bool { - if !ccred.IsValid() { - return false - } - return (cred.AccessKey == ccred.AccessKey && subtle.ConstantTimeCompare([]byte(cred.SecretKey), []byte(ccred.SecretKey)) == 1 && - subtle.ConstantTimeCompare([]byte(cred.SessionToken), []byte(ccred.SessionToken)) == 1) -} - -var timeSentinel = time.Unix(0, 0).UTC() - -// ErrInvalidDuration invalid token expiry -var ErrInvalidDuration = errors.New("invalid token expiry") - -// ExpToInt64 - convert input interface value to int64. -func ExpToInt64(expI interface{}) (expAt int64, err error) { - switch exp := expI.(type) { - case string: - expAt, err = strconv.ParseInt(exp, 10, 64) - case float64: - expAt, err = int64(exp), nil - case int64: - expAt, err = exp, nil - case int: - expAt, err = int64(exp), nil - case uint64: - expAt, err = int64(exp), nil - case uint: - expAt, err = int64(exp), nil - case json.Number: - expAt, err = exp.Int64() - case time.Duration: - expAt, err = time.Now().UTC().Add(exp).Unix(), nil - case nil: - expAt, err = 0, nil - default: - expAt, err = 0, ErrInvalidDuration - } - if expAt < 0 { - return 0, ErrInvalidDuration - } - return expAt, err -} - -// GetNewCredentialsWithMetadata generates and returns new credential with expiry. -func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string) (cred Credentials, err error) { - readBytes := func(size int) (data []byte, err error) { - data = make([]byte, size) - var n int - if n, err = rand.Read(data); err != nil { - return nil, err - } else if n != size { - return nil, fmt.Errorf("Not enough data. 
Expected to read: %v bytes, got: %v bytes", size, n) - } - return data, nil - } - - // Generate access key. - keyBytes, err := readBytes(accessKeyMaxLen) - if err != nil { - return cred, err - } - for i := 0; i < accessKeyMaxLen; i++ { - keyBytes[i] = alphaNumericTable[keyBytes[i]%alphaNumericTableLen] - } - cred.AccessKey = string(keyBytes) - - // Generate secret key. - keyBytes, err = readBytes(secretKeyMaxLen) - if err != nil { - return cred, err - } - cred.SecretKey = strings.Replace(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]), - "/", "+", -1) - cred.Status = "on" - - if tokenSecret == "" { - cred.Expiration = timeSentinel - return cred, nil - } - - expiry, err := ExpToInt64(m["exp"]) - if err != nil { - return cred, err - } - - m["accessKey"] = cred.AccessKey - jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m)) - - cred.Expiration = time.Unix(expiry, 0).UTC() - cred.SessionToken, err = jwt.SignedString([]byte(tokenSecret)) - if err != nil { - return cred, err - } - - return cred, nil -} - -// GetNewCredentials generates and returns new credential. -func GetNewCredentials() (cred Credentials, err error) { - return GetNewCredentialsWithMetadata(map[string]interface{}{}, "") -} - -// CreateCredentials returns new credential with the given access key and secret key. -// Error is returned if given access key or secret key are invalid length. -func CreateCredentials(accessKey, secretKey string) (cred Credentials, err error) { - if !IsAccessKeyValid(accessKey) { - return cred, ErrInvalidAccessKeyLength - } - if !IsSecretKeyValid(secretKey) { - return cred, ErrInvalidSecretKeyLength - } - cred.AccessKey = accessKey - cred.SecretKey = secretKey - cred.Expiration = timeSentinel - cred.Status = "on" - return cred, nil -} diff --git a/pkg/auth/credentials_test.go b/pkg/auth/credentials_test.go deleted file mode 100644 index 4577bef9..00000000 --- a/pkg/auth/credentials_test.go +++ /dev/null @@ -1,181 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package auth - -import ( - "encoding/json" - "testing" - "time" -) - -func TestExpToInt64(t *testing.T) { - testCases := []struct { - exp interface{} - expectedFailure bool - }{ - {"", true}, - {"-1", true}, - {"1574812326", false}, - {1574812326, false}, - {int64(1574812326), false}, - {int(1574812326), false}, - {uint(1574812326), false}, - {uint64(1574812326), false}, - {json.Number("1574812326"), false}, - {1574812326.000, false}, - {time.Duration(3) * time.Minute, false}, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run("", func(t *testing.T) { - _, err := ExpToInt64(testCase.exp) - if err != nil && !testCase.expectedFailure { - t.Errorf("Expected success but got failure %s", err) - } - if err == nil && testCase.expectedFailure { - t.Error("Expected failure but got success") - } - }) - } -} - -func TestIsAccessKeyValid(t *testing.T) { - testCases := []struct { - accessKey string - expectedResult bool - }{ - {alphaNumericTable[:accessKeyMinLen], true}, - {alphaNumericTable[:accessKeyMinLen+1], true}, - {alphaNumericTable[:accessKeyMinLen-1], false}, - } - - for i, testCase := range testCases { - result := IsAccessKeyValid(testCase.accessKey) - if result != testCase.expectedResult { - t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestIsSecretKeyValid(t *testing.T) { - testCases := []struct { - secretKey string - expectedResult bool - }{ - {alphaNumericTable[:secretKeyMinLen], true}, - {alphaNumericTable[:secretKeyMinLen+1], true}, - {alphaNumericTable[:secretKeyMinLen-1], false}, - } - - for i, testCase := range testCases { - result := IsSecretKeyValid(testCase.secretKey) - if result != testCase.expectedResult { - t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestGetNewCredentials(t *testing.T) { - cred, err := GetNewCredentials() - if err != nil { - t.Fatalf("Failed to get a new credential") - } - if !cred.IsValid() { - t.Fatalf("Failed to get new valid credential") - } - if len(cred.AccessKey) != accessKeyMaxLen { - t.Fatalf("access key length: expected: %v, got: %v", secretKeyMaxLen, len(cred.AccessKey)) - } - if len(cred.SecretKey) != secretKeyMaxLen { - t.Fatalf("secret key length: expected: %v, got: %v", secretKeyMaxLen, len(cred.SecretKey)) - } -} - -func TestCreateCredentials(t *testing.T) { - testCases := []struct { - accessKey string - secretKey string - valid bool - expectedErr error - }{ - // Valid access and secret keys with minimum length. - {alphaNumericTable[:accessKeyMinLen], alphaNumericTable[:secretKeyMinLen], true, nil}, - // Valid access and/or secret keys are longer than minimum length. - {alphaNumericTable[:accessKeyMinLen+1], alphaNumericTable[:secretKeyMinLen+1], true, nil}, - // Smaller access key. - {alphaNumericTable[:accessKeyMinLen-1], alphaNumericTable[:secretKeyMinLen], false, ErrInvalidAccessKeyLength}, - // Smaller secret key. 
- {alphaNumericTable[:accessKeyMinLen], alphaNumericTable[:secretKeyMinLen-1], false, ErrInvalidSecretKeyLength}, - } - - for i, testCase := range testCases { - cred, err := CreateCredentials(testCase.accessKey, testCase.secretKey) - - if err != nil { - if testCase.expectedErr == nil { - t.Fatalf("test %v: error: expected = , got = %v", i+1, err) - } - if testCase.expectedErr.Error() != err.Error() { - t.Fatalf("test %v: error: expected = %v, got = %v", i+1, testCase.expectedErr, err) - } - } else { - if testCase.expectedErr != nil { - t.Fatalf("test %v: error: expected = %v, got = ", i+1, testCase.expectedErr) - } - if !cred.IsValid() { - t.Fatalf("test %v: got invalid credentials", i+1) - } - } - } -} - -func TestCredentialsEqual(t *testing.T) { - cred, err := GetNewCredentials() - if err != nil { - t.Fatalf("Failed to get a new credential") - } - cred2, err := GetNewCredentials() - if err != nil { - t.Fatalf("Failed to get a new credential") - } - testCases := []struct { - cred Credentials - ccred Credentials - expectedResult bool - }{ - // Same Credentialss. - {cred, cred, true}, - // Empty credentials to compare. - {cred, Credentials{}, false}, - // Empty credentials. - {Credentials{}, cred, false}, - // Two different credentialss - {cred, cred2, false}, - // Access key is different in credentials to compare. - {cred, Credentials{AccessKey: "myuser", SecretKey: cred.SecretKey}, false}, - // Secret key is different in credentials to compare. - {cred, Credentials{AccessKey: cred.AccessKey, SecretKey: "mypassword"}, false}, - } - - for i, testCase := range testCases { - result := testCase.cred.Equal(testCase.ccred) - if result != testCase.expectedResult { - t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} diff --git a/pkg/bpool/bpool.go b/pkg/bpool/bpool.go deleted file mode 100644 index 0338c0f7..00000000 --- a/pkg/bpool/bpool.go +++ /dev/null @@ -1,77 +0,0 @@ -// Original work https://github.com/oxtoacart/bpool borrowed -// only bpool.go licensed under Apache 2.0. - -// This file modifies original bpool.go to add one more option -// to provide []byte capacity for better GC management. - -/* - * MinIO Cloud Storage (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package bpool - -// BytePoolCap implements a leaky pool of []byte in the form of a bounded channel. -type BytePoolCap struct { - c chan []byte - w int - wcap int -} - -// NewBytePoolCap creates a new BytePool bounded to the given maxSize, with new -// byte arrays sized based on width. -func NewBytePoolCap(maxSize int, width int, capwidth int) (bp *BytePoolCap) { - return &BytePoolCap{ - c: make(chan []byte, maxSize), - w: width, - wcap: capwidth, - } -} - -// Get gets a []byte from the BytePool, or creates a new one if none are -// available in the pool. 
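A short usage sketch of the pool (assuming the historical github.com/minio/minio/pkg/bpool import path):

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/bpool"
)

func main() {
	// Up to 4 pooled buffers, each created with len 10 and cap 16.
	pool := bpool.NewBytePoolCap(4, 10, 16)

	buf := pool.Get()               // reused if available, freshly allocated otherwise
	fmt.Println(len(buf), cap(buf)) // 10 16

	// ... fill and use buf ...

	pool.Put(buf) // returned to the pool; silently dropped if the pool is full
}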
-func (bp *BytePoolCap) Get() (b []byte) { - select { - case b = <-bp.c: - // reuse existing buffer - default: - // create new buffer - if bp.wcap > 0 { - b = make([]byte, bp.w, bp.wcap) - } else { - b = make([]byte, bp.w) - } - } - return -} - -// Put returns the given Buffer to the BytePool. -func (bp *BytePoolCap) Put(b []byte) { - select { - case bp.c <- b: - // buffer went back into pool - default: - // buffer didn't go back into pool, just discard - } -} - -// Width returns the width of the byte arrays in this pool. -func (bp *BytePoolCap) Width() (n int) { - return bp.w -} - -// WidthCap returns the cap width of the byte arrays in this pool. -func (bp *BytePoolCap) WidthCap() (n int) { - return bp.wcap -} diff --git a/pkg/bpool/bpool_test.go b/pkg/bpool/bpool_test.go deleted file mode 100644 index bca1a224..00000000 --- a/pkg/bpool/bpool_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Original work https://github.com/oxtoacart/bpool borrowed -// only bpool.go licensed under Apache 2.0. - -// This file modifies original bpool.go to add one more option -// to provide []byte capacity for better GC management. - -/* - * MinIO Cloud Storage (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package bpool - -import "testing" - -// Tests - bytePool functionality. -func TestBytePool(t *testing.T) { - var size = 4 - var width = 10 - var capWidth = 16 - - bufPool := NewBytePoolCap(size, width, capWidth) - - // Check the width - if bufPool.Width() != width { - t.Fatalf("bytepool width invalid: got %v want %v", bufPool.Width(), width) - } - - // Check with width cap - if bufPool.WidthCap() != capWidth { - t.Fatalf("bytepool capWidth invalid: got %v want %v", bufPool.WidthCap(), capWidth) - } - - // Check that retrieved buffer are of the expected width - b := bufPool.Get() - if len(b) != width { - t.Fatalf("bytepool length invalid: got %v want %v", len(b), width) - } - if cap(b) != capWidth { - t.Fatalf("bytepool length invalid: got %v want %v", cap(b), capWidth) - } - - bufPool.Put(b) - - // Fill the pool beyond the capped pool size. - for i := 0; i < size*2; i++ { - bufPool.Put(make([]byte, bufPool.w)) - } - - b = bufPool.Get() - if len(b) != width { - t.Fatalf("bytepool length invalid: got %v want %v", len(b), width) - } - if cap(b) != capWidth { - t.Fatalf("bytepool length invalid: got %v want %v", cap(b), capWidth) - } - - bufPool.Put(b) - - // Close the channel so we can iterate over it. - close(bufPool.c) - - // Check the size of the pool. 
- if len(bufPool.c) != size { - t.Fatalf("bytepool size invalid: got %v want %v", len(bufPool.c), size) - } - - bufPoolNoCap := NewBytePoolCap(size, width, 0) - // Check the width - if bufPoolNoCap.Width() != width { - t.Fatalf("bytepool width invalid: got %v want %v", bufPool.Width(), width) - } - - // Check with width cap - if bufPoolNoCap.WidthCap() != 0 { - t.Fatalf("bytepool capWidth invalid: got %v want %v", bufPool.WidthCap(), 0) - } - b = bufPoolNoCap.Get() - if len(b) != width { - t.Fatalf("bytepool length invalid: got %v want %v", len(b), width) - } - if cap(b) != width { - t.Fatalf("bytepool length invalid: got %v want %v", cap(b), width) - } -} diff --git a/pkg/bucket/encryption/bucket-sse-config.go b/pkg/bucket/encryption/bucket-sse-config.go deleted file mode 100644 index 667a5266..00000000 --- a/pkg/bucket/encryption/bucket-sse-config.go +++ /dev/null @@ -1,111 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "encoding/xml" - "errors" - "io" -) - -const ( - // AES256 is used with SSE-S3 - AES256 SSEAlgorithm = "AES256" - // AWSKms is used with SSE-KMS - AWSKms SSEAlgorithm = "aws:kms" -) - -// SSEAlgorithm - represents valid SSE algorithms supported; currently only AES256 is supported -type SSEAlgorithm string - -// UnmarshalXML - Unmarshals XML tag to valid SSE algorithm -func (alg *SSEAlgorithm) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var s string - if err := d.DecodeElement(&s, &start); err != nil { - return err - } - - switch s { - case string(AES256): - *alg = AES256 - case string(AWSKms): - *alg = AWSKms - default: - return errors.New("Unknown SSE algorithm") - } - - return nil -} - -// MarshalXML - Marshals given SSE algorithm to valid XML -func (alg *SSEAlgorithm) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return e.EncodeElement(string(*alg), start) -} - -// EncryptionAction - for ApplyServerSideEncryptionByDefault XML tag -type EncryptionAction struct { - Algorithm SSEAlgorithm `xml:"SSEAlgorithm,omitempty"` - MasterKeyID string `xml:"KMSMasterKeyID,omitempty"` -} - -// SSERule - for ServerSideEncryptionConfiguration XML tag -type SSERule struct { - DefaultEncryptionAction EncryptionAction `xml:"ApplyServerSideEncryptionByDefault"` -} - -const xmlNS = "http://s3.amazonaws.com/doc/2006-03-01/" - -// BucketSSEConfig - represents default bucket encryption configuration -type BucketSSEConfig struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"ServerSideEncryptionConfiguration"` - Rules []SSERule `xml:"Rule"` -} - -// ParseBucketSSEConfig - Decodes given XML to a valid default bucket encryption config -func ParseBucketSSEConfig(r io.Reader) (*BucketSSEConfig, error) { - var config BucketSSEConfig - err := xml.NewDecoder(r).Decode(&config) - if err != nil { - return nil, err - } - - // Validates server-side encryption config rules - // Only one rule is allowed on AWS S3 - if len(config.Rules) != 1 { - return 
nil, errors.New("only one server-side encryption rule is allowed at a time") - } - - for _, rule := range config.Rules { - switch rule.DefaultEncryptionAction.Algorithm { - case AES256: - if rule.DefaultEncryptionAction.MasterKeyID != "" { - return nil, errors.New("MasterKeyID is allowed with aws:kms only") - } - case AWSKms: - if rule.DefaultEncryptionAction.MasterKeyID == "" { - return nil, errors.New("MasterKeyID is missing with aws:kms") - } - } - } - - if config.XMLNS == "" { - config.XMLNS = xmlNS - } - - return &config, nil -} diff --git a/pkg/bucket/encryption/bucket-sse-config_test.go b/pkg/bucket/encryption/bucket-sse-config_test.go deleted file mode 100644 index e5309fc2..00000000 --- a/pkg/bucket/encryption/bucket-sse-config_test.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "bytes" - "encoding/xml" - "errors" - "testing" -) - -// TestParseBucketSSEConfig performs basic sanity tests on ParseBucketSSEConfig -func TestParseBucketSSEConfig(t *testing.T) { - actualAES256NoNSConfig := &BucketSSEConfig{ - XMLName: xml.Name{ - Local: "ServerSideEncryptionConfiguration", - }, - Rules: []SSERule{ - { - DefaultEncryptionAction: EncryptionAction{ - Algorithm: AES256, - }, - }, - }, - } - - actualAES256Config := &BucketSSEConfig{ - XMLNS: xmlNS, - XMLName: xml.Name{ - Local: "ServerSideEncryptionConfiguration", - }, - Rules: []SSERule{ - { - DefaultEncryptionAction: EncryptionAction{ - Algorithm: AES256, - }, - }, - }, - } - - actualKMSConfig := &BucketSSEConfig{ - XMLNS: xmlNS, - XMLName: xml.Name{ - Local: "ServerSideEncryptionConfiguration", - }, - Rules: []SSERule{ - { - DefaultEncryptionAction: EncryptionAction{ - Algorithm: AWSKms, - MasterKeyID: "arn:aws:kms:us-east-1:1234/5678example", - }, - }, - }, - } - - testCases := []struct { - inputXML string - expectedErr error - shouldPass bool - expectedConfig *BucketSSEConfig - }{ - // 1. Valid XML SSE-S3 - { - inputXML: `AES256`, - expectedErr: nil, - shouldPass: true, - expectedConfig: actualAES256Config, - }, - // 2. Valid XML SSE-KMS - { - inputXML: `aws:kmsarn:aws:kms:us-east-1:1234/5678example`, - expectedErr: nil, - shouldPass: true, - expectedConfig: actualKMSConfig, - }, - // 3. Invalid - more than one rule - { - inputXML: `AES256AES256`, - expectedErr: errors.New("only one server-side encryption rule is allowed at a time"), - shouldPass: false, - }, - // 4. Invalid XML - master key ID present along with AES256 - { - inputXML: `AES256arn:aws:kms:us-east-1:1234/5678example`, - expectedErr: errors.New("MasterKeyID is allowed with aws:kms only"), - shouldPass: false, - }, - // 5. Invalid XML - master key ID not provided when algorithm is set to aws:kms algorithm - { - inputXML: `aws:kms`, - expectedErr: errors.New("MasterKeyID is missing with aws:kms"), - shouldPass: false, - }, - // 6. 
Invalid Algorithm - { - inputXML: `InvalidAlgorithm`, - expectedErr: errors.New("Unknown SSE algorithm"), - shouldPass: false, - }, - // 7. Valid XML without the namespace set - { - inputXML: `AES256`, - expectedErr: nil, - shouldPass: true, - expectedConfig: actualAES256NoNSConfig, - }, - } - - for i, tc := range testCases { - _, err := ParseBucketSSEConfig(bytes.NewReader([]byte(tc.inputXML))) - if tc.shouldPass && err != nil { - t.Fatalf("Test case %d: Expected to succeed but got %s", i+1, err) - } - - if !tc.shouldPass { - if err == nil || err != nil && err.Error() != tc.expectedErr.Error() { - t.Fatalf("Test case %d: Expected %s but got %s", i+1, tc.expectedErr, err) - } - continue - } - - if expectedXML, err := xml.Marshal(tc.expectedConfig); err != nil || !bytes.Equal(expectedXML, []byte(tc.inputXML)) { - t.Fatalf("Test case %d: Expected bucket encryption XML %s but got %s", i+1, string(expectedXML), tc.inputXML) - } - } -} diff --git a/pkg/bucket/lifecycle/action_string.go b/pkg/bucket/lifecycle/action_string.go deleted file mode 100644 index 5b22b9af..00000000 --- a/pkg/bucket/lifecycle/action_string.go +++ /dev/null @@ -1,24 +0,0 @@ -// Code generated by "stringer -type Action lifecycle.go"; DO NOT EDIT. - -package lifecycle - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. - var x [1]struct{} - _ = x[NoneAction-0] - _ = x[DeleteAction-1] -} - -const _Action_name = "NoneActionDeleteAction" - -var _Action_index = [...]uint8{0, 10, 22} - -func (i Action) String() string { - if i < 0 || i >= Action(len(_Action_index)-1) { - return "Action(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Action_name[_Action_index[i]:_Action_index[i+1]] -} diff --git a/pkg/bucket/lifecycle/and.go b/pkg/bucket/lifecycle/and.go deleted file mode 100644 index c1ef17ca..00000000 --- a/pkg/bucket/lifecycle/and.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "encoding/xml" -) - -// And - a tag to combine a prefix and multiple tags for lifecycle configuration rule. 
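A sketch of the element shape this models, decoded through the package (assuming the historical github.com/minio/minio/pkg/bucket/lifecycle import path and its Tag type):

package main

import (
	"encoding/xml"
	"fmt"

	"github.com/minio/minio/pkg/bucket/lifecycle"
)

func main() {
	// A Filter may carry a Prefix together with several Tags only when
	// they are wrapped in <And>.
	doc := `<Filter><And><Prefix>logs/</Prefix>` +
		`<Tag><Key>retention</Key><Value>short</Value></Tag>` +
		`<Tag><Key>team</Key><Value>ops</Value></Tag></And></Filter>`

	var f lifecycle.Filter
	if err := xml.Unmarshal([]byte(doc), &f); err != nil {
		panic(err)
	}
	fmt.Println(f.Validate()) // <nil>; duplicate tag keys would be rejected
}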
-type And struct { - XMLName xml.Name `xml:"And"` - Prefix string `xml:"Prefix,omitempty"` - Tags []Tag `xml:"Tag,omitempty"` -} - -var errDuplicateTagKey = Errorf("Duplicate Tag Keys are not allowed") - -// isEmpty returns true if Tags field is null -func (a And) isEmpty() bool { - return len(a.Tags) == 0 && a.Prefix == "" -} - -// Validate - validates the And field -func (a And) Validate() error { - if a.ContainsDuplicateTag() { - return errDuplicateTagKey - } - for _, t := range a.Tags { - if err := t.Validate(); err != nil { - return err - } - } - return nil -} - -// ContainsDuplicateTag - returns true if duplicate keys are present in And -func (a And) ContainsDuplicateTag() bool { - x := make(map[string]struct{}, len(a.Tags)) - - for _, t := range a.Tags { - if _, has := x[t.Key]; has { - return true - } - x[t.Key] = struct{}{} - } - - return false -} diff --git a/pkg/bucket/lifecycle/error.go b/pkg/bucket/lifecycle/error.go deleted file mode 100644 index 31138ad1..00000000 --- a/pkg/bucket/lifecycle/error.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "fmt" -) - -// Error is the generic type for any error happening during tag -// parsing. -type Error struct { - err error -} - -// Errorf - formats according to a format specifier and returns -// the string as a value that satisfies error of type tagging.Error -func Errorf(format string, a ...interface{}) error { - return Error{err: fmt.Errorf(format, a...)} -} - -// Unwrap the internal error. -func (e Error) Unwrap() error { return e.err } - -// Error 'error' compatible method. -func (e Error) Error() string { - if e.err == nil { - return "lifecycle: cause " - } - return e.err.Error() -} diff --git a/pkg/bucket/lifecycle/expiration.go b/pkg/bucket/lifecycle/expiration.go deleted file mode 100644 index 2748d7ef..00000000 --- a/pkg/bucket/lifecycle/expiration.go +++ /dev/null @@ -1,133 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package lifecycle - -import ( - "encoding/xml" - "time" -) - -var ( - errLifecycleInvalidDate = Errorf("Date must be provided in ISO 8601 format") - errLifecycleInvalidDays = Errorf("Days must be positive integer when used with Expiration") - errLifecycleInvalidExpiration = Errorf("At least one of Days or Date should be present inside Expiration") - errLifecycleDateNotMidnight = Errorf("'Date' must be at midnight GMT") -) - -// ExpirationDays is a type alias to unmarshal Days in Expiration -type ExpirationDays int - -// UnmarshalXML parses number of days from Expiration and validates if -// greater than zero -func (eDays *ExpirationDays) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error { - var numDays int - err := d.DecodeElement(&numDays, &startElement) - if err != nil { - return err - } - if numDays <= 0 { - return errLifecycleInvalidDays - } - *eDays = ExpirationDays(numDays) - return nil -} - -// MarshalXML encodes number of days to expire if it is non-zero and -// encodes empty string otherwise -func (eDays *ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if *eDays == ExpirationDays(0) { - return nil - } - return e.EncodeElement(int(*eDays), startElement) -} - -// ExpirationDate is a embedded type containing time.Time to unmarshal -// Date in Expiration -type ExpirationDate struct { - time.Time -} - -// UnmarshalXML parses date from Expiration and validates date format -func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error { - var dateStr string - err := d.DecodeElement(&dateStr, &startElement) - if err != nil { - return err - } - // While AWS documentation mentions that the date specified - // must be present in ISO 8601 format, in reality they allow - // users to provide RFC 3339 compliant dates. - expDate, err := time.Parse(time.RFC3339, dateStr) - if err != nil { - return errLifecycleInvalidDate - } - // Allow only date timestamp specifying midnight GMT - hr, min, sec := expDate.Clock() - nsec := expDate.Nanosecond() - loc := expDate.Location() - if !(hr == 0 && min == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) { - return errLifecycleDateNotMidnight - } - - *eDate = ExpirationDate{expDate} - return nil -} - -// MarshalXML encodes expiration date if it is non-zero and encodes -// empty string otherwise -func (eDate *ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if *eDate == (ExpirationDate{time.Time{}}) { - return nil - } - return e.EncodeElement(eDate.Format(time.RFC3339), startElement) -} - -// Expiration - expiration actions for a rule in lifecycle configuration. 
-type Expiration struct { - XMLName xml.Name `xml:"Expiration"` - Days ExpirationDays `xml:"Days,omitempty"` - Date ExpirationDate `xml:"Date,omitempty"` -} - -// Validate - validates the "Expiration" element -func (e Expiration) Validate() error { - // Neither expiration days or date is specified - if e.IsDaysNull() && e.IsDateNull() { - return errLifecycleInvalidExpiration - } - - // Both expiration days and date are specified - if !e.IsDaysNull() && !e.IsDateNull() { - return errLifecycleInvalidExpiration - } - return nil -} - -// IsDaysNull returns true if days field is null -func (e Expiration) IsDaysNull() bool { - return e.Days == ExpirationDays(0) -} - -// IsDateNull returns true if date field is null -func (e Expiration) IsDateNull() bool { - return e.Date.Time.IsZero() -} - -// IsNull returns true if both date and days fields are null -func (e Expiration) IsNull() bool { - return e.IsDaysNull() && e.IsDateNull() -} diff --git a/pkg/bucket/lifecycle/expiration_test.go b/pkg/bucket/lifecycle/expiration_test.go deleted file mode 100644 index e1394c24..00000000 --- a/pkg/bucket/lifecycle/expiration_test.go +++ /dev/null @@ -1,105 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "encoding/xml" - "fmt" - "testing" -) - -// appropriate errors on validation -func TestInvalidExpiration(t *testing.T) { - testCases := []struct { - inputXML string - expectedErr error - }{ - { // Expiration with zero days - inputXML: ` - 0 - `, - expectedErr: errLifecycleInvalidDays, - }, - { // Expiration with invalid date - inputXML: ` - invalid date - `, - expectedErr: errLifecycleInvalidDate, - }, - { // Expiration with both number of days nor a date - inputXML: ` - 2019-04-20T00:01:00Z - `, - expectedErr: errLifecycleDateNotMidnight, - }, - } - - for i, tc := range testCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { - var expiration Expiration - err := xml.Unmarshal([]byte(tc.inputXML), &expiration) - if err != tc.expectedErr { - t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err) - } - }) - - } - - validationTestCases := []struct { - inputXML string - expectedErr error - }{ - { // Expiration with a valid ISO 8601 date - inputXML: ` - 2019-04-20T00:00:00Z - `, - expectedErr: nil, - }, - { // Expiration with a valid number of days - inputXML: ` - 3 - `, - expectedErr: nil, - }, - { // Expiration with neither number of days nor a date - inputXML: ` - `, - expectedErr: errLifecycleInvalidExpiration, - }, - { // Expiration with both number of days nor a date - inputXML: ` - 3 - 2019-04-20T00:00:00Z - `, - expectedErr: errLifecycleInvalidExpiration, - }, - } - for i, tc := range validationTestCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { - var expiration Expiration - err := xml.Unmarshal([]byte(tc.inputXML), &expiration) - if err != nil { - t.Fatalf("%d: %v", i+1, err) - } - - err = expiration.Validate() - if err != tc.expectedErr { - t.Fatalf("%d: %v", 
i+1, err) - } - }) - } -} diff --git a/pkg/bucket/lifecycle/filter.go b/pkg/bucket/lifecycle/filter.go deleted file mode 100644 index e9345872..00000000 --- a/pkg/bucket/lifecycle/filter.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "encoding/xml" -) - -var ( - errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified") -) - -// Filter - a filter for a lifecycle configuration Rule. -type Filter struct { - XMLName xml.Name `xml:"Filter"` - Prefix string - And And - Tag Tag - - // Caching tags, only once - cachedTags []string -} - -// MarshalXML - produces the xml representation of the Filter struct -// only one of Prefix, And and Tag should be present in the output. -func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - if err := e.EncodeToken(start); err != nil { - return err - } - - switch { - case !f.And.isEmpty(): - if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil { - return err - } - case !f.Tag.IsEmpty(): - if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil { - return err - } - default: - // Always print Prefix field when both And & Tag are empty - if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil { - return err - } - } - - return e.EncodeToken(xml.EndElement{Name: start.Name}) -} - -// Validate - validates the filter element -func (f Filter) Validate() error { - // A Filter must have exactly one of Prefix, Tag, or And specified. - if !f.And.isEmpty() { - if f.Prefix != "" { - return errInvalidFilter - } - if !f.Tag.IsEmpty() { - return errInvalidFilter - } - if err := f.And.Validate(); err != nil { - return err - } - } - if f.Prefix != "" { - if !f.Tag.IsEmpty() { - return errInvalidFilter - } - } - if !f.Tag.IsEmpty() { - if err := f.Tag.Validate(); err != nil { - return err - } - } - return nil -} - -// TestTags tests if the object tags satisfy the Filter tags requirement, -// it returns true if there is no tags in the underlying Filter. -func (f Filter) TestTags(tags []string) bool { - if f.cachedTags == nil { - tags := make([]string, 0) - for _, t := range append(f.And.Tags, f.Tag) { - if !t.IsEmpty() { - tags = append(tags, t.String()) - } - } - f.cachedTags = tags - } - for _, ct := range f.cachedTags { - foundTag := false - for _, t := range tags { - if ct == t { - foundTag = true - break - } - } - if !foundTag { - return false - } - } - return true -} diff --git a/pkg/bucket/lifecycle/filter_test.go b/pkg/bucket/lifecycle/filter_test.go deleted file mode 100644 index a398fce3..00000000 --- a/pkg/bucket/lifecycle/filter_test.go +++ /dev/null @@ -1,124 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "encoding/xml" - "fmt" - "testing" -) - -// TestUnsupportedFilters checks if parsing Filter xml with -// unsupported elements returns appropriate errors -func TestUnsupportedFilters(t *testing.T) { - testCases := []struct { - inputXML string - expectedErr error - }{ - { // Filter with And tags - inputXML: ` - - key-prefix - - `, - expectedErr: nil, - }, - { // Filter with Tag tags - inputXML: ` - - key1 - value1 - - `, - expectedErr: nil, - }, - { // Filter with Prefix tag - inputXML: ` - key-prefix - `, - expectedErr: nil, - }, - { // Filter without And and multiple Tag tags - inputXML: ` - key-prefix - - key1 - value1 - - - key2 - value2 - - `, - expectedErr: errInvalidFilter, - }, - { // Filter with And, Prefix & multiple Tag tags - inputXML: ` - - key-prefix - - key1 - value1 - - - key2 - value2 - - - `, - expectedErr: nil, - }, - { // Filter with And and multiple Tag tags - inputXML: ` - - - key1 - value1 - - - key2 - value2 - - - `, - expectedErr: nil, - }, - { // Filter without And and single Tag tag - inputXML: ` - key-prefix - - key1 - value1 - - `, - expectedErr: errInvalidFilter, - }, - } - for i, tc := range testCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { - var filter Filter - err := xml.Unmarshal([]byte(tc.inputXML), &filter) - if err != nil { - t.Fatalf("%d: Expected no error but got %v", i+1, err) - } - err = filter.Validate() - if err != tc.expectedErr { - t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err) - } - }) - } -} diff --git a/pkg/bucket/lifecycle/lifecycle.go b/pkg/bucket/lifecycle/lifecycle.go deleted file mode 100644 index fd401d3c..00000000 --- a/pkg/bucket/lifecycle/lifecycle.go +++ /dev/null @@ -1,204 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "encoding/xml" - "io" - "strings" - "time" -) - -var ( - errLifecycleTooManyRules = Errorf("Lifecycle configuration allows a maximum of 1000 rules") - errLifecycleNoRule = Errorf("Lifecycle configuration should have at least one rule") - errLifecycleOverlappingPrefix = Errorf("Lifecycle configuration has rules with overlapping prefix") -) - -// Action represents a delete action or other transition -// actions that will be implemented later. 
-type Action int - -//go:generate stringer -type Action $GOFILE - -const ( - // NoneAction means no action required after evaluting lifecycle rules - NoneAction Action = iota - // DeleteAction means the object needs to be removed after evaluting lifecycle rules - DeleteAction -) - -// Lifecycle - Configuration for bucket lifecycle. -type Lifecycle struct { - XMLName xml.Name `xml:"LifecycleConfiguration"` - Rules []Rule `xml:"Rule"` -} - -// HasActiveRules - returns whether policy has active rules for. -// Optionally a prefix can be supplied. -// If recursive is specified the function will also return true if any level below the -// prefix has active rules. If no prefix is specified recursive is effectively true. -func (lc Lifecycle) HasActiveRules(prefix string, recursive bool) bool { - if len(lc.Rules) == 0 { - return false - } - for _, rule := range lc.Rules { - if rule.Status == Disabled { - continue - } - if len(prefix) > 0 && len(rule.Filter.Prefix) > 0 { - // incoming prefix must be in rule prefix - if !recursive && !strings.HasPrefix(prefix, rule.Filter.Prefix) { - continue - } - // If recursive, we can skip this rule if it doesn't match the tested prefix. - if recursive && !strings.HasPrefix(rule.Filter.Prefix, prefix) { - continue - } - } - - if rule.NoncurrentVersionExpiration.NoncurrentDays > 0 { - return true - } - if rule.NoncurrentVersionTransition.NoncurrentDays > 0 { - return true - } - if rule.Expiration.IsNull() { - continue - } - if !rule.Expiration.IsDateNull() && rule.Expiration.Date.After(time.Now()) { - continue - } - return true - } - return false -} - -// ParseLifecycleConfig - parses data in given reader to Lifecycle. -func ParseLifecycleConfig(reader io.Reader) (*Lifecycle, error) { - var lc Lifecycle - if err := xml.NewDecoder(reader).Decode(&lc); err != nil { - return nil, err - } - return &lc, nil -} - -// Validate - validates the lifecycle configuration -func (lc Lifecycle) Validate() error { - // Lifecycle config can't have more than 1000 rules - if len(lc.Rules) > 1000 { - return errLifecycleTooManyRules - } - // Lifecycle config should have at least one rule - if len(lc.Rules) == 0 { - return errLifecycleNoRule - } - // Validate all the rules in the lifecycle config - for _, r := range lc.Rules { - if err := r.Validate(); err != nil { - return err - } - } - // Compare every rule's prefix with every other rule's prefix - for i := range lc.Rules { - if i == len(lc.Rules)-1 { - break - } - // N B Empty prefixes overlap with all prefixes - otherRules := lc.Rules[i+1:] - for _, otherRule := range otherRules { - if strings.HasPrefix(lc.Rules[i].Prefix(), otherRule.Prefix()) || - strings.HasPrefix(otherRule.Prefix(), lc.Rules[i].Prefix()) { - return errLifecycleOverlappingPrefix - } - } - } - return nil -} - -// FilterActionableRules returns the rules actions that need to be executed -// after evaluating prefix/tag filtering -func (lc Lifecycle) FilterActionableRules(objName, objTags string) []Rule { - if objName == "" { - return nil - } - var rules []Rule - for _, rule := range lc.Rules { - if rule.Status == Disabled { - continue - } - if !strings.HasPrefix(objName, rule.Prefix()) { - continue - } - tags := strings.Split(objTags, "&") - if rule.Filter.TestTags(tags) { - rules = append(rules, rule) - } - } - return rules -} - -// ComputeAction returns the action to perform by evaluating all lifecycle rules -// against the object name and its modification time. 
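A sketch of the typical call sequence around ComputeAction (assuming the historical github.com/minio/minio/pkg/bucket/lifecycle import path):

package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/minio/minio/pkg/bucket/lifecycle"
)

func main() {
	config := `<LifecycleConfiguration><Rule>` +
		`<Filter><Prefix>logs/</Prefix></Filter>` +
		`<Status>Enabled</Status>` +
		`<Expiration><Days>30</Days></Expiration>` +
		`</Rule></LifecycleConfiguration>`

	lc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(config))
	if err != nil {
		panic(err)
	}
	if err := lc.Validate(); err != nil {
		panic(err)
	}

	// An object under logs/ modified 40 days ago is past the 30-day
	// expiry (which is rounded up to the following midnight UTC).
	modTime := time.Now().UTC().Add(-40 * 24 * time.Hour)
	fmt.Println(lc.ComputeAction("logs/2020/05/21/app.log", "", modTime)) // DeleteAction
}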
-func (lc Lifecycle) ComputeAction(objName, objTags string, modTime time.Time) (action Action) { - action = NoneAction - if modTime.IsZero() { - return - } - - _, expiryTime := lc.PredictExpiryTime(objName, modTime, objTags) - if !expiryTime.IsZero() && time.Now().After(expiryTime) { - return DeleteAction - } - return -} - -// expectedExpiryTime calculates the expiry date/time based on a object modtime. -// The expected expiry time is always a midnight time following the the object -// modification time plus the number of expiration days. -// e.g. If the object modtime is `Thu May 21 13:42:50 GMT 2020` and the object should -// expire in 1 day, then the expected expiry time is `Fri, 23 May 2020 00:00:00 GMT` -func expectedExpiryTime(modTime time.Time, days ExpirationDays) time.Time { - t := modTime.UTC().Add(time.Duration(days+1) * 24 * time.Hour) - return t.Truncate(24 * time.Hour) -} - -// PredictExpiryTime returns the expiry date/time of a given object -// after evaluting the current lifecycle document. -func (lc Lifecycle) PredictExpiryTime(objName string, modTime time.Time, objTags string) (string, time.Time) { - var finalExpiryDate time.Time - var finalExpiryRuleID string - - // Iterate over all actionable rules and find the earliest - // expiration date and its associated rule ID. - for _, rule := range lc.FilterActionableRules(objName, objTags) { - if !rule.Expiration.IsDateNull() { - if finalExpiryDate.IsZero() || finalExpiryDate.After(rule.Expiration.Date.Time) { - finalExpiryRuleID = rule.ID - finalExpiryDate = rule.Expiration.Date.Time - } - } - if !rule.Expiration.IsDaysNull() { - expectedExpiry := expectedExpiryTime(modTime, rule.Expiration.Days) - if finalExpiryDate.IsZero() || finalExpiryDate.After(expectedExpiry) { - finalExpiryRuleID = rule.ID - finalExpiryDate = expectedExpiry - } - } - } - return finalExpiryRuleID, finalExpiryDate -} diff --git a/pkg/bucket/lifecycle/lifecycle_test.go b/pkg/bucket/lifecycle/lifecycle_test.go deleted file mode 100644 index 35b24384..00000000 --- a/pkg/bucket/lifecycle/lifecycle_test.go +++ /dev/null @@ -1,348 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package lifecycle - -import ( - "bytes" - "encoding/xml" - "fmt" - "testing" - "time" -) - -func TestParseAndValidateLifecycleConfig(t *testing.T) { - // Test for lifecycle config with more than 1000 rules - var manyRules []Rule - rule := Rule{ - Status: "Enabled", - Expiration: Expiration{Days: ExpirationDays(3)}, - } - for i := 0; i < 1001; i++ { - manyRules = append(manyRules, rule) - } - - manyRuleLcConfig, err := xml.Marshal(Lifecycle{Rules: manyRules}) - if err != nil { - t.Fatal("Failed to marshal lifecycle config with more than 1000 rules") - } - - // Test for lifecycle config with rules containing overlapping prefixes - rule1 := Rule{ - Status: "Enabled", - Expiration: Expiration{Days: ExpirationDays(3)}, - Filter: Filter{ - Prefix: "/a/b", - }, - } - rule2 := Rule{ - Status: "Enabled", - Expiration: Expiration{Days: ExpirationDays(3)}, - Filter: Filter{ - And: And{ - Prefix: "/a/b/c", - }, - }, - } - overlappingRules := []Rule{rule1, rule2} - overlappingLcConfig, err := xml.Marshal(Lifecycle{Rules: overlappingRules}) - if err != nil { - t.Fatal("Failed to marshal lifecycle config with rules having overlapping prefix") - } - - testCases := []struct { - inputConfig string - expectedParsingErr error - expectedValidationErr error - }{ - { // Valid lifecycle config - inputConfig: ` - - - prefix - - Enabled - 3 - - - - another-prefix - - Enabled - 3 - - `, - expectedParsingErr: nil, - expectedValidationErr: nil, - }, - { // Valid lifecycle config - inputConfig: ` - - - key1val1key2val2 - - 3 - - `, - expectedParsingErr: errDuplicatedXMLTag, - expectedValidationErr: nil, - }, - { // lifecycle config with no rules - inputConfig: ` - `, - expectedParsingErr: nil, - expectedValidationErr: errLifecycleNoRule, - }, - { // lifecycle config with more than 1000 rules - inputConfig: string(manyRuleLcConfig), - expectedParsingErr: nil, - expectedValidationErr: errLifecycleTooManyRules, - }, - { // lifecycle config with rules having overlapping prefix - inputConfig: string(overlappingLcConfig), - expectedParsingErr: nil, - expectedValidationErr: errLifecycleOverlappingPrefix, - }, - } - - for i, tc := range testCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { - lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig))) - if err != tc.expectedParsingErr { - t.Fatalf("%d: Expected %v during parsing but got %v", i+1, tc.expectedParsingErr, err) - } - if tc.expectedParsingErr != nil { - // We already expect a parsing error, - // no need to continue this test. 
- return - } - err = lc.Validate() - if err != tc.expectedValidationErr { - t.Fatalf("%d: Expected %v during parsing but got %v", i+1, tc.expectedValidationErr, err) - } - }) - } -} - -// TestMarshalLifecycleConfig checks if lifecycleconfig xml -// marshaling/unmarshaling can handle output from each other -func TestMarshalLifecycleConfig(t *testing.T) { - // Time at midnight UTC - midnightTS := ExpirationDate{time.Date(2019, time.April, 20, 0, 0, 0, 0, time.UTC)} - lc := Lifecycle{ - Rules: []Rule{ - { - Status: "Enabled", - Filter: Filter{Prefix: "prefix-1"}, - Expiration: Expiration{Days: ExpirationDays(3)}, - }, - { - Status: "Enabled", - Filter: Filter{Prefix: "prefix-1"}, - Expiration: Expiration{Date: ExpirationDate(midnightTS)}, - }, - }, - } - b, err := xml.MarshalIndent(&lc, "", "\t") - if err != nil { - t.Fatal(err) - } - var lc1 Lifecycle - err = xml.Unmarshal(b, &lc1) - if err != nil { - t.Fatal(err) - } - - ruleSet := make(map[string]struct{}) - for _, rule := range lc.Rules { - ruleBytes, err := xml.Marshal(rule) - if err != nil { - t.Fatal(err) - } - ruleSet[string(ruleBytes)] = struct{}{} - } - for _, rule := range lc1.Rules { - ruleBytes, err := xml.Marshal(rule) - if err != nil { - t.Fatal(err) - } - if _, ok := ruleSet[string(ruleBytes)]; !ok { - t.Fatalf("Expected %v to be equal to %v, %v missing", lc, lc1, rule) - } - } -} - -func TestExpectedExpiryTime(t *testing.T) { - testCases := []struct { - modTime time.Time - days ExpirationDays - expected time.Time - }{ - { - time.Date(2020, time.March, 15, 10, 10, 10, 0, time.UTC), - 4, - time.Date(2020, time.March, 20, 0, 0, 0, 0, time.UTC), - }, - { - time.Date(2020, time.March, 15, 0, 0, 0, 0, time.UTC), - 1, - time.Date(2020, time.March, 17, 0, 0, 0, 0, time.UTC), - }, - } - - for i, tc := range testCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { - got := expectedExpiryTime(tc.modTime, tc.days) - if got != tc.expected { - t.Fatalf("Expected %v to be equal to %v", got, tc.expected) - } - }) - } - -} - -func TestComputeActions(t *testing.T) { - testCases := []struct { - inputConfig string - objectName string - objectTags string - objectModTime time.Time - expectedAction Action - }{ - // Empty object name (unexpected case) should always return NoneAction - { - inputConfig: `prefixEnabled5`, - expectedAction: NoneAction, - }, - // Disabled should always return NoneAction - { - inputConfig: `foodir/Disabled5`, - objectName: "foodir/fooobject", - objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago - expectedAction: NoneAction, - }, - // No modTime, should be none-action - { - inputConfig: `foodir/Enabled5`, - objectName: "foodir/fooobject", - expectedAction: NoneAction, - }, - // Prefix not matched - { - inputConfig: `foodir/Enabled5`, - objectName: "foxdir/fooobject", - objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago - expectedAction: NoneAction, - }, - // Too early to remove (test Days) - { - inputConfig: `foodir/Enabled5`, - objectName: "foxdir/fooobject", - objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago - expectedAction: NoneAction, - }, - // Should remove (test Days) - { - inputConfig: `foodir/Enabled5`, - objectName: "foodir/fooobject", - objectModTime: time.Now().UTC().Add(-6 * 24 * time.Hour), // Created 6 days ago - expectedAction: DeleteAction, - }, - // Too early to remove (test Date) - { - inputConfig: `foodir/Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(24*time.Hour).Format(time.RFC3339) + 
``, - objectName: "foodir/fooobject", - objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago - expectedAction: NoneAction, - }, - // Should remove (test Days) - { - inputConfig: `foodir/Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, - objectName: "foodir/fooobject", - objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago - expectedAction: DeleteAction, - }, - // Should remove (Tags match) - { - inputConfig: `foodir/tag1value1Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, - objectName: "foodir/fooobject", - objectTags: "tag1=value1&tag2=value2", - objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago - expectedAction: DeleteAction, - }, - // Should remove (Multiple Rules, Tags match) - { - inputConfig: `foodir/tag1value1tag2value2Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `abc/tag2valueEnabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, - objectName: "foodir/fooobject", - objectTags: "tag1=value1&tag2=value2", - objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago - expectedAction: DeleteAction, - }, - // Should remove (Tags match) - { - inputConfig: `foodir/tag1value1tag2value2Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, - objectName: "foodir/fooobject", - objectTags: "tag1=value1&tag2=value2", - objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago - expectedAction: DeleteAction, - }, - // Should remove (Tags match with inverted order) - { - inputConfig: `factorytruestoreforeverfalseEnabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, - objectName: "fooobject", - objectTags: "storeforever=false&factory=true", - objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago - expectedAction: DeleteAction, - }, - - // Should not remove (Tags don't match) - { - inputConfig: `foodir/tagvalue1Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, - objectName: "foodir/fooobject", - objectTags: "tag1=value1", - objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago - expectedAction: NoneAction, - }, - // Should not remove (Tags match, but prefix doesn't match) - { - inputConfig: `foodir/tag1value1Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, - objectName: "foxdir/fooobject", - objectTags: "tag1=value1", - objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago - expectedAction: NoneAction, - }, - // Should remove, the second rule has expiration kicked in - { - inputConfig: `Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(24*time.Hour).Format(time.RFC3339) + `foxdir/Enabled` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + ``, - objectName: "foxdir/fooobject", - objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago - expectedAction: DeleteAction, - }, - } - - for i, tc := range testCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { - lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig))) - if err != nil { - t.Fatalf("%d: Got unexpected error: %v", i+1, err) - } - if resultAction := lc.ComputeAction(tc.objectName, tc.objectTags, tc.objectModTime); resultAction != 
tc.expectedAction { - t.Fatalf("%d: Expected action: `%v`, got: `%v`", i+1, tc.expectedAction, resultAction) - } - }) - - } -} diff --git a/pkg/bucket/lifecycle/noncurrentversion.go b/pkg/bucket/lifecycle/noncurrentversion.go deleted file mode 100644 index d879c12c..00000000 --- a/pkg/bucket/lifecycle/noncurrentversion.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "encoding/xml" -) - -// NoncurrentVersionExpiration - an action for lifecycle configuration rule. -type NoncurrentVersionExpiration struct { - XMLName xml.Name `xml:"NoncurrentVersionExpiration"` - NoncurrentDays int `xml:"NoncurrentDays,omitempty"` -} - -// NoncurrentVersionTransition - an action for lifecycle configuration rule. -type NoncurrentVersionTransition struct { - NoncurrentDays int `xml:"NoncurrentDays"` - StorageClass string `xml:"StorageClass"` -} - -var ( - errNoncurrentVersionExpirationUnsupported = Errorf("Specifying is not supported") - errNoncurrentVersionTransitionUnsupported = Errorf("Specifying is not supported") -) - -// UnmarshalXML is extended to indicate lack of support for -// NoncurrentVersionExpiration xml tag in object lifecycle -// configuration -func (n NoncurrentVersionExpiration) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error { - return errNoncurrentVersionExpirationUnsupported -} - -// UnmarshalXML is extended to indicate lack of support for -// NoncurrentVersionTransition xml tag in object lifecycle -// configuration -func (n NoncurrentVersionTransition) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error { - return errNoncurrentVersionTransitionUnsupported -} - -// MarshalXML is extended to leave out -// tags -func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return nil -} - -// MarshalXML is extended to leave out -// tags -func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return nil -} diff --git a/pkg/bucket/lifecycle/rule.go b/pkg/bucket/lifecycle/rule.go deleted file mode 100644 index eadfbc85..00000000 --- a/pkg/bucket/lifecycle/rule.go +++ /dev/null @@ -1,135 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
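For orientation, here is a minimal, self-contained sketch of the call pattern the ComputeAction cases above exercise. It is illustrative only, not part of the deleted sources: the import path and the standard S3 lifecycle XML schema are assumptions, while ParseLifecycleConfig, ComputeAction, and DeleteAction are taken from the tests above.

package main

import (
	"bytes"
	"fmt"
	"time"

	"github.com/minio/minio/pkg/bucket/lifecycle" // assumed import path (matches pkg/bucket/lifecycle above)
)

func main() {
	// One enabled rule that expires objects under the "foodir/" prefix after 5 days.
	config := []byte(`<LifecycleConfiguration>
    <Rule>
        <ID>expire-foodir</ID>
        <Status>Enabled</Status>
        <Filter><Prefix>foodir/</Prefix></Filter>
        <Expiration><Days>5</Days></Expiration>
    </Rule>
</LifecycleConfiguration>`)

	lc, err := lifecycle.ParseLifecycleConfig(bytes.NewReader(config))
	if err != nil {
		panic(err)
	}

	// An object created 6 days ago under the matching prefix is past its expiry.
	modTime := time.Now().UTC().Add(-6 * 24 * time.Hour)
	if lc.ComputeAction("foodir/fooobject", "", modTime) == lifecycle.DeleteAction {
		fmt.Println("object is due for expiration")
	}
}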
- */ - -package lifecycle - -import ( - "bytes" - "encoding/xml" -) - -// Status represents lifecycle configuration status -type Status string - -// Supported status types -const ( - Enabled Status = "Enabled" - Disabled Status = "Disabled" -) - -// Rule - a rule for lifecycle configuration. -type Rule struct { - XMLName xml.Name `xml:"Rule"` - ID string `xml:"ID,omitempty"` - Status Status `xml:"Status"` - Filter Filter `xml:"Filter,omitempty"` - Expiration Expiration `xml:"Expiration,omitempty"` - Transition Transition `xml:"Transition,omitempty"` - // FIXME: add a type to catch unsupported AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"` - NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` - NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` -} - -var ( - errInvalidRuleID = Errorf("ID must be less than 255 characters") - errEmptyRuleStatus = Errorf("Status should not be empty") - errInvalidRuleStatus = Errorf("Status must be set to either Enabled or Disabled") - errMissingExpirationAction = Errorf("No expiration action found") -) - -// validateID - checks if ID is valid or not. -func (r Rule) validateID() error { - // cannot be longer than 255 characters - if len(string(r.ID)) > 255 { - return errInvalidRuleID - } - return nil -} - -// validateStatus - checks if status is valid or not. -func (r Rule) validateStatus() error { - // Status can't be empty - if len(r.Status) == 0 { - return errEmptyRuleStatus - } - - // Status must be one of Enabled or Disabled - if r.Status != Enabled && r.Status != Disabled { - return errInvalidRuleStatus - } - return nil -} - -func (r Rule) validateAction() error { - if r.Expiration == (Expiration{}) { - return errMissingExpirationAction - } - return nil -} - -func (r Rule) validateFilter() error { - return r.Filter.Validate() -} - -// Prefix - a rule can either have prefix under or under -// . This method returns the prefix from the -// location where it is available -func (r Rule) Prefix() string { - if r.Filter.Prefix != "" { - return r.Filter.Prefix - } - if r.Filter.And.Prefix != "" { - return r.Filter.And.Prefix - } - return "" -} - -// Tags - a rule can either have tag under or under -// . This method returns all the tags from the -// rule in the format tag1=value1&tag2=value2 -func (r Rule) Tags() string { - if !r.Filter.Tag.IsEmpty() { - return r.Filter.Tag.String() - } - if len(r.Filter.And.Tags) != 0 { - var buf bytes.Buffer - for _, t := range r.Filter.And.Tags { - if buf.Len() > 0 { - buf.WriteString("&") - } - buf.WriteString(t.String()) - } - return buf.String() - } - return "" -} - -// Validate - validates the rule element -func (r Rule) Validate() error { - if err := r.validateID(); err != nil { - return err - } - if err := r.validateStatus(); err != nil { - return err - } - if err := r.validateAction(); err != nil { - return err - } - if err := r.validateFilter(); err != nil { - return err - } - return nil -} diff --git a/pkg/bucket/lifecycle/rule_test.go b/pkg/bucket/lifecycle/rule_test.go deleted file mode 100644 index 5a2da208..00000000 --- a/pkg/bucket/lifecycle/rule_test.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "encoding/xml" - "fmt" - "testing" -) - -// TestUnsupportedRules checks if Rule xml with unsuported tags return -// appropriate errors on parsing -func TestUnsupportedRules(t *testing.T) { - // NoncurrentVersionTransition, NoncurrentVersionExpiration - // and Transition tags aren't supported - unsupportedTestCases := []struct { - inputXML string - expectedErr error - }{ - { // Rule with unsupported NoncurrentVersionTransition - inputXML: ` - - `, - expectedErr: errNoncurrentVersionTransitionUnsupported, - }, - { // Rule with unsupported NoncurrentVersionExpiration - - inputXML: ` - - `, - expectedErr: errNoncurrentVersionExpirationUnsupported, - }, - { // Rule with unsupported Transition action - inputXML: ` - - `, - expectedErr: errTransitionUnsupported, - }, - } - - for i, tc := range unsupportedTestCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { - var rule Rule - err := xml.Unmarshal([]byte(tc.inputXML), &rule) - if err != tc.expectedErr { - t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err) - } - }) - } -} - -// TestInvalidRules checks if Rule xml with invalid elements returns -// appropriate errors on validation -func TestInvalidRules(t *testing.T) { - invalidTestCases := []struct { - inputXML string - expectedErr error - }{ - { // Rule without expiration action - inputXML: ` - Enabled - `, - expectedErr: errMissingExpirationAction, - }, - { // Rule with ID longer than 255 characters - inputXML: ` - babababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababab - `, - expectedErr: errInvalidRuleID, - }, - { // Rule with empty status - inputXML: ` - - `, - expectedErr: errEmptyRuleStatus, - }, - { // Rule with invalid status - inputXML: ` - OK - `, - expectedErr: errInvalidRuleStatus, - }, - } - - for i, tc := range invalidTestCases { - t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) { - var rule Rule - err := xml.Unmarshal([]byte(tc.inputXML), &rule) - if err != nil { - t.Fatal(err) - } - - if err := rule.Validate(); err != tc.expectedErr { - t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err) - } - }) - } -} diff --git a/pkg/bucket/lifecycle/tag.go b/pkg/bucket/lifecycle/tag.go deleted file mode 100644 index d183e022..00000000 --- a/pkg/bucket/lifecycle/tag.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "encoding/xml" - "io" - "unicode/utf8" -) - -// Tag - a tag for a lifecycle configuration Rule filter. -type Tag struct { - XMLName xml.Name `xml:"Tag"` - Key string `xml:"Key,omitempty"` - Value string `xml:"Value,omitempty"` -} - -var ( - errInvalidTagKey = Errorf("The TagKey you have provided is invalid") - errInvalidTagValue = Errorf("The TagValue you have provided is invalid") - - errDuplicatedXMLTag = Errorf("duplicated XML Tag") - errUnknownXMLTag = Errorf("unknown XML Tag") -) - -// UnmarshalXML - decodes XML data. -func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { - var keyAlreadyParsed, valueAlreadyParsed bool - for { - // Read tokens from the XML document in a stream. - t, err := d.Token() - if err != nil { - if err == io.EOF { - break - } - return err - } - - switch se := t.(type) { - case xml.StartElement: - var s string - if err = d.DecodeElement(&s, &se); err != nil { - return err - } - switch se.Name.Local { - case "Key": - if keyAlreadyParsed { - return errDuplicatedXMLTag - } - tag.Key = s - keyAlreadyParsed = true - case "Value": - if valueAlreadyParsed { - return errDuplicatedXMLTag - } - tag.Value = s - valueAlreadyParsed = true - default: - return errUnknownXMLTag - } - } - } - - return nil -} - -func (tag Tag) String() string { - return tag.Key + "=" + tag.Value -} - -// IsEmpty returns whether this tag is empty or not. -func (tag Tag) IsEmpty() bool { - return tag.Key == "" -} - -// Validate checks this tag. -func (tag Tag) Validate() error { - if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 { - return errInvalidTagKey - } - - if utf8.RuneCountInString(tag.Value) > 256 { - return errInvalidTagValue - } - - return nil -} diff --git a/pkg/bucket/lifecycle/transition.go b/pkg/bucket/lifecycle/transition.go deleted file mode 100644 index 7937b34c..00000000 --- a/pkg/bucket/lifecycle/transition.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lifecycle - -import ( - "encoding/xml" -) - -// Transition - transition actions for a rule in lifecycle configuration. 
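The Tag decoder above walks the token stream itself so it can reject duplicated or unknown child elements instead of silently overwriting earlier values. A standalone sketch of that pattern follows; the element and type names are hypothetical and not tied to this package.

package main

import (
	"encoding/xml"
	"errors"
	"fmt"
	"io"
)

// kv mirrors the shape of a <Tag><Key>..</Key><Value>..</Value></Tag> element.
type kv struct {
	Key   string
	Value string
}

var errDuplicated = errors.New("duplicated XML tag")

// UnmarshalXML consumes child tokens one by one so repeated <Key> or <Value>
// elements can be detected rather than silently accepted.
func (t *kv) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var seenKey, seenValue bool
	for {
		tok, err := d.Token()
		if err == io.EOF {
			break
		}
		if err != nil {
			return err
		}
		se, ok := tok.(xml.StartElement)
		if !ok {
			continue // skip character data and end elements
		}
		var s string
		if err := d.DecodeElement(&s, &se); err != nil {
			return err
		}
		switch se.Name.Local {
		case "Key":
			if seenKey {
				return errDuplicated
			}
			t.Key, seenKey = s, true
		case "Value":
			if seenValue {
				return errDuplicated
			}
			t.Value, seenValue = s, true
		default:
			return fmt.Errorf("unknown element %q", se.Name.Local)
		}
	}
	return nil
}

func main() {
	var t kv
	err := xml.Unmarshal([]byte(`<Tag><Key>a</Key><Key>b</Key></Tag>`), &t)
	fmt.Println(err) // duplicated XML tag
}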
-type Transition struct { - XMLName xml.Name `xml:"Transition"` - Days int `xml:"Days,omitempty"` - Date string `xml:"Date,omitempty"` - StorageClass string `xml:"StorageClass"` -} - -var errTransitionUnsupported = Errorf("Specifying tag is not supported") - -// UnmarshalXML is extended to indicate lack of support for Transition -// xml tag in object lifecycle configuration -func (t Transition) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - return errTransitionUnsupported -} - -// MarshalXML is extended to leave out tags -func (t Transition) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return nil -} diff --git a/pkg/bucket/object/lock/lock.go b/pkg/bucket/object/lock/lock.go deleted file mode 100644 index df8cbca8..00000000 --- a/pkg/bucket/object/lock/lock.go +++ /dev/null @@ -1,529 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lock - -import ( - "context" - "encoding/xml" - "errors" - "fmt" - "io" - "net/http" - "strings" - "time" - - "github.com/beevik/ntp" - "github.com/minio/minio/legacy/logger" - "github.com/minio/minio/pkg/env" -) - -// RetMode - object retention mode. -type RetMode string - -const ( - // RetGovernance - governance mode. - RetGovernance RetMode = "GOVERNANCE" - - // RetCompliance - compliance mode. - RetCompliance RetMode = "COMPLIANCE" -) - -// Valid - returns if retention mode is valid -func (r RetMode) Valid() bool { - switch r { - case RetGovernance, RetCompliance: - return true - } - return false -} - -func parseRetMode(modeStr string) (mode RetMode) { - switch strings.ToUpper(modeStr) { - case "GOVERNANCE": - mode = RetGovernance - case "COMPLIANCE": - mode = RetCompliance - } - return mode -} - -// LegalHoldStatus - object legal hold status. -type LegalHoldStatus string - -const ( - // LegalHoldOn - legal hold is on. - LegalHoldOn LegalHoldStatus = "ON" - - // LegalHoldOff - legal hold is off. - LegalHoldOff LegalHoldStatus = "OFF" -) - -// Valid - returns true if legal hold status has valid values -func (l LegalHoldStatus) Valid() bool { - switch l { - case LegalHoldOn, LegalHoldOff: - return true - } - return false -} - -func parseLegalHoldStatus(holdStr string) (st LegalHoldStatus) { - switch strings.ToUpper(holdStr) { - case "ON": - st = LegalHoldOn - case "OFF": - st = LegalHoldOff - } - return st -} - -// Bypass retention governance header. 
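Transition, like the NoncurrentVersion types earlier, pairs an UnmarshalXML that returns a sentinel error with a MarshalXML that writes nothing, so the unsupported element is rejected on input and simply omitted from output. A small standalone sketch of the omission half, with hypothetical type names:

package main

import (
	"encoding/xml"
	"fmt"
)

// unsupported marshals to nothing, so any struct field of this type
// disappears from the encoded document.
type unsupported struct{}

func (unsupported) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return nil // write zero tokens: the element is left out entirely
}

type ruleDoc struct {
	XMLName    xml.Name    `xml:"Rule"`
	Status     string      `xml:"Status"`
	Transition unsupported `xml:"Transition,omitempty"`
}

func main() {
	out, err := xml.Marshal(ruleDoc{Status: "Enabled"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // <Rule><Status>Enabled</Status></Rule>
}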
-const ( - AmzObjectLockBypassRetGovernance = "X-Amz-Bypass-Governance-Retention" - AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date" - AmzObjectLockMode = "X-Amz-Object-Lock-Mode" - AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold" -) - -var ( - // ErrMalformedBucketObjectConfig -indicates that the bucket object lock config is malformed - ErrMalformedBucketObjectConfig = errors.New("invalid bucket object lock config") - // ErrInvalidRetentionDate - indicates that retention date needs to be in ISO 8601 format - ErrInvalidRetentionDate = errors.New("date must be provided in ISO 8601 format") - // ErrPastObjectLockRetainDate - indicates that retention date must be in the future - ErrPastObjectLockRetainDate = errors.New("the retain until date must be in the future") - // ErrUnknownWORMModeDirective - indicates that the retention mode is invalid - ErrUnknownWORMModeDirective = errors.New("unknown WORM mode directive") - // ErrObjectLockMissingContentMD5 - indicates missing Content-MD5 header for put object requests with locking - ErrObjectLockMissingContentMD5 = errors.New("content-MD5 HTTP header is required for Put Object requests with Object Lock parameters") - // ErrObjectLockInvalidHeaders indicates that object lock headers are missing - ErrObjectLockInvalidHeaders = errors.New("x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied") - // ErrMalformedXML - generic error indicating malformed XML - ErrMalformedXML = errors.New("the XML you provided was not well-formed or did not validate against our published schema") -) - -const ( - ntpServerEnv = "MINIO_NTP_SERVER" -) - -var ( - ntpServer = env.Get(ntpServerEnv, "") -) - -// UTCNowNTP - is similar in functionality to UTCNow() -// but only used when we do not wish to rely on system -// time. -func UTCNowNTP() (time.Time, error) { - // ntp server is disabled - if ntpServer == "" { - return time.Now().UTC(), nil - } - return ntp.Time(ntpServer) -} - -// Retention - bucket level retention configuration. -type Retention struct { - Mode RetMode - Validity time.Duration - LockEnabled bool -} - -// Retain - check whether given date is retainable by validity time. -func (r Retention) Retain(created time.Time) bool { - t, err := UTCNowNTP() - if err != nil { - logger.LogIf(context.Background(), err) - // Retain - return true - } - return created.Add(r.Validity).After(t) -} - -// DefaultRetention - default retention configuration. -type DefaultRetention struct { - XMLName xml.Name `xml:"DefaultRetention"` - Mode RetMode `xml:"Mode"` - Days *uint64 `xml:"Days"` - Years *uint64 `xml:"Years"` -} - -// Maximum support retention days and years supported by AWS S3. -const ( - // This tested by using `mc lock` command - maximumRetentionDays = 36500 - maximumRetentionYears = 100 -) - -// UnmarshalXML - decodes XML data. -func (dr *DefaultRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). 
- type defaultRetention DefaultRetention - retention := defaultRetention{} - - if err := d.DecodeElement(&retention, &start); err != nil { - return err - } - - switch retention.Mode { - case RetGovernance, RetCompliance: - default: - return fmt.Errorf("unknown retention mode %v", retention.Mode) - } - - if retention.Days == nil && retention.Years == nil { - return fmt.Errorf("either Days or Years must be specified") - } - - if retention.Days != nil && retention.Years != nil { - return fmt.Errorf("either Days or Years must be specified, not both") - } - - if retention.Days != nil { - if *retention.Days == 0 { - return fmt.Errorf("Default retention period must be a positive integer value for 'Days'") - } - if *retention.Days > maximumRetentionDays { - return fmt.Errorf("Default retention period too large for 'Days' %d", *retention.Days) - } - } else if *retention.Years == 0 { - return fmt.Errorf("Default retention period must be a positive integer value for 'Years'") - } else if *retention.Years > maximumRetentionYears { - return fmt.Errorf("Default retention period too large for 'Years' %d", *retention.Years) - } - - *dr = DefaultRetention(retention) - - return nil -} - -// Config - object lock configuration specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html -type Config struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"ObjectLockConfiguration"` - ObjectLockEnabled string `xml:"ObjectLockEnabled"` - Rule *struct { - DefaultRetention DefaultRetention `xml:"DefaultRetention"` - } `xml:"Rule,omitempty"` -} - -// UnmarshalXML - decodes XML data. -func (config *Config) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). - type objectLockConfig Config - parsedConfig := objectLockConfig{} - - if err := d.DecodeElement(&parsedConfig, &start); err != nil { - return err - } - - if parsedConfig.ObjectLockEnabled != "Enabled" { - return fmt.Errorf("only 'Enabled' value is allowed to ObjectLockEnabled element") - } - - *config = Config(parsedConfig) - return nil -} - -// ToRetention - convert to Retention type. -func (config *Config) ToRetention() Retention { - r := Retention{ - LockEnabled: config.ObjectLockEnabled == "Enabled", - } - if config.Rule != nil { - r.Mode = config.Rule.DefaultRetention.Mode - - t, err := UTCNowNTP() - if err != nil { - logger.LogIf(context.Background(), err) - // Do not change any configuration - // upon NTP failure. - return r - } - - if config.Rule.DefaultRetention.Days != nil { - r.Validity = t.AddDate(0, 0, int(*config.Rule.DefaultRetention.Days)).Sub(t) - } else { - r.Validity = t.AddDate(int(*config.Rule.DefaultRetention.Years), 0, 0).Sub(t) - } - } - - return r -} - -// Maximum 4KiB size per object lock config. 
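DefaultRetention and Config both decode into a locally defined alias type first; the alias has the same fields but none of the original's methods, so DecodeElement falls back to plain struct decoding and the custom UnmarshalXML is not re-entered. A generic sketch of that pattern with hypothetical names:

package main

import (
	"encoding/xml"
	"fmt"
)

type lockConfig struct {
	Enabled string `xml:"Enabled"`
}

func (c *lockConfig) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	// The alias type does not inherit lockConfig's methods, so decoding
	// it does not recurse back into this function.
	type plain lockConfig
	var p plain
	if err := d.DecodeElement(&p, &start); err != nil {
		return err
	}
	if p.Enabled != "Enabled" {
		return fmt.Errorf("unexpected Enabled value %q", p.Enabled)
	}
	*c = lockConfig(p)
	return nil
}

func main() {
	var c lockConfig
	err := xml.Unmarshal([]byte(`<Config><Enabled>Enabled</Enabled></Config>`), &c)
	fmt.Println(c, err) // {Enabled} <nil>
}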
-const maxObjectLockConfigSize = 1 << 12 - -// ParseObjectLockConfig parses ObjectLockConfig from xml -func ParseObjectLockConfig(reader io.Reader) (*Config, error) { - config := Config{} - if err := xml.NewDecoder(io.LimitReader(reader, maxObjectLockConfigSize)).Decode(&config); err != nil { - return nil, err - } - - return &config, nil -} - -// NewObjectLockConfig returns a initialized lock.Config struct -func NewObjectLockConfig() *Config { - return &Config{ - ObjectLockEnabled: "Enabled", - } -} - -// RetentionDate is a embedded type containing time.Time to unmarshal -// Date in Retention -type RetentionDate struct { - time.Time -} - -// UnmarshalXML parses date from Retention and validates date format -func (rDate *RetentionDate) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error { - var dateStr string - err := d.DecodeElement(&dateStr, &startElement) - if err != nil { - return err - } - // While AWS documentation mentions that the date specified - // must be present in ISO 8601 format, in reality they allow - // users to provide RFC 3339 compliant dates. - retDate, err := time.Parse(time.RFC3339, dateStr) - if err != nil { - return ErrInvalidRetentionDate - } - - *rDate = RetentionDate{retDate} - return nil -} - -// MarshalXML encodes expiration date if it is non-zero and encodes -// empty string otherwise -func (rDate *RetentionDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error { - if *rDate == (RetentionDate{time.Time{}}) { - return nil - } - return e.EncodeElement(rDate.Format(time.RFC3339), startElement) -} - -// ObjectRetention specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html -type ObjectRetention struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"Retention"` - Mode RetMode `xml:"Mode,omitempty"` - RetainUntilDate RetentionDate `xml:"RetainUntilDate,omitempty"` -} - -// Maximum 4KiB size per object retention config. -const maxObjectRetentionSize = 1 << 12 - -// ParseObjectRetention constructs ObjectRetention struct from xml input -func ParseObjectRetention(reader io.Reader) (*ObjectRetention, error) { - ret := ObjectRetention{} - if err := xml.NewDecoder(io.LimitReader(reader, maxObjectRetentionSize)).Decode(&ret); err != nil { - return nil, err - } - if ret.Mode != "" && !ret.Mode.Valid() { - return &ret, ErrUnknownWORMModeDirective - } - - t, err := UTCNowNTP() - if err != nil { - logger.LogIf(context.Background(), err) - return &ret, ErrPastObjectLockRetainDate - } - - if !ret.RetainUntilDate.IsZero() && ret.RetainUntilDate.Before(t) { - return &ret, ErrPastObjectLockRetainDate - } - - return &ret, nil -} - -// IsObjectLockRetentionRequested returns true if object lock retention headers are set. -func IsObjectLockRetentionRequested(h http.Header) bool { - if _, ok := h[AmzObjectLockMode]; ok { - return true - } - if _, ok := h[AmzObjectLockRetainUntilDate]; ok { - return true - } - return false -} - -// IsObjectLockLegalHoldRequested returns true if object lock legal hold header is set. -func IsObjectLockLegalHoldRequested(h http.Header) bool { - _, ok := h[AmzObjectLockLegalHold] - return ok -} - -// IsObjectLockGovernanceBypassSet returns true if object lock governance bypass header is set. -func IsObjectLockGovernanceBypassSet(h http.Header) bool { - return strings.ToLower(h.Get(AmzObjectLockBypassRetGovernance)) == "true" -} - -// IsObjectLockRequested returns true if legal hold or object lock retention headers are requested. 
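Both parsers above wrap the request body in io.LimitReader before decoding, so an oversized payload is cut off and surfaces as a decode error rather than being read in full. A standalone sketch of that guard, using a hypothetical document type and the same 4 KiB limit:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

const maxConfigSize = 1 << 12 // 4 KiB, mirroring the limit used above

type doc struct {
	Value string `xml:"Value"`
}

// decodeBounded reads at most maxConfigSize bytes from r; anything larger
// is truncated and fails to parse (typically as an unexpected EOF).
func decodeBounded(r io.Reader) (*doc, error) {
	var d doc
	if err := xml.NewDecoder(io.LimitReader(r, maxConfigSize)).Decode(&d); err != nil {
		return nil, err
	}
	return &d, nil
}

func main() {
	d, err := decodeBounded(strings.NewReader(`<doc><Value>ok</Value></doc>`))
	fmt.Println(d, err) // &{ok} <nil>

	huge := "<doc><Value>" + strings.Repeat("x", maxConfigSize) + "</Value></doc>"
	_, err = decodeBounded(strings.NewReader(huge))
	fmt.Println(err != nil) // true: the truncated document does not parse
}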
-func IsObjectLockRequested(h http.Header) bool { - return IsObjectLockLegalHoldRequested(h) || IsObjectLockRetentionRequested(h) -} - -// ParseObjectLockRetentionHeaders parses http headers to extract retention mode and retention date -func ParseObjectLockRetentionHeaders(h http.Header) (rmode RetMode, r RetentionDate, err error) { - retMode := h.Get(AmzObjectLockMode) - dateStr := h.Get(AmzObjectLockRetainUntilDate) - if len(retMode) == 0 || len(dateStr) == 0 { - return rmode, r, ErrObjectLockInvalidHeaders - } - - rmode = parseRetMode(retMode) - if !rmode.Valid() { - return rmode, r, ErrUnknownWORMModeDirective - } - - var retDate time.Time - // While AWS documentation mentions that the date specified - // must be present in ISO 8601 format, in reality they allow - // users to provide RFC 3339 compliant dates. - retDate, err = time.Parse(time.RFC3339, dateStr) - if err != nil { - return rmode, r, ErrInvalidRetentionDate - } - - t, err := UTCNowNTP() - if err != nil { - logger.LogIf(context.Background(), err) - return rmode, r, ErrPastObjectLockRetainDate - } - - if retDate.Before(t) { - return rmode, r, ErrPastObjectLockRetainDate - } - - return rmode, RetentionDate{retDate}, nil - -} - -// GetObjectRetentionMeta constructs ObjectRetention from metadata -func GetObjectRetentionMeta(meta map[string]string) ObjectRetention { - var mode RetMode - var retainTill RetentionDate - - var modeStr, tillStr string - ok := false - - modeStr, ok = meta[strings.ToLower(AmzObjectLockMode)] - if !ok { - modeStr, ok = meta[AmzObjectLockMode] - } - if ok { - mode = parseRetMode(modeStr) - } - tillStr, ok = meta[strings.ToLower(AmzObjectLockRetainUntilDate)] - if !ok { - tillStr, ok = meta[AmzObjectLockRetainUntilDate] - } - if ok { - if t, e := time.Parse(time.RFC3339, tillStr); e == nil { - retainTill = RetentionDate{t.UTC()} - } - } - return ObjectRetention{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Mode: mode, RetainUntilDate: retainTill} -} - -// GetObjectLegalHoldMeta constructs ObjectLegalHold from metadata -func GetObjectLegalHoldMeta(meta map[string]string) ObjectLegalHold { - holdStr, ok := meta[strings.ToLower(AmzObjectLockLegalHold)] - if !ok { - holdStr, ok = meta[AmzObjectLockLegalHold] - } - if ok { - return ObjectLegalHold{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Status: parseLegalHoldStatus(holdStr)} - } - return ObjectLegalHold{} -} - -// ParseObjectLockLegalHoldHeaders parses request headers to construct ObjectLegalHold -func ParseObjectLockLegalHoldHeaders(h http.Header) (lhold ObjectLegalHold, err error) { - holdStatus, ok := h[AmzObjectLockLegalHold] - if ok { - lh := parseLegalHoldStatus(holdStatus[0]) - if !lh.Valid() { - return lhold, ErrUnknownWORMModeDirective - } - lhold = ObjectLegalHold{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Status: lh} - } - return lhold, nil - -} - -// ObjectLegalHold specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html -type ObjectLegalHold struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"LegalHold"` - Status LegalHoldStatus `xml:"Status,omitempty"` -} - -// IsEmpty returns true if struct is empty -func (l *ObjectLegalHold) IsEmpty() bool { - return !l.Status.Valid() -} - -// ParseObjectLegalHold decodes the XML into ObjectLegalHold -func ParseObjectLegalHold(reader io.Reader) (hold *ObjectLegalHold, err error) { - hold = &ObjectLegalHold{} - if err = xml.NewDecoder(reader).Decode(hold); err != nil { - return - } - - if !hold.Status.Valid() { - return nil, 
ErrMalformedXML - } - return -} - -// FilterObjectLockMetadata filters object lock metadata if s3:GetObjectRetention permission is denied or if isCopy flag set. -func FilterObjectLockMetadata(metadata map[string]string, filterRetention, filterLegalHold bool) map[string]string { - // Copy on write - dst := metadata - var copied bool - delKey := func(key string) { - if _, ok := metadata[key]; !ok { - return - } - if !copied { - dst = make(map[string]string, len(metadata)) - for k, v := range metadata { - dst[k] = v - } - copied = true - } - delete(dst, key) - } - legalHold := GetObjectLegalHoldMeta(metadata) - if !legalHold.Status.Valid() || filterLegalHold { - delKey(AmzObjectLockLegalHold) - } - - ret := GetObjectRetentionMeta(metadata) - if !ret.Mode.Valid() || filterRetention { - delKey(AmzObjectLockMode) - delKey(AmzObjectLockRetainUntilDate) - return dst - } - return dst -} diff --git a/pkg/bucket/object/lock/lock_test.go b/pkg/bucket/object/lock/lock_test.go deleted file mode 100644 index 9edd7f63..00000000 --- a/pkg/bucket/object/lock/lock_test.go +++ /dev/null @@ -1,567 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lock - -import ( - "encoding/xml" - "fmt" - "net/http" - "reflect" - "strings" - "testing" - "time" - - xhttp "github.com/minio/minio/legacy/http" -) - -func TestParseMode(t *testing.T) { - testCases := []struct { - value string - expectedMode RetMode - }{ - { - value: "governance", - expectedMode: RetGovernance, - }, - { - value: "complIAnce", - expectedMode: RetCompliance, - }, - { - value: "gce", - expectedMode: "", - }, - } - - for _, tc := range testCases { - if parseRetMode(tc.value) != tc.expectedMode { - t.Errorf("Expected Mode %s, got %s", tc.expectedMode, parseRetMode(tc.value)) - } - } -} -func TestParseLegalHoldStatus(t *testing.T) { - tests := []struct { - value string - expectedStatus LegalHoldStatus - }{ - { - value: "ON", - expectedStatus: LegalHoldOn, - }, - { - value: "Off", - expectedStatus: LegalHoldOff, - }, - { - value: "x", - expectedStatus: "", - }, - } - - for _, tt := range tests { - actualStatus := parseLegalHoldStatus(tt.value) - if actualStatus != tt.expectedStatus { - t.Errorf("Expected legal hold status %s, got %s", tt.expectedStatus, actualStatus) - } - } -} - -// TestUnmarshalDefaultRetention checks if default retention -// marshaling and unmarshaling work as expected -func TestUnmarshalDefaultRetention(t *testing.T) { - days := uint64(4) - years := uint64(1) - zerodays := uint64(0) - invalidDays := uint64(maximumRetentionDays + 1) - tests := []struct { - value DefaultRetention - expectedErr error - expectErr bool - }{ - { - value: DefaultRetention{Mode: "retain"}, - expectedErr: fmt.Errorf("unknown retention mode retain"), - expectErr: true, - }, - { - value: DefaultRetention{Mode: RetGovernance}, - expectedErr: fmt.Errorf("either Days or Years must be specified"), - expectErr: true, - }, - { - value: DefaultRetention{Mode: RetGovernance, Days: &days}, - 
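FilterObjectLockMetadata above only allocates a copy of the metadata map when it actually has a key to delete, so the common no-op path returns the input untouched. A generic copy-on-write sketch of the same idea, independent of this package:

package main

import "fmt"

// dropKeys returns m with the given keys removed, copying m only if at
// least one of the keys is actually present (copy-on-write).
func dropKeys(m map[string]string, keys ...string) map[string]string {
	dst := m
	copied := false
	for _, k := range keys {
		if _, ok := m[k]; !ok {
			continue
		}
		if !copied {
			dst = make(map[string]string, len(m))
			for key, v := range m {
				dst[key] = v
			}
			copied = true
		}
		delete(dst, k)
	}
	return dst
}

func main() {
	meta := map[string]string{"a": "1", "b": "2"}

	same := dropKeys(meta, "missing")
	fmt.Println(len(same), len(meta)) // 2 2 (no copy was made)

	trimmed := dropKeys(meta, "a")
	fmt.Println(len(trimmed), len(meta)) // 1 2 (original left intact)
}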
expectedErr: nil, - expectErr: false, - }, - { - value: DefaultRetention{Mode: RetGovernance, Years: &years}, - expectedErr: nil, - expectErr: false, - }, - { - value: DefaultRetention{Mode: RetGovernance, Days: &days, Years: &years}, - expectedErr: fmt.Errorf("either Days or Years must be specified, not both"), - expectErr: true, - }, - { - value: DefaultRetention{Mode: RetGovernance, Days: &zerodays}, - expectedErr: fmt.Errorf("Default retention period must be a positive integer value for 'Days'"), - expectErr: true, - }, - { - value: DefaultRetention{Mode: RetGovernance, Days: &invalidDays}, - expectedErr: fmt.Errorf("Default retention period too large for 'Days' %d", invalidDays), - expectErr: true, - }, - } - for _, tt := range tests { - d, err := xml.MarshalIndent(&tt.value, "", "\t") - if err != nil { - t.Fatal(err) - } - var dr DefaultRetention - err = xml.Unmarshal(d, &dr) - if tt.expectedErr == nil { - if err != nil { - t.Fatalf("error: expected = , got = %v", err) - } - } else if err == nil { - t.Fatalf("error: expected = %v, got = ", tt.expectedErr) - } else if tt.expectedErr.Error() != err.Error() { - t.Fatalf("error: expected = %v, got = %v", tt.expectedErr, err) - } - } -} - -func TestParseObjectLockConfig(t *testing.T) { - tests := []struct { - value string - expectedErr error - expectErr bool - }{ - { - value: `yes`, - expectedErr: fmt.Errorf("only 'Enabled' value is allowed to ObjectLockEnabled element"), - expectErr: true, - }, - { - value: `EnabledCOMPLIANCE0`, - expectedErr: fmt.Errorf("Default retention period must be a positive integer value for 'Days'"), - expectErr: true, - }, - { - value: `EnabledCOMPLIANCE30`, - expectedErr: nil, - expectErr: false, - }, - } - for _, tt := range tests { - _, err := ParseObjectLockConfig(strings.NewReader(tt.value)) - if tt.expectedErr == nil { - if err != nil { - t.Fatalf("error: expected = , got = %v", err) - } - } else if err == nil { - t.Fatalf("error: expected = %v, got = ", tt.expectedErr) - } else if tt.expectedErr.Error() != err.Error() { - t.Fatalf("error: expected = %v, got = %v", tt.expectedErr, err) - } - } -} - -func TestParseObjectRetention(t *testing.T) { - tests := []struct { - value string - expectedErr error - expectErr bool - }{ - { - value: `string2020-01-02T15:04:05Z`, - expectedErr: ErrUnknownWORMModeDirective, - expectErr: true, - }, - { - value: `COMPLIANCE2017-01-02T15:04:05Z`, - expectedErr: ErrPastObjectLockRetainDate, - expectErr: true, - }, - { - value: `GOVERNANCE2057-01-02T15:04:05Z`, - expectedErr: nil, - expectErr: false, - }, - } - for _, tt := range tests { - _, err := ParseObjectRetention(strings.NewReader(tt.value)) - if tt.expectedErr == nil { - if err != nil { - t.Fatalf("error: expected = , got = %v", err) - } - } else if err == nil { - t.Fatalf("error: expected = %v, got = ", tt.expectedErr) - } else if tt.expectedErr.Error() != err.Error() { - t.Fatalf("error: expected = %v, got = %v", tt.expectedErr, err) - } - } -} - -func TestIsObjectLockRequested(t *testing.T) { - tests := []struct { - header http.Header - expectedVal bool - }{ - { - header: http.Header{ - "Authorization": []string{"AWS4-HMAC-SHA256 "}, - "X-Amz-Content-Sha256": []string{""}, - "Content-Encoding": []string{""}, - }, - expectedVal: false, - }, - { - header: http.Header{ - AmzObjectLockLegalHold: []string{""}, - }, - expectedVal: true, - }, - { - header: http.Header{ - AmzObjectLockRetainUntilDate: []string{""}, - AmzObjectLockMode: []string{""}, - }, - expectedVal: true, - }, - { - header: http.Header{ - 
AmzObjectLockBypassRetGovernance: []string{""}, - }, - expectedVal: false, - }, - } - for _, tt := range tests { - actualVal := IsObjectLockRequested(tt.header) - if actualVal != tt.expectedVal { - t.Fatalf("error: expected %v, actual %v", tt.expectedVal, actualVal) - } - } -} - -func TestIsObjectLockGovernanceBypassSet(t *testing.T) { - tests := []struct { - header http.Header - expectedVal bool - }{ - { - header: http.Header{ - "Authorization": []string{"AWS4-HMAC-SHA256 "}, - "X-Amz-Content-Sha256": []string{""}, - "Content-Encoding": []string{""}, - }, - expectedVal: false, - }, - { - header: http.Header{ - AmzObjectLockLegalHold: []string{""}, - }, - expectedVal: false, - }, - { - header: http.Header{ - AmzObjectLockRetainUntilDate: []string{""}, - AmzObjectLockMode: []string{""}, - }, - expectedVal: false, - }, - { - header: http.Header{ - AmzObjectLockBypassRetGovernance: []string{""}, - }, - expectedVal: false, - }, - { - header: http.Header{ - AmzObjectLockBypassRetGovernance: []string{"true"}, - }, - expectedVal: true, - }, - } - for _, tt := range tests { - actualVal := IsObjectLockGovernanceBypassSet(tt.header) - if actualVal != tt.expectedVal { - t.Fatalf("error: expected %v, actual %v", tt.expectedVal, actualVal) - } - } -} - -func TestParseObjectLockRetentionHeaders(t *testing.T) { - tests := []struct { - header http.Header - expectedErr error - }{ - { - header: http.Header{ - "Authorization": []string{"AWS4-HMAC-SHA256 "}, - "X-Amz-Content-Sha256": []string{""}, - "Content-Encoding": []string{""}, - }, - expectedErr: ErrObjectLockInvalidHeaders, - }, - { - header: http.Header{ - xhttp.AmzObjectLockMode: []string{"lock"}, - xhttp.AmzObjectLockRetainUntilDate: []string{"2017-01-02"}, - }, - expectedErr: ErrUnknownWORMModeDirective, - }, - { - header: http.Header{ - xhttp.AmzObjectLockMode: []string{"governance"}, - }, - expectedErr: ErrObjectLockInvalidHeaders, - }, - { - header: http.Header{ - xhttp.AmzObjectLockRetainUntilDate: []string{"2017-01-02"}, - xhttp.AmzObjectLockMode: []string{"governance"}, - }, - expectedErr: ErrInvalidRetentionDate, - }, - { - header: http.Header{ - xhttp.AmzObjectLockRetainUntilDate: []string{"2017-01-02T15:04:05Z"}, - xhttp.AmzObjectLockMode: []string{"governance"}, - }, - expectedErr: ErrPastObjectLockRetainDate, - }, - { - header: http.Header{ - xhttp.AmzObjectLockMode: []string{"governance"}, - xhttp.AmzObjectLockRetainUntilDate: []string{"2017-01-02T15:04:05Z"}, - }, - expectedErr: ErrPastObjectLockRetainDate, - }, - { - header: http.Header{ - xhttp.AmzObjectLockMode: []string{"governance"}, - xhttp.AmzObjectLockRetainUntilDate: []string{"2087-01-02T15:04:05Z"}, - }, - expectedErr: nil, - }, - } - - for i, tt := range tests { - _, _, err := ParseObjectLockRetentionHeaders(tt.header) - if tt.expectedErr == nil { - if err != nil { - t.Fatalf("Case %d error: expected = , got = %v", i, err) - } - } else if err == nil { - t.Fatalf("Case %d error: expected = %v, got = ", i, tt.expectedErr) - } else if tt.expectedErr.Error() != err.Error() { - t.Fatalf("Case %d error: expected = %v, got = %v", i, tt.expectedErr, err) - } - } -} - -func TestGetObjectRetentionMeta(t *testing.T) { - tests := []struct { - metadata map[string]string - expected ObjectRetention - }{ - { - metadata: map[string]string{ - "Authorization": "AWS4-HMAC-SHA256 ", - "X-Amz-Content-Sha256": "", - "Content-Encoding": "", - }, - expected: ObjectRetention{}, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-mode": "governance", - }, - expected: ObjectRetention{Mode: 
RetGovernance}, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-retain-until-date": "2020-02-01", - }, - expected: ObjectRetention{RetainUntilDate: RetentionDate{time.Date(2020, 2, 1, 12, 0, 0, 0, time.UTC)}}, - }, - } - - for i, tt := range tests { - o := GetObjectRetentionMeta(tt.metadata) - if o.Mode != tt.expected.Mode { - t.Fatalf("Case %d expected %v, got %v", i, tt.expected.Mode, o.Mode) - } - } -} - -func TestGetObjectLegalHoldMeta(t *testing.T) { - tests := []struct { - metadata map[string]string - expected ObjectLegalHold - }{ - { - metadata: map[string]string{ - "x-amz-object-lock-mode": "governance", - }, - expected: ObjectLegalHold{}, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-legal-hold": "on", - }, - expected: ObjectLegalHold{Status: LegalHoldOn}, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-legal-hold": "off", - }, - expected: ObjectLegalHold{Status: LegalHoldOff}, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-legal-hold": "X", - }, - expected: ObjectLegalHold{Status: ""}, - }, - } - - for i, tt := range tests { - o := GetObjectLegalHoldMeta(tt.metadata) - if o.Status != tt.expected.Status { - t.Fatalf("Case %d expected %v, got %v", i, tt.expected.Status, o.Status) - } - } -} - -func TestParseObjectLegalHold(t *testing.T) { - tests := []struct { - value string - expectedErr error - expectErr bool - }{ - { - value: `string`, - expectedErr: ErrMalformedXML, - expectErr: true, - }, - { - value: `ON`, - expectedErr: nil, - expectErr: false, - }, - { - value: `On`, - expectedErr: ErrMalformedXML, - expectErr: true, - }, - } - for i, tt := range tests { - _, err := ParseObjectLegalHold(strings.NewReader(tt.value)) - if tt.expectedErr == nil { - if err != nil { - t.Fatalf("Case %d error: expected = , got = %v", i, err) - } - } else if err == nil { - t.Fatalf("Case %d error: expected = %v, got = ", i, tt.expectedErr) - } else if tt.expectedErr.Error() != err.Error() { - t.Fatalf("Case %d error: expected = %v, got = %v", i, tt.expectedErr, err) - } - } -} -func TestFilterObjectLockMetadata(t *testing.T) { - tests := []struct { - metadata map[string]string - filterRetention bool - filterLegalHold bool - expected map[string]string - }{ - { - metadata: map[string]string{ - "Authorization": "AWS4-HMAC-SHA256 ", - "X-Amz-Content-Sha256": "", - "Content-Encoding": "", - }, - expected: map[string]string{ - "Authorization": "AWS4-HMAC-SHA256 ", - "X-Amz-Content-Sha256": "", - "Content-Encoding": "", - }, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-mode": "governance", - }, - expected: map[string]string{ - "x-amz-object-lock-mode": "governance", - }, - filterRetention: false, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-mode": "governance", - "x-amz-object-lock-retain-until-date": "2020-02-01", - }, - expected: map[string]string{}, - filterRetention: true, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-legal-hold": "off", - }, - expected: map[string]string{}, - filterLegalHold: true, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-legal-hold": "on", - }, - expected: map[string]string{"x-amz-object-lock-legal-hold": "on"}, - filterLegalHold: false, - }, - { - metadata: map[string]string{ - "x-amz-object-lock-legal-hold": "on", - "x-amz-object-lock-mode": "governance", - "x-amz-object-lock-retain-until-date": "2020-02-01", - }, - expected: map[string]string{}, - filterRetention: true, - filterLegalHold: true, - }, - { - metadata: map[string]string{ - 
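For reference, a hypothetical caller of the header-parsing helper exercised above. The import path is an assumption based on the pkg/bucket/object/lock path in this diff; the constants and the ParseObjectLockRetentionHeaders signature come from the deleted code itself.

package main

import (
	"fmt"
	"net/http"

	"github.com/minio/minio/pkg/bucket/object/lock" // assumed import path
)

func main() {
	h := http.Header{}
	// http.Header.Set canonicalizes the key to X-Amz-Object-Lock-Mode,
	// the form the parser looks up.
	h.Set(lock.AmzObjectLockMode, "GOVERNANCE")
	h.Set(lock.AmzObjectLockRetainUntilDate, "2087-01-02T15:04:05Z")

	mode, until, err := lock.ParseObjectLockRetentionHeaders(h)
	if err != nil {
		panic(err)
	}
	fmt.Println(mode, until.Format("2006-01-02")) // GOVERNANCE 2087-01-02
}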
"x-amz-object-lock-legal-hold": "on", - "x-amz-object-lock-mode": "governance", - "x-amz-object-lock-retain-until-date": "2020-02-01", - }, - expected: map[string]string{"x-amz-object-lock-legal-hold": "on", - "x-amz-object-lock-mode": "governance", - "x-amz-object-lock-retain-until-date": "2020-02-01"}, - }, - } - - for i, tt := range tests { - o := FilterObjectLockMetadata(tt.metadata, tt.filterRetention, tt.filterLegalHold) - if !reflect.DeepEqual(o, tt.metadata) { - t.Fatalf("Case %d expected %v, got %v", i, tt.metadata, o) - } - } -} diff --git a/pkg/bucket/policy/action.go b/pkg/bucket/policy/action.go deleted file mode 100644 index 0e0a3a55..00000000 --- a/pkg/bucket/policy/action.go +++ /dev/null @@ -1,311 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - - "github.com/minio/minio/pkg/bucket/policy/condition" -) - -// Action - policy action. -// Refer https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html -// for more information about available actions. -type Action string - -const ( - // AbortMultipartUploadAction - AbortMultipartUpload Rest API action. - AbortMultipartUploadAction Action = "s3:AbortMultipartUpload" - - // CreateBucketAction - CreateBucket Rest API action. - CreateBucketAction = "s3:CreateBucket" - - // DeleteBucketAction - DeleteBucket Rest API action. - DeleteBucketAction = "s3:DeleteBucket" - - // ForceDeleteBucketAction - DeleteBucket Rest API action when x-minio-force-delete flag - // is specified. - ForceDeleteBucketAction = "s3:ForceDeleteBucket" - - // DeleteBucketPolicyAction - DeleteBucketPolicy Rest API action. - DeleteBucketPolicyAction = "s3:DeleteBucketPolicy" - - // DeleteObjectAction - DeleteObject Rest API action. - DeleteObjectAction = "s3:DeleteObject" - - // GetBucketLocationAction - GetBucketLocation Rest API action. - GetBucketLocationAction = "s3:GetBucketLocation" - - // GetBucketNotificationAction - GetBucketNotification Rest API action. - GetBucketNotificationAction = "s3:GetBucketNotification" - - // GetBucketPolicyAction - GetBucketPolicy Rest API action. - GetBucketPolicyAction = "s3:GetBucketPolicy" - - // GetObjectAction - GetObject Rest API action. - GetObjectAction = "s3:GetObject" - - // HeadBucketAction - HeadBucket Rest API action. This action is unused in minio. - HeadBucketAction = "s3:HeadBucket" - - // ListAllMyBucketsAction - ListAllMyBuckets (List buckets) Rest API action. - ListAllMyBucketsAction = "s3:ListAllMyBuckets" - - // ListBucketAction - ListBucket Rest API action. - ListBucketAction = "s3:ListBucket" - - // ListBucketMultipartUploadsAction - ListMultipartUploads Rest API action. - ListBucketMultipartUploadsAction = "s3:ListBucketMultipartUploads" - - // ListenBucketNotificationAction - ListenBucketNotification Rest API action. - // This is MinIO extension. 
- ListenBucketNotificationAction = "s3:ListenBucketNotification" - - // ListMultipartUploadPartsAction - ListParts Rest API action. - ListMultipartUploadPartsAction = "s3:ListMultipartUploadParts" - - // PutBucketNotificationAction - PutObjectNotification Rest API action. - PutBucketNotificationAction = "s3:PutBucketNotification" - - // PutBucketPolicyAction - PutBucketPolicy Rest API action. - PutBucketPolicyAction = "s3:PutBucketPolicy" - - // PutObjectAction - PutObject Rest API action. - PutObjectAction = "s3:PutObject" - - // PutBucketLifecycleAction - PutBucketLifecycle Rest API action. - PutBucketLifecycleAction = "s3:PutLifecycleConfiguration" - - // GetBucketLifecycleAction - GetBucketLifecycle Rest API action. - GetBucketLifecycleAction = "s3:GetLifecycleConfiguration" - - // BypassGovernanceRetentionAction - bypass governance retention for PutObjectRetention, PutObject and DeleteObject Rest API action. - BypassGovernanceRetentionAction = "s3:BypassGovernanceRetention" - // PutObjectRetentionAction - PutObjectRetention Rest API action. - PutObjectRetentionAction = "s3:PutObjectRetention" - - // GetObjectRetentionAction - GetObjectRetention, GetObject, HeadObject Rest API action. - GetObjectRetentionAction = "s3:GetObjectRetention" - // GetObjectLegalHoldAction - GetObjectLegalHold, GetObject Rest API action. - GetObjectLegalHoldAction = "s3:GetObjectLegalHold" - // PutObjectLegalHoldAction - PutObjectLegalHold, PutObject Rest API action. - PutObjectLegalHoldAction = "s3:PutObjectLegalHold" - // GetBucketObjectLockConfigurationAction - GetObjectLockConfiguration Rest API action - GetBucketObjectLockConfigurationAction = "s3:GetBucketObjectLockConfiguration" - // PutBucketObjectLockConfigurationAction - PutObjectLockConfiguration Rest API action - PutBucketObjectLockConfigurationAction = "s3:PutBucketObjectLockConfiguration" - - // GetBucketTaggingAction - GetTagging Rest API action - GetBucketTaggingAction = "s3:GetBucketTagging" - // PutBucketTaggingAction - PutTagging Rest API action - PutBucketTaggingAction = "s3:PutBucketTagging" - - // GetObjectTaggingAction - Get Object Tags API action - GetObjectTaggingAction = "s3:GetObjectTagging" - // PutObjectTaggingAction - Put Object Tags API action - PutObjectTaggingAction = "s3:PutObjectTagging" - // DeleteObjectTaggingAction - Delete Object Tags API action - DeleteObjectTaggingAction = "s3:DeleteObjectTagging" - - // PutBucketEncryptionAction - PutBucketEncryption REST API action - PutBucketEncryptionAction = "s3:PutEncryptionConfiguration" - // GetBucketEncryptionAction - GetBucketEncryption REST API action - GetBucketEncryptionAction = "s3:GetEncryptionConfiguration" -) - -// List of all supported object actions. -var supportedObjectActions = map[Action]struct{}{ - AbortMultipartUploadAction: {}, - DeleteObjectAction: {}, - GetObjectAction: {}, - ListMultipartUploadPartsAction: {}, - PutObjectAction: {}, - BypassGovernanceRetentionAction: {}, - PutObjectRetentionAction: {}, - GetObjectRetentionAction: {}, - PutObjectLegalHoldAction: {}, - GetObjectLegalHoldAction: {}, - GetObjectTaggingAction: {}, - PutObjectTaggingAction: {}, - DeleteObjectTaggingAction: {}, -} - -// isObjectAction - returns whether action is object type or not. -func (action Action) isObjectAction() bool { - _, ok := supportedObjectActions[action] - return ok -} - -// List of all supported actions. 
-var supportedActions = map[Action]struct{}{ - AbortMultipartUploadAction: {}, - CreateBucketAction: {}, - DeleteBucketAction: {}, - ForceDeleteBucketAction: {}, - DeleteBucketPolicyAction: {}, - DeleteObjectAction: {}, - GetBucketLocationAction: {}, - GetBucketNotificationAction: {}, - GetBucketPolicyAction: {}, - GetObjectAction: {}, - HeadBucketAction: {}, - ListAllMyBucketsAction: {}, - ListBucketAction: {}, - ListBucketMultipartUploadsAction: {}, - ListenBucketNotificationAction: {}, - ListMultipartUploadPartsAction: {}, - PutBucketNotificationAction: {}, - PutBucketPolicyAction: {}, - PutObjectAction: {}, - GetBucketLifecycleAction: {}, - PutBucketLifecycleAction: {}, - PutObjectRetentionAction: {}, - GetObjectRetentionAction: {}, - GetObjectLegalHoldAction: {}, - PutObjectLegalHoldAction: {}, - PutBucketObjectLockConfigurationAction: {}, - GetBucketObjectLockConfigurationAction: {}, - PutBucketTaggingAction: {}, - GetBucketTaggingAction: {}, - BypassGovernanceRetentionAction: {}, - GetObjectTaggingAction: {}, - PutObjectTaggingAction: {}, - DeleteObjectTaggingAction: {}, - PutBucketEncryptionAction: {}, - GetBucketEncryptionAction: {}, -} - -// IsValid - checks if action is valid or not. -func (action Action) IsValid() bool { - _, ok := supportedActions[action] - return ok -} - -// MarshalJSON - encodes Action to JSON data. -func (action Action) MarshalJSON() ([]byte, error) { - if action.IsValid() { - return json.Marshal(string(action)) - } - - return nil, Errorf("invalid action '%v'", action) -} - -// UnmarshalJSON - decodes JSON data to Action. -func (action *Action) UnmarshalJSON(data []byte) error { - var s string - - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - a := Action(s) - if !a.IsValid() { - return Errorf("invalid action '%v'", s) - } - - *action = a - - return nil -} - -func parseAction(s string) (Action, error) { - action := Action(s) - - if action.IsValid() { - return action, nil - } - - return action, Errorf("unsupported action '%v'", s) -} - -// actionConditionKeyMap - holds mapping of supported condition key for an action. 
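A hypothetical caller-side view of the Action validation above, assuming the package imports as github.com/minio/minio/pkg/bucket/policy: unknown actions are rejected when the JSON is decoded, not later during policy evaluation.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio/pkg/bucket/policy" // assumed import path
)

func main() {
	var a policy.Action
	if err := json.Unmarshal([]byte(`"s3:PutObject"`), &a); err != nil {
		panic(err)
	}
	fmt.Println(a == policy.PutObjectAction) // true

	// An action outside the supported set fails to decode.
	err := json.Unmarshal([]byte(`"s3:MakeCoffee"`), &a)
	fmt.Println(err != nil) // true
}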
-var actionConditionKeyMap = map[Action]condition.KeySet{ - AbortMultipartUploadAction: condition.NewKeySet(condition.CommonKeys...), - - CreateBucketAction: condition.NewKeySet(condition.CommonKeys...), - - DeleteObjectAction: condition.NewKeySet(condition.CommonKeys...), - - GetBucketLocationAction: condition.NewKeySet(condition.CommonKeys...), - - GetObjectAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3XAmzServerSideEncryption, - condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3XAmzStorageClass, - }, condition.CommonKeys...)...), - - HeadBucketAction: condition.NewKeySet(condition.CommonKeys...), - - ListAllMyBucketsAction: condition.NewKeySet(condition.CommonKeys...), - - ListBucketAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3Prefix, - condition.S3Delimiter, - condition.S3MaxKeys, - }, condition.CommonKeys...)...), - - ListBucketMultipartUploadsAction: condition.NewKeySet(condition.CommonKeys...), - - ListMultipartUploadPartsAction: condition.NewKeySet(condition.CommonKeys...), - - PutObjectAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3XAmzCopySource, - condition.S3XAmzServerSideEncryption, - condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3XAmzMetadataDirective, - condition.S3XAmzStorageClass, - condition.S3ObjectLockRetainUntilDate, - condition.S3ObjectLockMode, - condition.S3ObjectLockLegalHold, - }, condition.CommonKeys...)...), - - // https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html - // LockLegalHold is not supported with PutObjectRetentionAction - PutObjectRetentionAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3ObjectLockRemainingRetentionDays, - condition.S3ObjectLockRetainUntilDate, - condition.S3ObjectLockMode, - }, condition.CommonKeys...)...), - - GetObjectRetentionAction: condition.NewKeySet(condition.CommonKeys...), - PutObjectLegalHoldAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3ObjectLockLegalHold, - }, condition.CommonKeys...)...), - GetObjectLegalHoldAction: condition.NewKeySet(condition.CommonKeys...), - - // https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html - BypassGovernanceRetentionAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3ObjectLockRemainingRetentionDays, - condition.S3ObjectLockRetainUntilDate, - condition.S3ObjectLockMode, - condition.S3ObjectLockLegalHold, - }, condition.CommonKeys...)...), - - GetBucketObjectLockConfigurationAction: condition.NewKeySet(condition.CommonKeys...), - PutBucketObjectLockConfigurationAction: condition.NewKeySet(condition.CommonKeys...), - GetBucketTaggingAction: condition.NewKeySet(condition.CommonKeys...), - PutBucketTaggingAction: condition.NewKeySet(condition.CommonKeys...), - PutObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), - GetObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), - DeleteObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), -} diff --git a/pkg/bucket/policy/action_test.go b/pkg/bucket/policy/action_test.go deleted file mode 100644 index 40dcb9cd..00000000 --- a/pkg/bucket/policy/action_test.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestActionIsObjectAction(t *testing.T) { - testCases := []struct { - action Action - expectedResult bool - }{ - {AbortMultipartUploadAction, true}, - {DeleteObjectAction, true}, - {GetObjectAction, true}, - {ListMultipartUploadPartsAction, true}, - {PutObjectAction, true}, - {CreateBucketAction, false}, - } - - for i, testCase := range testCases { - result := testCase.action.isObjectAction() - - if testCase.expectedResult != result { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestActionIsValid(t *testing.T) { - testCases := []struct { - action Action - expectedResult bool - }{ - {AbortMultipartUploadAction, true}, - {Action("foo"), false}, - } - - for i, testCase := range testCases { - result := testCase.action.IsValid() - - if testCase.expectedResult != result { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestActionMarshalJSON(t *testing.T) { - testCases := []struct { - action Action - expectedResult []byte - expectErr bool - }{ - {PutObjectAction, []byte(`"s3:PutObject"`), false}, - {Action("foo"), nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.action) - expectErr := (err != nil) - - if testCase.expectErr != expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestActionUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult Action - expectErr bool - }{ - {[]byte(`"s3:PutObject"`), PutObjectAction, false}, - {[]byte(`"foo"`), Action(""), true}, - } - - for i, testCase := range testCases { - var result Action - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if testCase.expectErr != expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if testCase.expectedResult != result { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/actionset.go b/pkg/bucket/policy/actionset.go deleted file mode 100644 index 1f4dd820..00000000 --- a/pkg/bucket/policy/actionset.go +++ /dev/null @@ -1,132 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - "fmt" - "sort" - - "github.com/minio/minio-go/v6/pkg/set" -) - -// ActionSet - set of actions. -type ActionSet map[Action]struct{} - -// Add - add action to the set. -func (actionSet ActionSet) Add(action Action) { - actionSet[action] = struct{}{} -} - -// Contains - checks given action exists in the action set. -func (actionSet ActionSet) Contains(action Action) bool { - _, found := actionSet[action] - return found -} - -// Equals - checks whether given action set is equal to current action set or not. -func (actionSet ActionSet) Equals(sactionSet ActionSet) bool { - // If length of set is not equal to length of given set, the - // set is not equal to given set. - if len(actionSet) != len(sactionSet) { - return false - } - - // As both sets are equal in length, check each elements are equal. - for k := range actionSet { - if _, ok := sactionSet[k]; !ok { - return false - } - } - - return true -} - -// Intersection - returns actions available in both ActionSet. -func (actionSet ActionSet) Intersection(sset ActionSet) ActionSet { - nset := NewActionSet() - for k := range actionSet { - if _, ok := sset[k]; ok { - nset.Add(k) - } - } - - return nset -} - -// MarshalJSON - encodes ActionSet to JSON data. -func (actionSet ActionSet) MarshalJSON() ([]byte, error) { - if len(actionSet) == 0 { - return nil, Errorf("empty actions not allowed") - } - - return json.Marshal(actionSet.ToSlice()) -} - -func (actionSet ActionSet) String() string { - actions := []string{} - for action := range actionSet { - actions = append(actions, string(action)) - } - sort.Strings(actions) - - return fmt.Sprintf("%v", actions) -} - -// ToSlice - returns slice of actions from the action set. -func (actionSet ActionSet) ToSlice() []Action { - actions := []Action{} - for action := range actionSet { - actions = append(actions, action) - } - - return actions -} - -// UnmarshalJSON - decodes JSON data to ActionSet. -func (actionSet *ActionSet) UnmarshalJSON(data []byte) error { - var sset set.StringSet - if err := json.Unmarshal(data, &sset); err != nil { - return err - } - - if len(sset) == 0 { - return Errorf("empty actions not allowed") - } - - *actionSet = make(ActionSet) - for _, s := range sset.ToSlice() { - action, err := parseAction(s) - if err != nil { - return err - } - - actionSet.Add(action) - } - - return nil -} - -// NewActionSet - creates new action set. -func NewActionSet(actions ...Action) ActionSet { - actionSet := make(ActionSet) - for _, action := range actions { - actionSet.Add(action) - } - - return actionSet -} diff --git a/pkg/bucket/policy/actionset_test.go b/pkg/bucket/policy/actionset_test.go deleted file mode 100644 index a84bcd17..00000000 --- a/pkg/bucket/policy/actionset_test.go +++ /dev/null @@ -1,158 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package policy - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestActionSetAdd(t *testing.T) { - testCases := []struct { - set ActionSet - action Action - expectedResult ActionSet - }{ - {NewActionSet(), PutObjectAction, NewActionSet(PutObjectAction)}, - {NewActionSet(PutObjectAction), PutObjectAction, NewActionSet(PutObjectAction)}, - } - - for i, testCase := range testCases { - testCase.set.Add(testCase.action) - - if !reflect.DeepEqual(testCase.expectedResult, testCase.set) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) - } - } -} - -func TestActionSetContains(t *testing.T) { - testCases := []struct { - set ActionSet - action Action - expectedResult bool - }{ - {NewActionSet(PutObjectAction), PutObjectAction, true}, - {NewActionSet(PutObjectAction, GetObjectAction), PutObjectAction, true}, - {NewActionSet(PutObjectAction, GetObjectAction), AbortMultipartUploadAction, false}, - } - - for i, testCase := range testCases { - result := testCase.set.Contains(testCase.action) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestActionSetIntersection(t *testing.T) { - testCases := []struct { - set ActionSet - setToIntersect ActionSet - expectedResult ActionSet - }{ - {NewActionSet(), NewActionSet(PutObjectAction), NewActionSet()}, - {NewActionSet(PutObjectAction), NewActionSet(), NewActionSet()}, - {NewActionSet(PutObjectAction), NewActionSet(PutObjectAction, GetObjectAction), NewActionSet(PutObjectAction)}, - } - - for i, testCase := range testCases { - result := testCase.set.Intersection(testCase.setToIntersect) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) - } - } -} - -func TestActionSetMarshalJSON(t *testing.T) { - testCases := []struct { - actionSet ActionSet - expectedResult []byte - expectErr bool - }{ - {NewActionSet(PutObjectAction), []byte(`["s3:PutObject"]`), false}, - {NewActionSet(), nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.actionSet) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestActionSetToSlice(t *testing.T) { - testCases := []struct { - actionSet ActionSet - expectedResult []Action - }{ - {NewActionSet(PutObjectAction), []Action{PutObjectAction}}, - {NewActionSet(), []Action{}}, - } - - for i, testCase := range testCases { - result := testCase.actionSet.ToSlice() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestActionSetUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult ActionSet - expectErr bool - }{ - {[]byte(`"s3:PutObject"`), NewActionSet(PutObjectAction), false}, - {[]byte(`["s3:PutObject"]`), NewActionSet(PutObjectAction), false}, - {[]byte(`["s3:PutObject", "s3:GetObject"]`), NewActionSet(PutObjectAction, GetObjectAction), false}, - {[]byte(`["s3:PutObject", "s3:GetObject", "s3:PutObject"]`), NewActionSet(PutObjectAction, GetObjectAction), false}, - 
{[]byte(`[]`), NewActionSet(), true}, // Empty array. - {[]byte(`"foo"`), nil, true}, // Invalid action. - {[]byte(`["s3:PutObject", "foo"]`), nil, true}, // Invalid action. - } - - for i, testCase := range testCases { - result := make(ActionSet) - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/binaryequalsfunc.go b/pkg/bucket/policy/condition/binaryequalsfunc.go deleted file mode 100644 index 25799ee9..00000000 --- a/pkg/bucket/policy/condition/binaryequalsfunc.go +++ /dev/null @@ -1,143 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "encoding/base64" - "fmt" - "net/http" - "sort" - - "github.com/minio/minio-go/v6/pkg/s3utils" - "github.com/minio/minio-go/v6/pkg/set" -) - -func toBinaryEqualsFuncString(n name, key Key, values set.StringSet) string { - valueStrings := values.ToSlice() - sort.Strings(valueStrings) - - return fmt.Sprintf("%v:%v:%v", n, key, valueStrings) -} - -// binaryEqualsFunc - String equals function. It checks whether value by Key in given -// values map is in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is in values. -type binaryEqualsFunc struct { - k Key - values set.StringSet -} - -// evaluate() - evaluates to check whether value by Key in given values is in -// condition values. -func (f binaryEqualsFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - fvalues := f.values.ApplyFunc(substFuncFromValues(values)) - return !fvalues.Intersection(set.CreateStringSet(requestValue...)).IsEmpty() -} - -// key() - returns condition key which is used by this condition function. -func (f binaryEqualsFunc) key() Key { - return f.k -} - -// name() - returns "BinaryEquals" condition name. -func (f binaryEqualsFunc) name() name { - return binaryEquals -} - -func (f binaryEqualsFunc) String() string { - return toBinaryEqualsFuncString(binaryEquals, f.k, f.values) -} - -// toMap - returns map representation of this function. 
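// (Editor's note, not part of the deleted file: a minimal usage sketch for the
// BinaryEquals condition above, built only from identifiers that appear in this
// diff. NewBinaryEqualsFunc expects base64-encoded values; validation decodes
// them, and evaluation compares the decoded strings against the raw request
// values:
//
//	f, err := NewBinaryEqualsFunc(S3XAmzCopySource,
//		base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))
//	if err != nil {
//		// invalid key/value combination
//	}
//	ok := NewFunctions(f).Evaluate(map[string][]string{
//		"x-amz-copy-source": {"mybucket/myobject"},
//	}) // ok == true
//
// End of editor's note.)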
-func (f binaryEqualsFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - for _, value := range f.values.ToSlice() { - values.Add(NewStringValue(base64.StdEncoding.EncodeToString([]byte(value)))) - } - - return map[Key]ValueSet{ - f.k: values, - } -} - -func validateBinaryEqualsValues(n name, key Key, values set.StringSet) error { - vslice := values.ToSlice() - for _, s := range vslice { - sbytes, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return err - } - values.Remove(s) - s = string(sbytes) - switch key { - case S3XAmzCopySource: - bucket, object := path2BucketAndObject(s) - if object == "" { - return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzCopySource, n) - } - if err = s3utils.CheckValidBucketName(bucket); err != nil { - return err - } - case S3XAmzServerSideEncryption, S3XAmzServerSideEncryptionCustomerAlgorithm: - if s != "AES256" { - return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzServerSideEncryption, n) - } - case S3XAmzMetadataDirective: - if s != "COPY" && s != "REPLACE" { - return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzMetadataDirective, n) - } - case S3XAmzContentSha256: - if s == "" { - return fmt.Errorf("invalid empty value for '%v' for %v condition", S3XAmzContentSha256, n) - } - } - values.Add(s) - } - - return nil -} - -// newBinaryEqualsFunc - returns new BinaryEquals function. -func newBinaryEqualsFunc(key Key, values ValueSet) (Function, error) { - valueStrings, err := valuesToStringSlice(binaryEquals, values) - if err != nil { - return nil, err - } - - return NewBinaryEqualsFunc(key, valueStrings...) -} - -// NewBinaryEqualsFunc - returns new BinaryEquals function. -func NewBinaryEqualsFunc(key Key, values ...string) (Function, error) { - sset := set.CreateStringSet(values...) - if err := validateBinaryEqualsValues(binaryEquals, key, sset); err != nil { - return nil, err - } - - return &binaryEqualsFunc{key, sset}, nil -} diff --git a/pkg/bucket/policy/condition/binaryequalsfunc_test.go b/pkg/bucket/policy/condition/binaryequalsfunc_test.go deleted file mode 100644 index 3032d895..00000000 --- a/pkg/bucket/policy/condition/binaryequalsfunc_test.go +++ /dev/null @@ -1,382 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "encoding/base64" - "reflect" - "testing" -) - -func TestBinaryEqualsFuncEvaluate(t *testing.T) { - case1Function, err := newBinaryEqualsFunc(S3XAmzCopySource, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case3Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newBinaryEqualsFunc(S3LocationConstraint, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true}, - {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false}, - {case1Function, map[string][]string{}, false}, - {case1Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true}, - {case2Function, map[string][]string{}, false}, - {case2Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true}, - {case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false}, - {case3Function, map[string][]string{}, false}, - {case3Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true}, - {case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false}, - {case4Function, map[string][]string{}, false}, - {case4Function, map[string][]string{"delimiter": {"/"}}, false}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestBinaryEqualsFuncKey(t *testing.T) { - case1Function, err := newBinaryEqualsFunc(S3XAmzCopySource, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newBinaryEqualsFunc(S3LocationConstraint, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, S3XAmzCopySource}, - {case2Function, S3XAmzServerSideEncryption}, - {case3Function, S3XAmzMetadataDirective}, - {case4Function, S3LocationConstraint}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestBinaryEqualsFuncToMap(t *testing.T) { - case1Function, err := newBinaryEqualsFunc(S3XAmzCopySource, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case1Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))), - } - - case2Function, err := newBinaryEqualsFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("yourbucket/myobject"))), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("yourbucket/myobject"))), - ), - } - - case3Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256")))), - } - - case4Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))), - ), - } - - case5Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE")))), - } - - case6Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("COPY"))), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("COPY"))), - ), - } - - case7Function, err := newBinaryEqualsFunc(S3LocationConstraint, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1")))), - } - - case8Function, err := newBinaryEqualsFunc(S3LocationConstraint, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("us-west-1"))), - ), - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case8Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("us-west-1"))), - ), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {case3Function, case3Result}, - {case4Function, case4Result}, - {case5Function, case5Result}, - {case6Function, case6Result}, - {case7Function, case7Result}, - {case8Function, case8Result}, - {&binaryEqualsFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNewBinaryEqualsFunc(t *testing.T) { - case1Function, err := newBinaryEqualsFunc(S3XAmzCopySource, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newBinaryEqualsFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("yourbucket/myobject"))), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("COPY"))), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Function, err := newBinaryEqualsFunc(S3LocationConstraint, - NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Function, err := newBinaryEqualsFunc(S3LocationConstraint, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("us-west-1"))), - ), - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {S3XAmzCopySource, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))), case1Function, false}, - {S3XAmzCopySource, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("yourbucket/myobject"))), - ), case2Function, false}, - - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256")))), case3Function, false}, - {S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))), - ), case4Function, false}, - - {S3XAmzMetadataDirective, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE")))), case5Function, false}, - {S3XAmzMetadataDirective, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("COPY"))), - ), case6Function, false}, - - {S3LocationConstraint, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1")))), case7Function, false}, - {S3LocationConstraint, - NewValueSet( - NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))), - NewStringValue(base64.StdEncoding.EncodeToString([]byte("us-west-1"))), - ), case8Function, false}, - - // Unsupported value error. - {S3XAmzCopySource, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))), NewIntValue(7)), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))), NewIntValue(7)), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))), NewIntValue(7)), nil, true}, - {S3LocationConstraint, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))), NewIntValue(7)), nil, true}, - - // Invalid value error. - {S3XAmzCopySource, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket")))), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("SSE-C")))), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("DUPLICATE")))), nil, true}, - } - - for i, testCase := range testCases { - result, err := newBinaryEqualsFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/boolfunc.go b/pkg/bucket/policy/condition/boolfunc.go deleted file mode 100644 index b8ecd3e6..00000000 --- a/pkg/bucket/policy/condition/boolfunc.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "reflect" - "strconv" -) - -// booleanFunc - Bool condition function. It checks whether Key is true or false. -// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html#Conditions_Boolean -type booleanFunc struct { - k Key - value string -} - -// evaluate() - evaluates to check whether Key is present in given values or not. -// Depending on condition boolean value, this function returns true or false. -func (f booleanFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - return f.value == requestValue[0] -} - -// key() - returns condition key which is used by this condition function. -func (f booleanFunc) key() Key { - return f.k -} - -// name() - returns "Bool" condition name. -func (f booleanFunc) name() name { - return boolean -} - -func (f booleanFunc) String() string { - return fmt.Sprintf("%v:%v:%v", boolean, f.k, f.value) -} - -// toMap - returns map representation of this function. -func (f booleanFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - return map[Key]ValueSet{ - f.k: NewValueSet(NewStringValue(f.value)), - } -} - -func newBooleanFunc(key Key, values ValueSet) (Function, error) { - if key != AWSSecureTransport { - return nil, fmt.Errorf("only %v key is allowed for %v condition", AWSSecureTransport, boolean) - } - - if len(values) != 1 { - return nil, fmt.Errorf("only one value is allowed for boolean condition") - } - - var value Value - for v := range values { - value = v - switch v.GetType() { - case reflect.Bool: - if _, err := v.GetBool(); err != nil { - return nil, err - } - case reflect.String: - s, err := v.GetString() - if err != nil { - return nil, err - } - if _, err = strconv.ParseBool(s); err != nil { - return nil, fmt.Errorf("value must be a boolean string for boolean condition") - } - default: - return nil, fmt.Errorf("value must be a boolean for boolean condition") - } - } - - return &booleanFunc{key, value.String()}, nil -} - -// NewBoolFunc - returns new Bool function. -func NewBoolFunc(key Key, value string) (Function, error) { - return &booleanFunc{key, value}, nil -} diff --git a/pkg/bucket/policy/condition/boolfunc_test.go b/pkg/bucket/policy/condition/boolfunc_test.go deleted file mode 100644 index ff4cbdf9..00000000 --- a/pkg/bucket/policy/condition/boolfunc_test.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "reflect" - "testing" -) - -func TestBooleanFuncEvaluate(t *testing.T) { - case1Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(false))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"SecureTransport": {"true"}}, true}, - {case2Function, map[string][]string{"SecureTransport": {"false"}}, true}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Errorf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestBooleanFuncKey(t *testing.T) { - case1Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, AWSSecureTransport}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestBooleanFuncToMap(t *testing.T) { - case1Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Result := map[Key]ValueSet{ - AWSSecureTransport: NewValueSet(NewStringValue("true")), - } - - case2Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(false))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Result := map[Key]ValueSet{ - AWSSecureTransport: NewValueSet(NewStringValue("false")), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNewBooleanFunc(t *testing.T) { - case1Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(false))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {AWSSecureTransport, NewValueSet(NewBoolValue(true)), case1Function, false}, - {AWSSecureTransport, NewValueSet(NewStringValue("false")), case2Function, false}, - // Multiple values error. - {AWSSecureTransport, NewValueSet(NewStringValue("true"), NewStringValue("false")), nil, true}, - // Invalid boolean string error. - {AWSSecureTransport, NewValueSet(NewStringValue("foo")), nil, true}, - // Invalid value error. 
- {AWSSecureTransport, NewValueSet(NewIntValue(7)), nil, true}, - } - - for i, testCase := range testCases { - result, err := newBooleanFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/dateequalsfunc.go b/pkg/bucket/policy/condition/dateequalsfunc.go deleted file mode 100644 index 64871f9b..00000000 --- a/pkg/bucket/policy/condition/dateequalsfunc.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "reflect" - "time" -) - -func toDateEqualsFuncString(n name, key Key, value time.Time) string { - return fmt.Sprintf("%v:%v:%v", n, key, value.Format(time.RFC3339)) -} - -// dateEqualsFunc - String equals function. It checks whether value by Key in given -// values map is in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is in values. -type dateEqualsFunc struct { - k Key - value time.Time -} - -// evaluate() - evaluates to check whether value by Key in given values is in -// condition values. -func (f dateEqualsFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - t, err := time.Parse(time.RFC3339, requestValue[0]) - if err != nil { - return false - } - - return f.value.Equal(t) -} - -// key() - returns condition key which is used by this condition function. -func (f dateEqualsFunc) key() Key { - return f.k -} - -// name() - returns "DateEquals" condition name. -func (f dateEqualsFunc) name() name { - return dateEquals -} - -func (f dateEqualsFunc) String() string { - return toDateEqualsFuncString(dateEquals, f.k, f.value) -} - -// toMap - returns map representation of this function. -func (f dateEqualsFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - values.Add(NewStringValue(f.value.Format(time.RFC3339))) - - return map[Key]ValueSet{ - f.k: values, - } -} - -// dateNotEqualsFunc - String not equals function. It checks whether value by Key in -// given values is NOT in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is NOT in values. -type dateNotEqualsFunc struct { - dateEqualsFunc -} - -// evaluate() - evaluates to check whether value by Key in given values is NOT in -// condition values. 
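// (Editor's note, not part of the deleted file: a minimal sketch of the
// DateEquals family, using only identifiers from this diff; the pairing with
// S3ObjectLockRetainUntilDate is purely illustrative. Condition and request
// values must both be RFC3339 timestamps, and a missing or unparsable request
// value evaluates to false:
//
//	t, _ := time.Parse(time.RFC3339, "2009-11-10T15:00:00Z")
//	f, _ := NewDateEqualsFunc(S3ObjectLockRetainUntilDate, t)
//	// f matches only requests whose value for that key parses to exactly t;
//	// NewDateNotEqualsFunc negates the comparison.
//
// End of editor's note.)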
-func (f dateNotEqualsFunc) evaluate(values map[string][]string) bool { - return !f.dateEqualsFunc.evaluate(values) -} - -// name() - returns "DateNotEquals" condition name. -func (f dateNotEqualsFunc) name() name { - return dateNotEquals -} - -func (f dateNotEqualsFunc) String() string { - return toDateEqualsFuncString(dateNotEquals, f.dateEqualsFunc.k, f.dateEqualsFunc.value) -} - -func valueToTime(n name, values ValueSet) (v time.Time, err error) { - if len(values) != 1 { - return v, fmt.Errorf("only one value is allowed for %s condition", n) - } - - for vs := range values { - switch vs.GetType() { - case reflect.String: - s, err := vs.GetString() - if err != nil { - return v, err - } - if v, err = time.Parse(time.RFC3339, s); err != nil { - return v, fmt.Errorf("value %s must be a time.Time string for %s condition: %w", vs, n, err) - } - default: - return v, fmt.Errorf("value %s must be a time.Time for %s condition", vs, n) - } - } - - return v, nil - -} - -// newDateEqualsFunc - returns new DateEquals function. -func newDateEqualsFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToTime(dateEquals, values) - if err != nil { - return nil, err - } - - return NewDateEqualsFunc(key, v) -} - -// NewDateEqualsFunc - returns new DateEquals function. -func NewDateEqualsFunc(key Key, value time.Time) (Function, error) { - return &dateEqualsFunc{key, value}, nil -} - -// newDateNotEqualsFunc - returns new DateNotEquals function. -func newDateNotEqualsFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToTime(dateNotEquals, values) - if err != nil { - return nil, err - } - - return NewDateNotEqualsFunc(key, v) -} - -// NewDateNotEqualsFunc - returns new DateNotEquals function. -func NewDateNotEqualsFunc(key Key, value time.Time) (Function, error) { - return &dateNotEqualsFunc{dateEqualsFunc{key, value}}, nil -} diff --git a/pkg/bucket/policy/condition/dategreaterthanfunc.go b/pkg/bucket/policy/condition/dategreaterthanfunc.go deleted file mode 100644 index 25c375d5..00000000 --- a/pkg/bucket/policy/condition/dategreaterthanfunc.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "time" -) - -func toDateGreaterThanFuncString(n name, key Key, value time.Time) string { - return fmt.Sprintf("%v:%v:%v", n, key, value.Format(time.RFC3339)) -} - -// dateGreaterThanFunc - String equals function. It checks whether value by Key in given -// values map is in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is in values. -type dateGreaterThanFunc struct { - k Key - value time.Time -} - -// evaluate() - evaluates to check whether value by Key in given values is in -// condition values. 
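// (Editor's note, not part of the deleted file: DateGreaterThan is strict, it
// matches only request times strictly after the condition time, while the
// DateGreaterThanEquals variant below also accepts equality. A sketch using
// only identifiers from this diff; the key choice is illustrative:
//
//	limit, _ := time.Parse(time.RFC3339, "2020-01-01T00:00:00Z")
//	f, _ := NewDateGreaterThanFunc(S3ObjectLockRetainUntilDate, limit)
//	// f is true only for RFC3339 request values later than limit;
//	// equal or earlier times, or unparsable values, yield false.
//
// End of editor's note.)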
-func (f dateGreaterThanFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - t, err := time.Parse(time.RFC3339, requestValue[0]) - if err != nil { - return false - } - - return t.After(f.value) -} - -// key() - returns condition key which is used by this condition function. -func (f dateGreaterThanFunc) key() Key { - return f.k -} - -// name() - returns "DateGreaterThan" condition name. -func (f dateGreaterThanFunc) name() name { - return dateGreaterThan -} - -func (f dateGreaterThanFunc) String() string { - return toDateGreaterThanFuncString(dateGreaterThan, f.k, f.value) -} - -// toMap - returns map representation of this function. -func (f dateGreaterThanFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - values.Add(NewStringValue(f.value.Format(time.RFC3339))) - - return map[Key]ValueSet{ - f.k: values, - } -} - -// dateNotEqualsFunc - String not equals function. It checks whether value by Key in -// given values is NOT in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is NOT in values. -type dateGreaterThanEqualsFunc struct { - dateGreaterThanFunc -} - -// evaluate() - evaluates to check whether value by Key in given values is NOT in -// condition values. -func (f dateGreaterThanEqualsFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - t, err := time.Parse(time.RFC3339, requestValue[0]) - if err != nil { - return false - } - - return t.After(f.value) || t.Equal(f.value) -} - -// name() - returns "DateNotEquals" condition name. -func (f dateGreaterThanEqualsFunc) name() name { - return dateGreaterThanEquals -} - -func (f dateGreaterThanEqualsFunc) String() string { - return toDateGreaterThanFuncString(dateNotEquals, f.dateGreaterThanFunc.k, f.dateGreaterThanFunc.value) -} - -// newDateGreaterThanFunc - returns new DateGreaterThan function. -func newDateGreaterThanFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToTime(dateGreaterThan, values) - if err != nil { - return nil, err - } - - return NewDateGreaterThanFunc(key, v) -} - -// NewDateGreaterThanFunc - returns new DateGreaterThan function. -func NewDateGreaterThanFunc(key Key, value time.Time) (Function, error) { - return &dateGreaterThanFunc{key, value}, nil -} - -// newDateNotEqualsFunc - returns new DateNotEquals function. -func newDateGreaterThanEqualsFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToTime(dateNotEquals, values) - if err != nil { - return nil, err - } - - return NewDateGreaterThanEqualsFunc(key, v) -} - -// NewDateGreaterThanEqualsFunc - returns new DateNotEquals function. -func NewDateGreaterThanEqualsFunc(key Key, value time.Time) (Function, error) { - return &dateGreaterThanEqualsFunc{dateGreaterThanFunc{key, value}}, nil -} diff --git a/pkg/bucket/policy/condition/datelessthanfunc.go b/pkg/bucket/policy/condition/datelessthanfunc.go deleted file mode 100644 index 20b16d28..00000000 --- a/pkg/bucket/policy/condition/datelessthanfunc.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "time" -) - -func toDateLessThanFuncString(n name, key Key, value time.Time) string { - return fmt.Sprintf("%v:%v:%v", n, key, value.Format(time.RFC3339)) -} - -// dateLessThanFunc - String equals function. It checks whether value by Key in given -// values map is in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is in values. -type dateLessThanFunc struct { - k Key - value time.Time -} - -// evaluate() - evaluates to check whether value by Key in given values is in -// condition values. -func (f dateLessThanFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - t, err := time.Parse(time.RFC3339, requestValue[0]) - if err != nil { - return false - } - - return t.Before(f.value) -} - -// key() - returns condition key which is used by this condition function. -func (f dateLessThanFunc) key() Key { - return f.k -} - -// name() - returns "DateLessThan" condition name. -func (f dateLessThanFunc) name() name { - return dateLessThan -} - -func (f dateLessThanFunc) String() string { - return toDateLessThanFuncString(dateLessThan, f.k, f.value) -} - -// toMap - returns map representation of this function. -func (f dateLessThanFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - values.Add(NewStringValue(f.value.Format(time.RFC3339))) - - return map[Key]ValueSet{ - f.k: values, - } -} - -// dateNotEqualsFunc - String not equals function. It checks whether value by Key in -// given values is NOT in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is NOT in values. -type dateLessThanEqualsFunc struct { - dateLessThanFunc -} - -// evaluate() - evaluates to check whether value by Key in given values is NOT in -// condition values. -func (f dateLessThanEqualsFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - t, err := time.Parse(time.RFC3339, requestValue[0]) - if err != nil { - return false - } - - return t.Before(f.value) || t.Equal(f.value) -} - -// name() - returns "DateNotEquals" condition name. -func (f dateLessThanEqualsFunc) name() name { - return dateLessThanEquals -} - -func (f dateLessThanEqualsFunc) String() string { - return toDateLessThanFuncString(dateNotEquals, f.dateLessThanFunc.k, f.dateLessThanFunc.value) -} - -// newDateLessThanFunc - returns new DateLessThan function. 
-func newDateLessThanFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToTime(dateLessThan, values) - if err != nil { - return nil, err - } - - return NewDateLessThanFunc(key, v) -} - -// NewDateLessThanFunc - returns new DateLessThan function. -func NewDateLessThanFunc(key Key, value time.Time) (Function, error) { - return &dateLessThanFunc{key, value}, nil -} - -// newDateNotEqualsFunc - returns new DateNotEquals function. -func newDateLessThanEqualsFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToTime(dateNotEquals, values) - if err != nil { - return nil, err - } - - return NewDateLessThanEqualsFunc(key, v) -} - -// NewDateLessThanEqualsFunc - returns new DateNotEquals function. -func NewDateLessThanEqualsFunc(key Key, value time.Time) (Function, error) { - return &dateLessThanEqualsFunc{dateLessThanFunc{key, value}}, nil -} diff --git a/pkg/bucket/policy/condition/func.go b/pkg/bucket/policy/condition/func.go deleted file mode 100644 index 08a368b3..00000000 --- a/pkg/bucket/policy/condition/func.go +++ /dev/null @@ -1,187 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "encoding/json" - "fmt" - "sort" -) - -// Function - condition function interface. -type Function interface { - // evaluate() - evaluates this condition function with given values. - evaluate(values map[string][]string) bool - - // key() - returns condition key used in this function. - key() Key - - // name() - returns condition name of this function. - name() name - - // String() - returns string representation of function. - String() string - - // toMap - returns map representation of this function. - toMap() map[Key]ValueSet -} - -// Functions - list of functions. -type Functions []Function - -// Evaluate - evaluates all functions with given values map. Each function is evaluated -// sequencely and next function is called only if current function succeeds. -func (functions Functions) Evaluate(values map[string][]string) bool { - for _, f := range functions { - if !f.evaluate(values) { - return false - } - } - - return true -} - -// Keys - returns list of keys used in all functions. -func (functions Functions) Keys() KeySet { - keySet := NewKeySet() - - for _, f := range functions { - keySet.Add(f.key()) - } - - return keySet -} - -// MarshalJSON - encodes Functions to JSON data. 
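// (Editor's note, not part of the deleted file: the encoded form is a map of
// condition name to key to value list, matching the fixtures in the tests
// below, for example:
//
//	{"IpAddress": {"aws:SourceIp": ["192.168.1.0/24"]},
//	 "StringEquals": {"s3:x-amz-copy-source": ["mybucket/myobject"]}}
//
// End of editor's note.)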
-func (functions Functions) MarshalJSON() ([]byte, error) { - nm := make(map[name]map[Key]ValueSet) - - for _, f := range functions { - if _, ok := nm[f.name()]; ok { - for k, v := range f.toMap() { - nm[f.name()][k] = v - } - } else { - nm[f.name()] = f.toMap() - } - } - - return json.Marshal(nm) -} - -func (functions Functions) String() string { - funcStrings := []string{} - for _, f := range functions { - s := fmt.Sprintf("%v", f) - funcStrings = append(funcStrings, s) - } - sort.Strings(funcStrings) - - return fmt.Sprintf("%v", funcStrings) -} - -var conditionFuncMap = map[name]func(Key, ValueSet) (Function, error){ - stringEquals: newStringEqualsFunc, - stringNotEquals: newStringNotEqualsFunc, - stringEqualsIgnoreCase: newStringEqualsIgnoreCaseFunc, - stringNotEqualsIgnoreCase: newStringNotEqualsIgnoreCaseFunc, - binaryEquals: newBinaryEqualsFunc, - stringLike: newStringLikeFunc, - stringNotLike: newStringNotLikeFunc, - ipAddress: newIPAddressFunc, - notIPAddress: newNotIPAddressFunc, - null: newNullFunc, - boolean: newBooleanFunc, - numericEquals: newNumericEqualsFunc, - numericNotEquals: newNumericNotEqualsFunc, - numericLessThan: newNumericLessThanFunc, - numericLessThanEquals: newNumericLessThanEqualsFunc, - numericGreaterThan: newNumericGreaterThanFunc, - numericGreaterThanEquals: newNumericGreaterThanEqualsFunc, - dateEquals: newDateEqualsFunc, - dateNotEquals: newDateNotEqualsFunc, - dateLessThan: newDateLessThanFunc, - dateLessThanEquals: newDateLessThanEqualsFunc, - dateGreaterThan: newDateGreaterThanFunc, - dateGreaterThanEquals: newDateGreaterThanEqualsFunc, - // Add new conditions here. -} - -// UnmarshalJSON - decodes JSON data to Functions. -func (functions *Functions) UnmarshalJSON(data []byte) error { - // As string kind, int kind then json.Unmarshaler is checked at - // https://github.com/golang/go/blob/master/src/encoding/json/decode.go#L618 - // UnmarshalJSON() is not called for types extending string - // see https://play.golang.org/p/HrSsKksHvrS, better way to do is - // https://play.golang.org/p/y9ElWpBgVAB - // - // Due to this issue, name and Key types cannot be used as map keys below. - nm := make(map[string]map[string]ValueSet) - if err := json.Unmarshal(data, &nm); err != nil { - return err - } - - if len(nm) == 0 { - return fmt.Errorf("condition must not be empty") - } - - funcs := []Function{} - for nameString, args := range nm { - n, err := parseName(nameString) - if err != nil { - return err - } - - for keyString, values := range args { - key, err := parseKey(keyString) - if err != nil { - return err - } - - vfn, ok := conditionFuncMap[n] - if !ok { - return fmt.Errorf("condition %v is not handled", n) - } - - f, err := vfn(key, values) - if err != nil { - return err - } - - funcs = append(funcs, f) - } - } - - *functions = funcs - - return nil -} - -// GobEncode - encodes Functions to gob data. -func (functions Functions) GobEncode() ([]byte, error) { - return functions.MarshalJSON() -} - -// GobDecode - decodes gob data to Functions. -func (functions *Functions) GobDecode(data []byte) error { - return functions.UnmarshalJSON(data) -} - -// NewFunctions - returns new Functions with given function list. 
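// (Editor's note, not part of the deleted file: a minimal sketch of composing
// conditions, using only constructors shown in this diff; requestValues stands
// in for the request's map[string][]string. Evaluate applies a logical AND, so
// every function must match:
//
//	secure, _ := NewBoolFunc(AWSSecureTransport, "true")
//	copySrc, _ := NewBinaryEqualsFunc(S3XAmzCopySource,
//		base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))
//	fns := NewFunctions(secure, copySrc)
//	allowed := fns.Evaluate(requestValues) // true only if both conditions hold
//
// End of editor's note.)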
-func NewFunctions(functions ...Function) Functions { - return Functions(functions) -} diff --git a/pkg/bucket/policy/condition/func_test.go b/pkg/bucket/policy/condition/func_test.go deleted file mode 100644 index 77f47be6..00000000 --- a/pkg/bucket/policy/condition/func_test.go +++ /dev/null @@ -1,353 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestFunctionsEvaluate(t *testing.T) { - func1, err := newNullFunc(S3XAmzCopySource, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func2, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func3, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func4, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Function := NewFunctions(func1, func2, func3, func4) - - testCases := []struct { - functions Functions - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - }, false}, - {case1Function, map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - "Refer": {"http://example.org/"}, - }, false}, - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false}, - {case1Function, map[string][]string{"SourceIp": {"192.168.1.10"}}, false}, - {case1Function, map[string][]string{ - "x-amz-copy-source": {"mybucket/yourobject"}, - "SourceIp": {"192.168.1.10"}, - }, false}, - {case1Function, map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.2.10"}, - }, false}, - {case1Function, map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "Refer": {"http://example.org/"}, - }, false}, - } - - for i, testCase := range testCases { - result := testCase.functions.Evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Errorf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestFunctionsKeys(t *testing.T) { - func1, err := newNullFunc(S3XAmzCopySource, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func2, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func3, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - func4, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - functions Functions - expectedResult KeySet - }{ - {NewFunctions(func1, func2, func3, func4), NewKeySet(S3XAmzCopySource, AWSSourceIP)}, - } - - for i, testCase := range testCases { - result := testCase.functions.Keys() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestFunctionsMarshalJSON(t *testing.T) { - func1, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func2, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func3, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func4, err := newNotIPAddressFunc(AWSSourceIP, - NewValueSet(NewStringValue("10.1.10.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func5, err := newStringNotLikeFunc(S3XAmzStorageClass, NewValueSet(NewStringValue("STANDARD"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func6, err := newNullFunc(S3XAmzServerSideEncryptionCustomerAlgorithm, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func7, err := newIPAddressFunc(AWSSourceIP, - NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case1Result := []byte(`{"IpAddress":{"aws:SourceIp":["192.168.1.0/24"]},"NotIpAddress":{"aws:SourceIp":["10.1.10.0/24"]},"Null":{"s3:x-amz-server-side-encryption-customer-algorithm":[true]},"StringEquals":{"s3:x-amz-copy-source":["mybucket/myobject"]},"StringLike":{"s3:x-amz-metadata-directive":["REPL*"]},"StringNotEquals":{"s3:x-amz-server-side-encryption":["AES256"]},"StringNotLike":{"s3:x-amz-storage-class":["STANDARD"]}}`) - - case2Result := []byte(`{"Null":{"s3:x-amz-server-side-encryption-customer-algorithm":[true]}}`) - - testCases := []struct { - functions Functions - expectedResult []byte - expectErr bool - }{ - {NewFunctions(func1, func2, func3, func4, func5, func6, func7), case1Result, false}, - {NewFunctions(func6), case2Result, false}, - {NewFunctions(), []byte(`{}`), false}, - {nil, []byte(`{}`), false}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.functions) - expectErr := (err != nil) - - if testCase.expectErr != expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestFunctionsUnmarshalJSON(t *testing.T) { - case1Data := []byte(`{ - "StringLike": { - "s3:x-amz-metadata-directive": "REPL*" - }, - "StringEquals": { - "s3:x-amz-copy-source": "mybucket/myobject" - }, - "StringNotEquals": { - "s3:x-amz-server-side-encryption": "AES256" - }, - "NotIpAddress": { - "aws:SourceIp": [ - "10.1.10.0/24", - "10.10.1.0/24" - ] - }, - "StringNotLike": { - "s3:x-amz-storage-class": "STANDARD" - }, - "Null": { - "s3:x-amz-server-side-encryption-customer-algorithm": true - }, - "IpAddress": { - "aws:SourceIp": [ - "192.168.1.0/24", - "192.168.2.0/24" - ] - } -}`) - func1, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func2, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func3, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func4, err := newNotIPAddressFunc(AWSSourceIP, - NewValueSet(NewStringValue("10.1.10.0/24"), NewStringValue("10.10.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func5, err := newStringNotLikeFunc(S3XAmzStorageClass, NewValueSet(NewStringValue("STANDARD"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func6, err := newNullFunc(S3XAmzServerSideEncryptionCustomerAlgorithm, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func7, err := newIPAddressFunc(AWSSourceIP, - NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("192.168.2.0/24"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case2Data := []byte(`{ - "Null": { - "s3:x-amz-server-side-encryption-customer-algorithm": true - }, - "Null": { - "s3:x-amz-server-side-encryption-customer-algorithm": "true" - } -}`) - - case3Data := []byte(`{}`) - - case4Data := []byte(`{ - "StringLike": { - "s3:x-amz-metadata-directive": "REPL*" - }, - "StringEquals": { - "s3:x-amz-copy-source": "mybucket/myobject", - "s3:prefix": [ - "", - "home/" - ], - "s3:delimiter": [ - "/" - ] - }, - "StringNotEquals": { - "s3:x-amz-server-side-encryption": "AES256" - }, - "NotIpAddress": { - "aws:SourceIp": [ - "10.1.10.0/24", - "10.10.1.0/24" - ] - }, - "StringNotLike": { - "s3:x-amz-storage-class": "STANDARD" - }, - "Null": { - "s3:x-amz-server-side-encryption-customer-algorithm": true - }, - "IpAddress": { - "aws:SourceIp": [ - "192.168.1.0/24", - "192.168.2.0/24" - ] - } -}`) - - func2_1, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func2_2, err := newStringEqualsFunc(S3Prefix, NewValueSet(NewStringValue(""), NewStringValue("home/"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func2_3, err := newStringEqualsFunc(S3Delimiter, NewValueSet(NewStringValue("/"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - data []byte - expectedResult Functions - expectErr bool - }{ - // Success case, basic conditions. - {case1Data, NewFunctions(func1, func2, func3, func4, func5, func6, func7), false}, - // Duplicate conditions, success case only one value is preserved. - {case2Data, NewFunctions(func6), false}, - // empty condition error. - {case3Data, nil, true}, - // Success case multiple keys, same condition. - {case4Data, NewFunctions(func1, func2_1, func2_2, func2_3, func3, func4, func5, func6, func7), false}, - } - - for i, testCase := range testCases { - result := new(Functions) - err := json.Unmarshal(testCase.data, result) - expectErr := (err != nil) - - if testCase.expectErr != expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if (*result).String() != testCase.expectedResult.String() { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, *result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/ipaddressfunc.go b/pkg/bucket/policy/condition/ipaddressfunc.go deleted file mode 100644 index 4f80f378..00000000 --- a/pkg/bucket/policy/condition/ipaddressfunc.go +++ /dev/null @@ -1,186 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package condition - -import ( - "fmt" - "net" - "net/http" - "sort" -) - -func toIPAddressFuncString(n name, key Key, values []*net.IPNet) string { - valueStrings := []string{} - for _, value := range values { - valueStrings = append(valueStrings, value.String()) - } - sort.Strings(valueStrings) - - return fmt.Sprintf("%v:%v:%v", n, key, valueStrings) -} - -// ipAddressFunc - IP address function. It checks whether value by Key in given -// values is in IP network. Here Key must be AWSSourceIP. -// For example, -// - if values = [192.168.1.0/24], at evaluate() it returns whether IP address -// in value map for AWSSourceIP falls in the network 192.168.1.10/24. -type ipAddressFunc struct { - k Key - values []*net.IPNet -} - -// evaluate() - evaluates to check whether IP address in values map for AWSSourceIP -// falls in one of network or not. -func (f ipAddressFunc) evaluate(values map[string][]string) bool { - IPs := []net.IP{} - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - for _, s := range requestValue { - IP := net.ParseIP(s) - if IP == nil { - panic(fmt.Errorf("invalid IP address '%v'", s)) - } - - IPs = append(IPs, IP) - } - - for _, IP := range IPs { - for _, IPNet := range f.values { - if IPNet.Contains(IP) { - return true - } - } - } - - return false -} - -// key() - returns condition key which is used by this condition function. -// Key is always AWSSourceIP. -func (f ipAddressFunc) key() Key { - return f.k -} - -// name() - returns "IpAddress" condition name. -func (f ipAddressFunc) name() name { - return ipAddress -} - -func (f ipAddressFunc) String() string { - return toIPAddressFuncString(ipAddress, f.k, f.values) -} - -// toMap - returns map representation of this function. -func (f ipAddressFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - for _, value := range f.values { - values.Add(NewStringValue(value.String())) - } - - return map[Key]ValueSet{ - f.k: values, - } -} - -// notIPAddressFunc - Not IP address function. It checks whether value by Key in given -// values is NOT in IP network. Here Key must be AWSSourceIP. -// For example, -// - if values = [192.168.1.0/24], at evaluate() it returns whether IP address -// in value map for AWSSourceIP does not fall in the network 192.168.1.10/24. -type notIPAddressFunc struct { - ipAddressFunc -} - -// evaluate() - evaluates to check whether IP address in values map for AWSSourceIP -// does not fall in one of network. -func (f notIPAddressFunc) evaluate(values map[string][]string) bool { - return !f.ipAddressFunc.evaluate(values) -} - -// name() - returns "NotIpAddress" condition name. -func (f notIPAddressFunc) name() name { - return notIPAddress -} - -func (f notIPAddressFunc) String() string { - return toIPAddressFuncString(notIPAddress, f.ipAddressFunc.k, f.ipAddressFunc.values) -} - -func valuesToIPNets(n name, values ValueSet) ([]*net.IPNet, error) { - IPNets := []*net.IPNet{} - for v := range values { - s, err := v.GetString() - if err != nil { - return nil, fmt.Errorf("value %v must be string representation of CIDR for %v condition", v, n) - } - - var IPNet *net.IPNet - _, IPNet, err = net.ParseCIDR(s) - if err != nil { - return nil, fmt.Errorf("value %v must be CIDR string for %v condition", s, n) - } - - IPNets = append(IPNets, IPNet) - } - - return IPNets, nil -} - -// newIPAddressFunc - returns new IP address function. 
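For a sense of how the IpAddress condition defined above is meant to be consumed, here is a minimal usage sketch, assuming the pre-move import path github.com/minio/minio/pkg/bucket/policy/condition and the exported constructors in this file:

    package main

    import (
        "fmt"
        "net"

        "github.com/minio/minio/pkg/bucket/policy/condition"
    )

    func main() {
        // Allow only requests whose source address falls inside 192.168.1.0/24.
        _, ipnet, err := net.ParseCIDR("192.168.1.0/24")
        if err != nil {
            panic(err)
        }

        ipFn, err := condition.NewIPAddressFunc(condition.AWSSourceIP, ipnet)
        if err != nil {
            panic(err)
        }

        conds := condition.NewFunctions(ipFn)

        // The evaluator looks the key up by its short name ("SourceIp").
        fmt.Println(conds.Evaluate(map[string][]string{"SourceIp": {"192.168.1.10"}})) // true
        fmt.Println(conds.Evaluate(map[string][]string{"SourceIp": {"10.0.0.1"}}))     // false
    }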
-func newIPAddressFunc(key Key, values ValueSet) (Function, error) { - IPNets, err := valuesToIPNets(ipAddress, values) - if err != nil { - return nil, err - } - - return NewIPAddressFunc(key, IPNets...) -} - -// NewIPAddressFunc - returns new IP address function. -func NewIPAddressFunc(key Key, IPNets ...*net.IPNet) (Function, error) { - if key != AWSSourceIP { - return nil, fmt.Errorf("only %v key is allowed for %v condition", AWSSourceIP, ipAddress) - } - - return &ipAddressFunc{key, IPNets}, nil -} - -// newNotIPAddressFunc - returns new Not IP address function. -func newNotIPAddressFunc(key Key, values ValueSet) (Function, error) { - IPNets, err := valuesToIPNets(notIPAddress, values) - if err != nil { - return nil, err - } - - return NewNotIPAddressFunc(key, IPNets...) -} - -// NewNotIPAddressFunc - returns new Not IP address function. -func NewNotIPAddressFunc(key Key, IPNets ...*net.IPNet) (Function, error) { - if key != AWSSourceIP { - return nil, fmt.Errorf("only %v key is allowed for %v condition", AWSSourceIP, notIPAddress) - } - - return ¬IPAddressFunc{ipAddressFunc{key, IPNets}}, nil -} diff --git a/pkg/bucket/policy/condition/ipaddressfunc_test.go b/pkg/bucket/policy/condition/ipaddressfunc_test.go deleted file mode 100644 index 6cbfcfbe..00000000 --- a/pkg/bucket/policy/condition/ipaddressfunc_test.go +++ /dev/null @@ -1,278 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "reflect" - "testing" -) - -func TestIPAddressFuncEvaluate(t *testing.T) { - case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"SourceIp": {"192.168.1.10"}}, true}, - {case1Function, map[string][]string{"SourceIp": {"192.168.2.10"}}, false}, - {case1Function, map[string][]string{}, false}, - {case1Function, map[string][]string{"delimiter": {"/"}}, false}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestIPAddressFuncKey(t *testing.T) { - case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, AWSSourceIP}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestIPAddressFuncToMap(t *testing.T) { - case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Result := map[Key]ValueSet{ - AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24")), - } - - case2Result := map[Key]ValueSet{ - AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {&ipAddressFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNotIPAddressFuncEvaluate(t *testing.T) { - case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"SourceIp": {"192.168.2.10"}}, true}, - {case1Function, map[string][]string{}, true}, - {case1Function, map[string][]string{"delimiter": {"/"}}, true}, - {case1Function, map[string][]string{"SourceIp": {"192.168.1.10"}}, false}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNotIPAddressFuncKey(t *testing.T) { - case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, AWSSourceIP}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNotIPAddressFuncToMap(t *testing.T) { - case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case1Result := map[Key]ValueSet{ - AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24")), - } - - case2Result := map[Key]ValueSet{ - AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {¬IPAddressFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNewIPAddressFunc(t *testing.T) { - case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")), case1Function, false}, - {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), case2Function, false}, - // Unsupported key error. - {S3Prefix, NewValueSet(NewStringValue("192.168.1.0/24")), nil, true}, - // Invalid value error. - {AWSSourceIP, NewValueSet(NewStringValue("node1.example.org")), nil, true}, - // Invalid CIDR format error. - {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0.0/24")), nil, true}, - } - - for i, testCase := range testCases { - result, err := newIPAddressFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if result.String() != testCase.expectedResult.String() { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestNewNotIPAddressFunc(t *testing.T) { - case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")), case1Function, false}, - {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), case2Function, false}, - // Unsupported key error. - {S3Prefix, NewValueSet(NewStringValue("192.168.1.0/24")), nil, true}, - // Invalid value error. - {AWSSourceIP, NewValueSet(NewStringValue("node1.example.org")), nil, true}, - // Invalid CIDR format error. 
- {AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0.0/24")), nil, true}, - } - - for i, testCase := range testCases { - result, err := newNotIPAddressFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if result.String() != testCase.expectedResult.String() { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/jwt.go b/pkg/bucket/policy/condition/jwt.go deleted file mode 100644 index 422fff55..00000000 --- a/pkg/bucket/policy/condition/jwt.go +++ /dev/null @@ -1,75 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -// JWT claims supported substitutions. -// https://www.iana.org/assignments/jwt/jwt.xhtml#claims -const ( - // JWTSub - JWT subject claim substitution. - JWTSub Key = "jwt:sub" - - // JWTIss issuer claim substitution. - JWTIss Key = "jwt:iss" - - // JWTAud audience claim substitution. - JWTAud Key = "jwt:aud" - - // JWTJti JWT unique identifier claim substitution. - JWTJti Key = "jwt:jti" - - JWTName Key = "jwt:name" - JWTGivenName Key = "jwt:given_name" - JWTFamilyName Key = "jwt:family_name" - JWTMiddleName Key = "jwt:middle_name" - JWTNickName Key = "jwt:nickname" - JWTPrefUsername Key = "jwt:preferred_username" - JWTProfile Key = "jwt:profile" - JWTPicture Key = "jwt:picture" - JWTWebsite Key = "jwt:website" - JWTEmail Key = "jwt:email" - JWTGender Key = "jwt:gender" - JWTBirthdate Key = "jwt:birthdate" - JWTPhoneNumber Key = "jwt:phone_number" - JWTAddress Key = "jwt:address" - JWTScope Key = "jwt:scope" - JWTClientID Key = "jwt:client_id" -) - -// JWTKeys - Supported JWT keys, non-exhaustive list please -// expand as new claims are standardized. -var JWTKeys = []Key{ - JWTSub, - JWTIss, - JWTAud, - JWTJti, - JWTName, - JWTGivenName, - JWTFamilyName, - JWTMiddleName, - JWTNickName, - JWTPrefUsername, - JWTProfile, - JWTPicture, - JWTWebsite, - JWTEmail, - JWTGender, - JWTBirthdate, - JWTPhoneNumber, - JWTAddress, - JWTScope, - JWTClientID, -} diff --git a/pkg/bucket/policy/condition/key.go b/pkg/bucket/policy/condition/key.go deleted file mode 100644 index 08f490a9..00000000 --- a/pkg/bucket/policy/condition/key.go +++ /dev/null @@ -1,292 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "encoding/json" - "fmt" - "strings" -) - -// Key - conditional key which is used to fetch values for any condition. -// Refer https://docs.aws.amazon.com/IAM/latest/UserGuide/list_s3.html -// for more information about available condition keys. -type Key string - -const ( - // S3XAmzCopySource - key representing x-amz-copy-source HTTP header applicable to PutObject API only. - S3XAmzCopySource Key = "s3:x-amz-copy-source" - - // S3XAmzServerSideEncryption - key representing x-amz-server-side-encryption HTTP header applicable - // to PutObject API only. - S3XAmzServerSideEncryption Key = "s3:x-amz-server-side-encryption" - - // S3XAmzServerSideEncryptionCustomerAlgorithm - key representing - // x-amz-server-side-encryption-customer-algorithm HTTP header applicable to PutObject API only. - S3XAmzServerSideEncryptionCustomerAlgorithm Key = "s3:x-amz-server-side-encryption-customer-algorithm" - - // S3XAmzMetadataDirective - key representing x-amz-metadata-directive HTTP header applicable to - // PutObject API only. - S3XAmzMetadataDirective Key = "s3:x-amz-metadata-directive" - - // S3XAmzContentSha256 - set a static content-sha256 for all calls for a given action. - S3XAmzContentSha256 = "s3:x-amz-content-sha256" - - // S3XAmzStorageClass - key representing x-amz-storage-class HTTP header applicable to PutObject API - // only. - S3XAmzStorageClass Key = "s3:x-amz-storage-class" - - // S3LocationConstraint - key representing LocationConstraint XML tag of CreateBucket API only. - S3LocationConstraint Key = "s3:LocationConstraint" - - // S3Prefix - key representing prefix query parameter of ListBucket API only. - S3Prefix Key = "s3:prefix" - - // S3Delimiter - key representing delimiter query parameter of ListBucket API only. - S3Delimiter Key = "s3:delimiter" - - // S3MaxKeys - key representing max-keys query parameter of ListBucket API only. - S3MaxKeys Key = "s3:max-keys" - - // S3ObjectLockRemainingRetentionDays - key representing object-lock-remaining-retention-days - // Enables enforcement of an object's retention relative to its remaining retention days; you can set - // minimum and maximum allowable retention periods for a bucket using a bucket policy. - // This key is specific to the s3:PutObjectRetention API. - S3ObjectLockRemainingRetentionDays Key = "s3:object-lock-remaining-retention-days" - - // S3ObjectLockMode - key representing object-lock-mode - // Enables enforcement of the specified object retention mode - S3ObjectLockMode Key = "s3:object-lock-mode" - - // S3ObjectLockRetainUntilDate - key representing object-lock-retain-until-date - // Enables enforcement of a specific retain-until-date - S3ObjectLockRetainUntilDate Key = "s3:object-lock-retain-until-date" - - // S3ObjectLockLegalHold - key representing object-lock-legal-hold - // Enables enforcement of the specified object legal hold status - S3ObjectLockLegalHold Key = "s3:object-lock-legal-hold" - - // AWSReferer - key representing Referer header of any API. - AWSReferer Key = "aws:Referer" - - // AWSSourceIP - key representing client's IP address (not intermediate proxies) of any API. - AWSSourceIP Key = "aws:SourceIp" - - // AWSUserAgent - key representing UserAgent header for any API. - AWSUserAgent Key = "aws:UserAgent" - - // AWSSecureTransport - key representing whether the client's request was sent over a secure (TLS) transport or not.
- AWSSecureTransport Key = "aws:SecureTransport" - - // AWSCurrentTime - key representing the current time. - AWSCurrentTime Key = "aws:CurrentTime" - - // AWSEpochTime - key representing the current epoch time. - AWSEpochTime Key = "aws:EpochTime" - - // AWSPrincipalType - user principal type currently supported values are "User" and "Anonymous". - AWSPrincipalType Key = "aws:principaltype" - - // AWSUserID - user unique ID, in MinIO this value is same as your user Access Key. - AWSUserID Key = "aws:userid" - - // AWSUsername - user friendly name, in MinIO this value is same as your user Access Key. - AWSUsername Key = "aws:username" -) - -// AllSupportedKeys - is list of all all supported keys. -var AllSupportedKeys = append([]Key{ - S3XAmzCopySource, - S3XAmzServerSideEncryption, - S3XAmzServerSideEncryptionCustomerAlgorithm, - S3XAmzMetadataDirective, - S3XAmzStorageClass, - S3XAmzContentSha256, - S3LocationConstraint, - S3Prefix, - S3Delimiter, - S3MaxKeys, - S3ObjectLockRemainingRetentionDays, - S3ObjectLockMode, - S3ObjectLockLegalHold, - S3ObjectLockRetainUntilDate, - AWSReferer, - AWSSourceIP, - AWSUserAgent, - AWSSecureTransport, - AWSCurrentTime, - AWSEpochTime, - AWSPrincipalType, - AWSUserID, - AWSUsername, - // Add new supported condition keys. -}, JWTKeys...) - -// CommonKeys - is list of all common condition keys. -var CommonKeys = append([]Key{ - AWSReferer, - AWSSourceIP, - AWSUserAgent, - AWSSecureTransport, - AWSCurrentTime, - AWSEpochTime, - AWSPrincipalType, - AWSUserID, - AWSUsername, - S3XAmzContentSha256, -}, JWTKeys...) - -func substFuncFromValues(values map[string][]string) func(string) string { - return func(v string) string { - for _, key := range CommonKeys { - // Empty values are not supported for policy variables. - if rvalues, ok := values[key.Name()]; ok && rvalues[0] != "" { - v = strings.Replace(v, key.VarName(), rvalues[0], -1) - } - } - return v - } -} - -// IsValid - checks if key is valid or not. -func (key Key) IsValid() bool { - for _, supKey := range AllSupportedKeys { - if supKey == key { - return true - } - } - - return false -} - -// MarshalJSON - encodes Key to JSON data. -func (key Key) MarshalJSON() ([]byte, error) { - if !key.IsValid() { - return nil, fmt.Errorf("unknown key %v", key) - } - - return json.Marshal(string(key)) -} - -// VarName - returns variable key name, such as "${aws:username}" -func (key Key) VarName() string { - return fmt.Sprintf("${%s}", key) -} - -// Name - returns key name which is stripped value of prefixes "aws:" and "s3:" -func (key Key) Name() string { - keyString := string(key) - - if strings.HasPrefix(keyString, "aws:") { - return strings.TrimPrefix(keyString, "aws:") - } else if strings.HasPrefix(keyString, "jwt:") { - return strings.TrimPrefix(keyString, "jwt:") - } - return strings.TrimPrefix(keyString, "s3:") -} - -// UnmarshalJSON - decodes JSON data to Key. -func (key *Key) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - parsedKey, err := parseKey(s) - if err != nil { - return err - } - - *key = parsedKey - return nil -} - -func parseKey(s string) (Key, error) { - key := Key(s) - - if key.IsValid() { - return key, nil - } - - return key, fmt.Errorf("invalid condition key '%v'", s) -} - -// KeySet - set representation of slice of keys. -type KeySet map[Key]struct{} - -// Add - add a key to key set. -func (set KeySet) Add(key Key) { - set[key] = struct{}{} -} - -// Difference - returns a key set contains difference of two keys. 
-// Example: -// keySet1 := ["one", "two", "three"] -// keySet2 := ["two", "four", "three"] -// keySet1.Difference(keySet2) == ["one"] -func (set KeySet) Difference(sset KeySet) KeySet { - nset := make(KeySet) - - for k := range set { - if _, ok := sset[k]; !ok { - nset.Add(k) - } - } - - return nset -} - -// IsEmpty - returns whether key set is empty or not. -func (set KeySet) IsEmpty() bool { - return len(set) == 0 -} - -func (set KeySet) String() string { - return fmt.Sprintf("%v", set.ToSlice()) -} - -// ToSlice - returns slice of keys. -func (set KeySet) ToSlice() []Key { - keys := []Key{} - - for key := range set { - keys = append(keys, key) - } - - return keys -} - -// NewKeySet - returns new KeySet contains given keys. -func NewKeySet(keys ...Key) KeySet { - set := make(KeySet) - for _, key := range keys { - set.Add(key) - } - - return set -} - -// AllSupportedAdminKeys - is list of all admin supported keys. -var AllSupportedAdminKeys = []Key{ - AWSReferer, - AWSSourceIP, - AWSUserAgent, - AWSSecureTransport, - AWSCurrentTime, - AWSEpochTime, - // Add new supported condition keys. -} diff --git a/pkg/bucket/policy/condition/key_test.go b/pkg/bucket/policy/condition/key_test.go deleted file mode 100644 index 001a47b5..00000000 --- a/pkg/bucket/policy/condition/key_test.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package condition - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestKeyIsValid(t *testing.T) { - testCases := []struct { - key Key - expectedResult bool - }{ - {S3XAmzCopySource, true}, - {S3XAmzServerSideEncryption, true}, - {S3XAmzServerSideEncryptionCustomerAlgorithm, true}, - {S3XAmzMetadataDirective, true}, - {S3XAmzStorageClass, true}, - {S3LocationConstraint, true}, - {S3Prefix, true}, - {S3Delimiter, true}, - {S3MaxKeys, true}, - {AWSReferer, true}, - {AWSSourceIP, true}, - {Key("foo"), false}, - } - - for i, testCase := range testCases { - result := testCase.key.IsValid() - - if testCase.expectedResult != result { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestKeyMarshalJSON(t *testing.T) { - testCases := []struct { - key Key - expectedResult []byte - expectErr bool - }{ - {S3XAmzCopySource, []byte(`"s3:x-amz-copy-source"`), false}, - {Key("foo"), nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.key) - expectErr := (err != nil) - - if testCase.expectErr != expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: key: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestKeyName(t *testing.T) { - testCases := []struct { - key Key - expectedResult string - }{ - {S3XAmzCopySource, "x-amz-copy-source"}, - {AWSReferer, "Referer"}, - } - - for i, testCase := range testCases { - result := testCase.key.Name() - - if testCase.expectedResult != result { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestKeyUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedKey Key - expectErr bool - }{ - {[]byte(`"s3:x-amz-copy-source"`), S3XAmzCopySource, false}, - {[]byte(`"foo"`), Key(""), true}, - } - - for i, testCase := range testCases { - var key Key - err := json.Unmarshal(testCase.data, &key) - expectErr := (err != nil) - - if testCase.expectErr != expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if testCase.expectedKey != key { - t.Fatalf("case %v: key: expected: %v, got: %v\n", i+1, testCase.expectedKey, key) - } - } - } -} - -func TestKeySetAdd(t *testing.T) { - testCases := []struct { - set KeySet - key Key - expectedResult KeySet - }{ - {NewKeySet(), S3XAmzCopySource, NewKeySet(S3XAmzCopySource)}, - {NewKeySet(S3XAmzCopySource), S3XAmzCopySource, NewKeySet(S3XAmzCopySource)}, - } - - for i, testCase := range testCases { - testCase.set.Add(testCase.key) - - if !reflect.DeepEqual(testCase.expectedResult, testCase.set) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) - } - } -} - -func TestKeySetDifference(t *testing.T) { - testCases := []struct { - set KeySet - setToDiff KeySet - expectedResult KeySet - }{ - {NewKeySet(), NewKeySet(S3XAmzCopySource), NewKeySet()}, - {NewKeySet(S3Prefix, S3Delimiter, S3MaxKeys), NewKeySet(S3Delimiter, S3MaxKeys), NewKeySet(S3Prefix)}, - } - - for i, testCase := range testCases { - result := testCase.set.Difference(testCase.setToDiff) - - if !reflect.DeepEqual(testCase.expectedResult, result) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func 
TestKeySetIsEmpty(t *testing.T) { - testCases := []struct { - set KeySet - expectedResult bool - }{ - {NewKeySet(), true}, - {NewKeySet(S3Delimiter), false}, - } - - for i, testCase := range testCases { - result := testCase.set.IsEmpty() - - if testCase.expectedResult != result { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestKeySetString(t *testing.T) { - testCases := []struct { - set KeySet - expectedResult string - }{ - {NewKeySet(), `[]`}, - {NewKeySet(S3Delimiter), `[s3:delimiter]`}, - } - - for i, testCase := range testCases { - result := testCase.set.String() - - if testCase.expectedResult != result { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestKeySetToSlice(t *testing.T) { - testCases := []struct { - set KeySet - expectedResult []Key - }{ - {NewKeySet(), []Key{}}, - {NewKeySet(S3Delimiter), []Key{S3Delimiter}}, - } - - for i, testCase := range testCases { - result := testCase.set.ToSlice() - - if !reflect.DeepEqual(testCase.expectedResult, result) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} diff --git a/pkg/bucket/policy/condition/name.go b/pkg/bucket/policy/condition/name.go deleted file mode 100644 index 270cd1a4..00000000 --- a/pkg/bucket/policy/condition/name.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "encoding/json" - "fmt" -) - -type name string - -const ( - stringEquals name = "StringEquals" - stringNotEquals = "StringNotEquals" - stringEqualsIgnoreCase = "StringEqualsIgnoreCase" - stringNotEqualsIgnoreCase = "StringNotEqualsIgnoreCase" - stringLike = "StringLike" - stringNotLike = "StringNotLike" - binaryEquals = "BinaryEquals" - ipAddress = "IpAddress" - notIPAddress = "NotIpAddress" - null = "Null" - boolean = "Bool" - numericEquals = "NumericEquals" - numericNotEquals = "NumericNotEquals" - numericLessThan = "NumericLessThan" - numericLessThanEquals = "NumericLessThanEquals" - numericGreaterThan = "NumericGreaterThan" - numericGreaterThanEquals = "NumericGreaterThanEquals" - dateEquals = "DateEquals" - dateNotEquals = "DateNotEquals" - dateLessThan = "DateLessThan" - dateLessThanEquals = "DateLessThanEquals" - dateGreaterThan = "DateGreaterThan" - dateGreaterThanEquals = "DateGreaterThanEquals" -) - -var supportedConditions = []name{ - stringEquals, - stringNotEquals, - stringEqualsIgnoreCase, - stringNotEqualsIgnoreCase, - binaryEquals, - stringLike, - stringNotLike, - ipAddress, - notIPAddress, - null, - boolean, - numericEquals, - numericNotEquals, - numericLessThan, - numericLessThanEquals, - numericGreaterThan, - numericGreaterThanEquals, - dateEquals, - dateNotEquals, - dateLessThan, - dateLessThanEquals, - dateGreaterThan, - dateGreaterThanEquals, - // Add new conditions here. -} - -// IsValid - checks if name is valid or not. 
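The condition names listed above become the top-level keys when a set of conditions is serialized. A small sketch based on the marshaling behavior exercised in the tests, under the same import-path assumption as the earlier example:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/minio/minio/pkg/bucket/policy/condition"
    )

    func main() {
        nullFn, err := condition.NewNullFunc(condition.S3XAmzServerSideEncryptionCustomerAlgorithm, true)
        if err != nil {
            panic(err)
        }

        data, err := json.Marshal(condition.NewFunctions(nullFn))
        if err != nil {
            panic(err)
        }

        // The condition name is the outer JSON key:
        // {"Null":{"s3:x-amz-server-side-encryption-customer-algorithm":[true]}}
        fmt.Println(string(data))
    }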
-func (n name) IsValid() bool { - for _, supn := range supportedConditions { - if n == supn { - return true - } - } - - return false -} - -// MarshalJSON - encodes name to JSON data. -func (n name) MarshalJSON() ([]byte, error) { - if !n.IsValid() { - return nil, fmt.Errorf("invalid name %v", n) - } - - return json.Marshal(string(n)) -} - -// UnmarshalJSON - decodes JSON data to condition name. -func (n *name) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - parsedName, err := parseName(s) - if err != nil { - return err - } - - *n = parsedName - return nil -} - -func parseName(s string) (name, error) { - n := name(s) - - if n.IsValid() { - return n, nil - } - - return n, fmt.Errorf("invalid condition name '%v'", s) -} diff --git a/pkg/bucket/policy/condition/name_test.go b/pkg/bucket/policy/condition/name_test.go deleted file mode 100644 index b6c98be4..00000000 --- a/pkg/bucket/policy/condition/name_test.go +++ /dev/null @@ -1,106 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestNameIsValid(t *testing.T) { - testCases := []struct { - n name - expectedResult bool - }{ - {stringEquals, true}, - {stringNotEquals, true}, - {stringLike, true}, - {stringNotLike, true}, - {ipAddress, true}, - {notIPAddress, true}, - {null, true}, - {name("foo"), false}, - } - - for i, testCase := range testCases { - result := testCase.n.IsValid() - - if testCase.expectedResult != result { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestNameMarshalJSON(t *testing.T) { - testCases := []struct { - n name - expectedResult []byte - expectErr bool - }{ - {stringEquals, []byte(`"StringEquals"`), false}, - {stringNotEquals, []byte(`"StringNotEquals"`), false}, - {stringLike, []byte(`"StringLike"`), false}, - {stringNotLike, []byte(`"StringNotLike"`), false}, - {ipAddress, []byte(`"IpAddress"`), false}, - {notIPAddress, []byte(`"NotIpAddress"`), false}, - {null, []byte(`"Null"`), false}, - {name("foo"), nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.n) - expectErr := (err != nil) - - if testCase.expectErr != expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestNameUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult name - expectErr bool - }{ - {[]byte(`"StringEquals"`), stringEquals, false}, - {[]byte(`"foo"`), name(""), true}, - } - - for i, testCase := range testCases { - var result name - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if 
testCase.expectErr != expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if testCase.expectedResult != result { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/nullfunc.go b/pkg/bucket/policy/condition/nullfunc.go deleted file mode 100644 index abc2f66f..00000000 --- a/pkg/bucket/policy/condition/nullfunc.go +++ /dev/null @@ -1,106 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "reflect" - "strconv" -) - -// nullFunc - Null condition function. It checks whether Key is not present in given -// values or not. -// For example, -// 1. if Key = S3XAmzCopySource and Value = true, at evaluate() it returns whether -// S3XAmzCopySource is NOT in given value map or not. -// 2. if Key = S3XAmzCopySource and Value = false, at evaluate() it returns whether -// S3XAmzCopySource is in given value map or not. -// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html#Conditions_Null -type nullFunc struct { - k Key - value bool -} - -// evaluate() - evaluates to check whether Key is present in given values or not. -// Depending on condition boolean value, this function returns true or false. -func (f nullFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if f.value { - return len(requestValue) == 0 - } - - return len(requestValue) != 0 -} - -// key() - returns condition key which is used by this condition function. -func (f nullFunc) key() Key { - return f.k -} - -// name() - returns "Null" condition name. -func (f nullFunc) name() name { - return null -} - -func (f nullFunc) String() string { - return fmt.Sprintf("%v:%v:%v", null, f.k, f.value) -} - -// toMap - returns map representation of this function. -func (f nullFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - return map[Key]ValueSet{ - f.k: NewValueSet(NewBoolValue(f.value)), - } -} - -func newNullFunc(key Key, values ValueSet) (Function, error) { - if len(values) != 1 { - return nil, fmt.Errorf("only one value is allowed for Null condition") - } - - var value bool - for v := range values { - switch v.GetType() { - case reflect.Bool: - value, _ = v.GetBool() - case reflect.String: - var err error - s, _ := v.GetString() - if value, err = strconv.ParseBool(s); err != nil { - return nil, fmt.Errorf("value must be a boolean string for Null condition") - } - default: - return nil, fmt.Errorf("value must be a boolean for Null condition") - } - } - - return &nullFunc{key, value}, nil -} - -// NewNullFunc - returns new Null function. 
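A brief sketch of the Null condition in use, again assuming the pre-move import path: with a true value it matches only when the key is absent from the request values, mirroring the evaluate() logic above.

    package main

    import (
        "fmt"

        "github.com/minio/minio/pkg/bucket/policy/condition"
    )

    func main() {
        // "Null": true means the request must NOT carry the prefix parameter.
        fn, err := condition.NewNullFunc(condition.S3Prefix, true)
        if err != nil {
            panic(err)
        }

        conds := condition.NewFunctions(fn)

        fmt.Println(conds.Evaluate(map[string][]string{}))                    // true: prefix absent
        fmt.Println(conds.Evaluate(map[string][]string{"prefix": {"home/"}})) // false: prefix present
    }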
-func NewNullFunc(key Key, value bool) (Function, error) { - return &nullFunc{key, value}, nil -} diff --git a/pkg/bucket/policy/condition/nullfunc_test.go b/pkg/bucket/policy/condition/nullfunc_test.go deleted file mode 100644 index ea3e2e70..00000000 --- a/pkg/bucket/policy/condition/nullfunc_test.go +++ /dev/null @@ -1,161 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "reflect" - "testing" -) - -func TestNullFuncEvaluate(t *testing.T) { - case1Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(false))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"prefix": {"true"}}, false}, - {case1Function, map[string][]string{"prefix": {"false"}}, false}, - {case1Function, map[string][]string{"prefix": {"mybucket/foo"}}, false}, - {case1Function, map[string][]string{}, true}, - {case1Function, map[string][]string{"delimiter": {"/"}}, true}, - {case2Function, map[string][]string{"prefix": {"true"}}, true}, - {case2Function, map[string][]string{"prefix": {"false"}}, true}, - {case2Function, map[string][]string{"prefix": {"mybucket/foo"}}, true}, - {case2Function, map[string][]string{}, false}, - {case2Function, map[string][]string{"delimiter": {"/"}}, false}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Errorf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNullFuncKey(t *testing.T) { - case1Function, err := newNullFunc(S3XAmzCopySource, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, S3XAmzCopySource}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNullFuncToMap(t *testing.T) { - case1Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Result := map[Key]ValueSet{ - S3Prefix: NewValueSet(NewBoolValue(true)), - } - - case2Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(false))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case2Result := map[Key]ValueSet{ - S3Prefix: NewValueSet(NewBoolValue(false)), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {&nullFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNewNullFunc(t *testing.T) { - case1Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(true))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(false))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {S3Prefix, NewValueSet(NewBoolValue(true)), case1Function, false}, - {S3Prefix, NewValueSet(NewStringValue("false")), case2Function, false}, - // Multiple values error. - {S3Prefix, NewValueSet(NewBoolValue(true), NewBoolValue(false)), nil, true}, - // Invalid boolean string error. - {S3Prefix, NewValueSet(NewStringValue("foo")), nil, true}, - // Invalid value error. - {S3Prefix, NewValueSet(NewIntValue(7)), nil, true}, - } - - for i, testCase := range testCases { - result, err := newNullFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/numericequalsfunc.go b/pkg/bucket/policy/condition/numericequalsfunc.go deleted file mode 100644 index 42134a0b..00000000 --- a/pkg/bucket/policy/condition/numericequalsfunc.go +++ /dev/null @@ -1,168 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "reflect" - "strconv" -) - -func toNumericEqualsFuncString(n name, key Key, value int) string { - return fmt.Sprintf("%v:%v:%v", n, key, value) -} - -// numericEqualsFunc - String equals function. It checks whether value by Key in given -// values map is in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is in values. -type numericEqualsFunc struct { - k Key - value int -} - -// evaluate() - evaluates to check whether value by Key in given values is in -// condition values. 
-func (f numericEqualsFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - rvInt, err := strconv.Atoi(requestValue[0]) - if err != nil { - return false - } - - return f.value == rvInt -} - -// key() - returns condition key which is used by this condition function. -func (f numericEqualsFunc) key() Key { - return f.k -} - -// name() - returns "NumericEquals" condition name. -func (f numericEqualsFunc) name() name { - return numericEquals -} - -func (f numericEqualsFunc) String() string { - return toNumericEqualsFuncString(numericEquals, f.k, f.value) -} - -// toMap - returns map representation of this function. -func (f numericEqualsFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - values.Add(NewIntValue(f.value)) - - return map[Key]ValueSet{ - f.k: values, - } -} - -// numericNotEqualsFunc - String not equals function. It checks whether value by Key in -// given values is NOT in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is NOT in values. -type numericNotEqualsFunc struct { - numericEqualsFunc -} - -// evaluate() - evaluates to check whether value by Key in given values is NOT in -// condition values. -func (f numericNotEqualsFunc) evaluate(values map[string][]string) bool { - return !f.numericEqualsFunc.evaluate(values) -} - -// name() - returns "NumericNotEquals" condition name. -func (f numericNotEqualsFunc) name() name { - return numericNotEquals -} - -func (f numericNotEqualsFunc) String() string { - return toNumericEqualsFuncString(numericNotEquals, f.numericEqualsFunc.k, f.numericEqualsFunc.value) -} - -func valueToInt(n name, values ValueSet) (v int, err error) { - if len(values) != 1 { - return -1, fmt.Errorf("only one value is allowed for %s condition", n) - } - - for vs := range values { - switch vs.GetType() { - case reflect.Int: - if v, err = vs.GetInt(); err != nil { - return -1, err - } - case reflect.String: - s, err := vs.GetString() - if err != nil { - return -1, err - } - if v, err = strconv.Atoi(s); err != nil { - return -1, fmt.Errorf("value %s must be a int for %s condition: %w", vs, n, err) - } - default: - return -1, fmt.Errorf("value %s must be a int for %s condition", vs, n) - } - } - - return v, nil - -} - -// newNumericEqualsFunc - returns new NumericEquals function. -func newNumericEqualsFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToInt(numericEquals, values) - if err != nil { - return nil, err - } - - return NewNumericEqualsFunc(key, v) -} - -// NewNumericEqualsFunc - returns new NumericEquals function. -func NewNumericEqualsFunc(key Key, value int) (Function, error) { - return &numericEqualsFunc{key, value}, nil -} - -// newNumericNotEqualsFunc - returns new NumericNotEquals function. -func newNumericNotEqualsFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToInt(numericNotEquals, values) - if err != nil { - return nil, err - } - - return NewNumericNotEqualsFunc(key, v) -} - -// NewNumericNotEqualsFunc - returns new NumericNotEquals function. 
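A usage sketch for the NumericEquals constructor defined above, assuming the pre-move import path, pinning the max-keys parameter of a listing request to an exact value:

    package main

    import (
        "fmt"

        "github.com/minio/minio/pkg/bucket/policy/condition"
    )

    func main() {
        fn, err := condition.NewNumericEqualsFunc(condition.S3MaxKeys, 100)
        if err != nil {
            panic(err)
        }

        conds := condition.NewFunctions(fn)

        fmt.Println(conds.Evaluate(map[string][]string{"max-keys": {"100"}})) // true
        fmt.Println(conds.Evaluate(map[string][]string{"max-keys": {"250"}})) // false
        fmt.Println(conds.Evaluate(map[string][]string{}))                    // false: value missing
    }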
-func NewNumericNotEqualsFunc(key Key, value int) (Function, error) { - return &numericNotEqualsFunc{numericEqualsFunc{key, value}}, nil -} diff --git a/pkg/bucket/policy/condition/numericgreaterfunc.go b/pkg/bucket/policy/condition/numericgreaterfunc.go deleted file mode 100644 index e4d07a4e..00000000 --- a/pkg/bucket/policy/condition/numericgreaterfunc.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "strconv" -) - -func toNumericGreaterThanFuncString(n name, key Key, value int) string { - return fmt.Sprintf("%v:%v:%v", n, key, value) -} - -// numericGreaterThanFunc - String equals function. It checks whether value by Key in given -// values map is in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is in values. -type numericGreaterThanFunc struct { - k Key - value int -} - -// evaluate() - evaluates to check whether value by Key in given values is in -// condition values. -func (f numericGreaterThanFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - rvInt, err := strconv.Atoi(requestValue[0]) - if err != nil { - return false - } - - return rvInt > f.value -} - -// key() - returns condition key which is used by this condition function. -func (f numericGreaterThanFunc) key() Key { - return f.k -} - -// name() - returns "NumericGreaterThan" condition name. -func (f numericGreaterThanFunc) name() name { - return numericGreaterThan -} - -func (f numericGreaterThanFunc) String() string { - return toNumericGreaterThanFuncString(numericGreaterThan, f.k, f.value) -} - -// toMap - returns map representation of this function. -func (f numericGreaterThanFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - values.Add(NewIntValue(f.value)) - - return map[Key]ValueSet{ - f.k: values, - } -} - -// numericGreaterThanEqualsFunc - String not equals function. It checks whether value by Key in -// given values is NOT in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is NOT in values. -type numericGreaterThanEqualsFunc struct { - numericGreaterThanFunc -} - -// evaluate() - evaluates to check whether value by Key in given values is NOT in -// condition values. 
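As an illustration of the greater-than variants that follow, a sketch that enforces a minimum remaining object-lock retention; the 30-day floor is an arbitrary example value, and the import path is again the assumed pre-move one:

    package main

    import (
        "fmt"

        "github.com/minio/minio/pkg/bucket/policy/condition"
    )

    func main() {
        // Require at least 30 days of remaining object-lock retention.
        fn, err := condition.NewNumericGreaterThanEqualsFunc(condition.S3ObjectLockRemainingRetentionDays, 30)
        if err != nil {
            panic(err)
        }

        conds := condition.NewFunctions(fn)

        values := map[string][]string{"object-lock-remaining-retention-days": {"45"}}
        fmt.Println(conds.Evaluate(values)) // true: 45 >= 30

        values["object-lock-remaining-retention-days"] = []string{"7"}
        fmt.Println(conds.Evaluate(values)) // false: 7 < 30
    }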
-func (f numericGreaterThanEqualsFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - rvInt, err := strconv.Atoi(requestValue[0]) - if err != nil { - return false - } - - return rvInt >= f.value -} - -// name() - returns "NumericGreaterThanEquals" condition name. -func (f numericGreaterThanEqualsFunc) name() name { - return numericGreaterThanEquals -} - -func (f numericGreaterThanEqualsFunc) String() string { - return toNumericGreaterThanFuncString(numericGreaterThanEquals, f.numericGreaterThanFunc.k, f.numericGreaterThanFunc.value) -} - -// newNumericGreaterThanFunc - returns new NumericGreaterThan function. -func newNumericGreaterThanFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToInt(numericGreaterThan, values) - if err != nil { - return nil, err - } - - return NewNumericGreaterThanFunc(key, v) -} - -// NewNumericGreaterThanFunc - returns new NumericGreaterThan function. -func NewNumericGreaterThanFunc(key Key, value int) (Function, error) { - return &numericGreaterThanFunc{key, value}, nil -} - -// newNumericGreaterThanEqualsFunc - returns new NumericGreaterThanEquals function. -func newNumericGreaterThanEqualsFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToInt(numericGreaterThanEquals, values) - if err != nil { - return nil, err - } - - return NewNumericGreaterThanEqualsFunc(key, v) -} - -// NewNumericGreaterThanEqualsFunc - returns new NumericGreaterThanEquals function. -func NewNumericGreaterThanEqualsFunc(key Key, value int) (Function, error) { - return &numericGreaterThanEqualsFunc{numericGreaterThanFunc{key, value}}, nil -} diff --git a/pkg/bucket/policy/condition/numericlessfunc.go b/pkg/bucket/policy/condition/numericlessfunc.go deleted file mode 100644 index 7890b655..00000000 --- a/pkg/bucket/policy/condition/numericlessfunc.go +++ /dev/null @@ -1,153 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "strconv" -) - -func toNumericLessThanFuncString(n name, key Key, value int) string { - return fmt.Sprintf("%v:%v:%v", n, key, value) -} - -// numericLessThanFunc - String equals function. It checks whether value by Key in given -// values map is in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is in values. -type numericLessThanFunc struct { - k Key - value int -} - -// evaluate() - evaluates to check whether value by Key in given values is in -// condition values. 
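Editor's note (not part of the diff): the only difference between the removed NumericGreaterThan and NumericGreaterThanEquals conditions is whether the limit itself satisfies the comparison. The stand-alone sketch below mirrors their evaluate() bodies (strconv.Atoi, then > or >=) on values around the limit, without depending on the condition package.

// Stand-alone sketch contrasting the > and >= boundary behaviour of the
// removed numericGreaterThanFunc and numericGreaterThanEqualsFunc.
package main

import (
	"fmt"
	"strconv"
)

// compare parses the raw request value the same way the removed evaluate()
// methods do and applies either > or >= against limit.
func compare(raw string, limit int, orEqual bool) bool {
	v, err := strconv.Atoi(raw)
	if err != nil {
		return false // unparsable values never satisfy the condition
	}
	if orEqual {
		return v >= limit
	}
	return v > limit
}

func main() {
	for _, raw := range []string{"99", "100", "101", "abc"} {
		fmt.Printf("%4s  GreaterThan(100)=%-5v GreaterThanEquals(100)=%v\n",
			raw, compare(raw, 100, false), compare(raw, 100, true))
	}
}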
-func (f numericLessThanFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - rvInt, err := strconv.Atoi(requestValue[0]) - if err != nil { - return false - } - - return rvInt < f.value -} - -// key() - returns condition key which is used by this condition function. -func (f numericLessThanFunc) key() Key { - return f.k -} - -// name() - returns "NumericLessThan" condition name. -func (f numericLessThanFunc) name() name { - return numericLessThan -} - -func (f numericLessThanFunc) String() string { - return toNumericLessThanFuncString(numericLessThan, f.k, f.value) -} - -// toMap - returns map representation of this function. -func (f numericLessThanFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - values.Add(NewIntValue(f.value)) - - return map[Key]ValueSet{ - f.k: values, - } -} - -// numericLessThanEqualsFunc - String not equals function. It checks whether value by Key in -// given values is NOT in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is NOT in values. -type numericLessThanEqualsFunc struct { - numericLessThanFunc -} - -// evaluate() - evaluates to check whether value by Key in given values is NOT in -// condition values. -func (f numericLessThanEqualsFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - if len(requestValue) == 0 { - return false - } - - rvInt, err := strconv.Atoi(requestValue[0]) - if err != nil { - return false - } - - return rvInt <= f.value -} - -// name() - returns "NumericLessThanEquals" condition name. -func (f numericLessThanEqualsFunc) name() name { - return numericLessThanEquals -} - -func (f numericLessThanEqualsFunc) String() string { - return toNumericLessThanFuncString(numericLessThanEquals, f.numericLessThanFunc.k, f.numericLessThanFunc.value) -} - -// newNumericLessThanFunc - returns new NumericLessThan function. -func newNumericLessThanFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToInt(numericLessThan, values) - if err != nil { - return nil, err - } - - return NewNumericLessThanFunc(key, v) -} - -// NewNumericLessThanFunc - returns new NumericLessThan function. -func NewNumericLessThanFunc(key Key, value int) (Function, error) { - return &numericLessThanFunc{key, value}, nil -} - -// newNumericLessThanEqualsFunc - returns new NumericLessThanEquals function. -func newNumericLessThanEqualsFunc(key Key, values ValueSet) (Function, error) { - v, err := valueToInt(numericLessThanEquals, values) - if err != nil { - return nil, err - } - - return NewNumericLessThanEqualsFunc(key, v) -} - -// NewNumericLessThanEqualsFunc - returns new NumericLessThanEquals function. -func NewNumericLessThanEqualsFunc(key Key, value int) (Function, error) { - return &numericLessThanEqualsFunc{numericLessThanFunc{key, value}}, nil -} diff --git a/pkg/bucket/policy/condition/stringequalsfunc.go b/pkg/bucket/policy/condition/stringequalsfunc.go deleted file mode 100644 index 66bfd028..00000000 --- a/pkg/bucket/policy/condition/stringequalsfunc.go +++ /dev/null @@ -1,193 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "sort" - - "github.com/minio/minio-go/v6/pkg/s3utils" - "github.com/minio/minio-go/v6/pkg/set" -) - -func toStringEqualsFuncString(n name, key Key, values set.StringSet) string { - valueStrings := values.ToSlice() - sort.Strings(valueStrings) - - return fmt.Sprintf("%v:%v:%v", n, key, valueStrings) -} - -// stringEqualsFunc - String equals function. It checks whether value by Key in given -// values map is in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is in values. -type stringEqualsFunc struct { - k Key - values set.StringSet -} - -// evaluate() - evaluates to check whether value by Key in given values is in -// condition values. -func (f stringEqualsFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - fvalues := f.values.ApplyFunc(substFuncFromValues(values)) - return !fvalues.Intersection(set.CreateStringSet(requestValue...)).IsEmpty() -} - -// key() - returns condition key which is used by this condition function. -func (f stringEqualsFunc) key() Key { - return f.k -} - -// name() - returns "StringEquals" condition name. -func (f stringEqualsFunc) name() name { - return stringEquals -} - -func (f stringEqualsFunc) String() string { - return toStringEqualsFuncString(stringEquals, f.k, f.values) -} - -// toMap - returns map representation of this function. -func (f stringEqualsFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - for _, value := range f.values.ToSlice() { - values.Add(NewStringValue(value)) - } - - return map[Key]ValueSet{ - f.k: values, - } -} - -// stringNotEqualsFunc - String not equals function. It checks whether value by Key in -// given values is NOT in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is NOT in values. -type stringNotEqualsFunc struct { - stringEqualsFunc -} - -// evaluate() - evaluates to check whether value by Key in given values is NOT in -// condition values. -func (f stringNotEqualsFunc) evaluate(values map[string][]string) bool { - return !f.stringEqualsFunc.evaluate(values) -} - -// name() - returns "StringNotEquals" condition name. 
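Editor's note (not part of the diff): the matching rule in the removed stringEqualsFunc.evaluate() reduces to a non-empty intersection between the request values for the key and the condition's value set. The self-contained sketch below mirrors that test with a plain map; the substFuncFromValues policy-variable substitution used in the real code is omitted for brevity.

// Stand-alone sketch of the intersection check in the removed stringEqualsFunc.
package main

import "fmt"

// anyMatch reports whether any request value appears in the condition set,
// mirroring the non-empty-intersection test in the removed evaluate().
func anyMatch(requestValues []string, conditionValues map[string]struct{}) bool {
	for _, v := range requestValues {
		if _, ok := conditionValues[v]; ok {
			return true
		}
	}
	return false
}

func main() {
	cond := map[string]struct{}{"AES256": {}}
	fmt.Println(anyMatch([]string{"AES256"}, cond))  // true
	fmt.Println(anyMatch([]string{"aws:kms"}, cond)) // false
	fmt.Println(anyMatch(nil, cond))                 // false: a missing value never equals
}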
-func (f stringNotEqualsFunc) name() name { - return stringNotEquals -} - -func (f stringNotEqualsFunc) String() string { - return toStringEqualsFuncString(stringNotEquals, f.stringEqualsFunc.k, f.stringEqualsFunc.values) -} - -func valuesToStringSlice(n name, values ValueSet) ([]string, error) { - valueStrings := []string{} - - for value := range values { - s, err := value.GetString() - if err != nil { - return nil, fmt.Errorf("value must be a string for %v condition", n) - } - - valueStrings = append(valueStrings, s) - } - - return valueStrings, nil -} - -func validateStringEqualsValues(n name, key Key, values set.StringSet) error { - for _, s := range values.ToSlice() { - switch key { - case S3XAmzCopySource: - bucket, object := path2BucketAndObject(s) - if object == "" { - return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzCopySource, n) - } - if err := s3utils.CheckValidBucketName(bucket); err != nil { - return err - } - case S3XAmzServerSideEncryption, S3XAmzServerSideEncryptionCustomerAlgorithm: - if s != "AES256" { - return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzServerSideEncryption, n) - } - case S3XAmzMetadataDirective: - if s != "COPY" && s != "REPLACE" { - return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzMetadataDirective, n) - } - case S3XAmzContentSha256: - if s == "" { - return fmt.Errorf("invalid empty value for '%v' for %v condition", S3XAmzContentSha256, n) - } - } - } - - return nil -} - -// newStringEqualsFunc - returns new StringEquals function. -func newStringEqualsFunc(key Key, values ValueSet) (Function, error) { - valueStrings, err := valuesToStringSlice(stringEquals, values) - if err != nil { - return nil, err - } - - return NewStringEqualsFunc(key, valueStrings...) -} - -// NewStringEqualsFunc - returns new StringEquals function. -func NewStringEqualsFunc(key Key, values ...string) (Function, error) { - sset := set.CreateStringSet(values...) - if err := validateStringEqualsValues(stringEquals, key, sset); err != nil { - return nil, err - } - - return &stringEqualsFunc{key, sset}, nil -} - -// newStringNotEqualsFunc - returns new StringNotEquals function. -func newStringNotEqualsFunc(key Key, values ValueSet) (Function, error) { - valueStrings, err := valuesToStringSlice(stringNotEquals, values) - if err != nil { - return nil, err - } - - return NewStringNotEqualsFunc(key, valueStrings...) -} - -// NewStringNotEqualsFunc - returns new StringNotEquals function. -func NewStringNotEqualsFunc(key Key, values ...string) (Function, error) { - sset := set.CreateStringSet(values...) - if err := validateStringEqualsValues(stringNotEquals, key, sset); err != nil { - return nil, err - } - - return &stringNotEqualsFunc{stringEqualsFunc{key, sset}}, nil -} diff --git a/pkg/bucket/policy/condition/stringequalsfunc_test.go b/pkg/bucket/policy/condition/stringequalsfunc_test.go deleted file mode 100644 index a0609759..00000000 --- a/pkg/bucket/policy/condition/stringequalsfunc_test.go +++ /dev/null @@ -1,708 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "reflect" - "testing" -) - -func TestStringEqualsFuncEvaluate(t *testing.T) { - case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true}, - {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false}, - {case1Function, map[string][]string{}, false}, - {case1Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true}, - {case2Function, map[string][]string{}, false}, - {case2Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true}, - {case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false}, - {case3Function, map[string][]string{}, false}, - {case3Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true}, - {case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false}, - {case4Function, map[string][]string{}, false}, - {case4Function, map[string][]string{"delimiter": {"/"}}, false}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringEqualsFuncKey(t *testing.T) { - case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, S3XAmzCopySource}, - {case2Function, S3XAmzServerSideEncryption}, - {case3Function, S3XAmzMetadataDirective}, - {case4Function, S3LocationConstraint}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringEqualsFuncToMap(t *testing.T) { - case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")), - } - - case2Function, err := newStringEqualsFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - } - - case3Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")), - } - - case4Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet( - NewStringValue("AES256"), - ), - } - - case5Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")), - } - - case6Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - } - - case7Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")), - } - - case8Function, err := newStringEqualsFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case8Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {case3Function, case3Result}, - {case4Function, case4Result}, - {case5Function, case5Result}, - {case6Function, case6Result}, - {case7Function, case7Result}, - {case8Function, case8Result}, - {&stringEqualsFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringNotEqualsFuncEvaluate(t *testing.T) { - case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false}, - {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true}, - {case1Function, map[string][]string{}, true}, - {case1Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false}, - {case2Function, map[string][]string{}, true}, - {case2Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false}, - {case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true}, - {case3Function, map[string][]string{}, true}, - {case3Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false}, - {case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true}, - {case4Function, map[string][]string{}, true}, - {case4Function, map[string][]string{"delimiter": {"/"}}, true}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringNotEqualsFuncKey(t *testing.T) { - case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case4Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, S3XAmzCopySource}, - {case2Function, S3XAmzServerSideEncryption}, - {case3Function, S3XAmzMetadataDirective}, - {case4Function, S3LocationConstraint}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringNotEqualsFuncToMap(t *testing.T) { - case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")), - } - - case2Function, err := newStringNotEqualsFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - } - - case3Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")), - } - - case4Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet( - NewStringValue("AES256"), - ), - } - - case5Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")), - } - - case6Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - } - - case7Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")), - } - - case8Function, err := newStringNotEqualsFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case8Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {case3Function, case3Result}, - {case4Function, case4Result}, - {case5Function, case5Result}, - {case6Function, case6Result}, - {case7Function, case7Result}, - {case8Function, case8Result}, - {&stringNotEqualsFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNewStringEqualsFunc(t *testing.T) { - case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringEqualsFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Function, err := newStringEqualsFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false}, - {S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), case2Function, false}, - - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false}, - {S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), case4Function, false}, - - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false}, - {S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), case6Function, false}, - - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false}, - {S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), case8Function, false}, - - // Unsupported value error. 
- {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, - - // Invalid value error. - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true}, - } - - for i, testCase := range testCases { - result, err := newStringEqualsFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestNewStringNotEqualsFunc(t *testing.T) { - case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringNotEqualsFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Function, err := newStringNotEqualsFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false}, - {S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), case2Function, false}, - - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false}, - {S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), case4Function, false}, - - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false}, - {S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), case6Function, false}, - - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false}, - {S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), case8Function, false}, - - // Unsupported value error. - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, - - // Invalid value error. - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true}, - } - - for i, testCase := range testCases { - result, err := newStringNotEqualsFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/stringequalsignorecasefunc.go b/pkg/bucket/policy/condition/stringequalsignorecasefunc.go deleted file mode 100644 index d7eadfa5..00000000 --- a/pkg/bucket/policy/condition/stringequalsignorecasefunc.go +++ /dev/null @@ -1,160 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "sort" - "strings" - - "github.com/minio/minio-go/v6/pkg/set" -) - -func toStringEqualsIgnoreCaseFuncString(n name, key Key, values set.StringSet) string { - valueStrings := values.ToSlice() - sort.Strings(valueStrings) - - return fmt.Sprintf("%v:%v:%v", n, key, valueStrings) -} - -// stringEqualsIgnoreCaseFunc - String equals function. It checks whether value by Key in given -// values map is in condition values. 
-// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is in values. -type stringEqualsIgnoreCaseFunc struct { - k Key - values set.StringSet -} - -// evaluate() - evaluates to check whether value by Key in given values is in -// condition values, ignores case. -func (f stringEqualsIgnoreCaseFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - fvalues := f.values.ApplyFunc(substFuncFromValues(values)) - - for _, v := range requestValue { - if !fvalues.FuncMatch(strings.EqualFold, v).IsEmpty() { - return true - } - } - - return false -} - -// key() - returns condition key which is used by this condition function. -func (f stringEqualsIgnoreCaseFunc) key() Key { - return f.k -} - -// name() - returns "StringEqualsIgnoreCase" condition name. -func (f stringEqualsIgnoreCaseFunc) name() name { - return stringEqualsIgnoreCase -} - -func (f stringEqualsIgnoreCaseFunc) String() string { - return toStringEqualsIgnoreCaseFuncString(stringEqualsIgnoreCase, f.k, f.values) -} - -// toMap - returns map representation of this function. -func (f stringEqualsIgnoreCaseFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - for _, value := range f.values.ToSlice() { - values.Add(NewStringValue(value)) - } - - return map[Key]ValueSet{ - f.k: values, - } -} - -// stringNotEqualsIgnoreCaseFunc - String not equals function. It checks whether value by Key in -// given values is NOT in condition values. -// For example, -// - if values = ["mybucket/foo"], at evaluate() it returns whether string -// in value map for Key is NOT in values. -type stringNotEqualsIgnoreCaseFunc struct { - stringEqualsIgnoreCaseFunc -} - -// evaluate() - evaluates to check whether value by Key in given values is NOT in -// condition values. -func (f stringNotEqualsIgnoreCaseFunc) evaluate(values map[string][]string) bool { - return !f.stringEqualsIgnoreCaseFunc.evaluate(values) -} - -// name() - returns "StringNotEqualsIgnoreCase" condition name. -func (f stringNotEqualsIgnoreCaseFunc) name() name { - return stringNotEqualsIgnoreCase -} - -func (f stringNotEqualsIgnoreCaseFunc) String() string { - return toStringEqualsIgnoreCaseFuncString(stringNotEqualsIgnoreCase, f.stringEqualsIgnoreCaseFunc.k, f.stringEqualsIgnoreCaseFunc.values) -} - -func validateStringEqualsIgnoreCaseValues(n name, key Key, values set.StringSet) error { - return validateStringEqualsValues(n, key, values) -} - -// newStringEqualsIgnoreCaseFunc - returns new StringEqualsIgnoreCase function. -func newStringEqualsIgnoreCaseFunc(key Key, values ValueSet) (Function, error) { - valueStrings, err := valuesToStringSlice(stringEqualsIgnoreCase, values) - if err != nil { - return nil, err - } - - return NewStringEqualsIgnoreCaseFunc(key, valueStrings...) -} - -// NewStringEqualsIgnoreCaseFunc - returns new StringEqualsIgnoreCase function. -func NewStringEqualsIgnoreCaseFunc(key Key, values ...string) (Function, error) { - sset := set.CreateStringSet(values...) - if err := validateStringEqualsIgnoreCaseValues(stringEqualsIgnoreCase, key, sset); err != nil { - return nil, err - } - - return &stringEqualsIgnoreCaseFunc{key, sset}, nil -} - -// newStringNotEqualsIgnoreCaseFunc - returns new StringNotEqualsIgnoreCase function. 
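Editor's note (not part of the diff): the only behavioural difference between the removed StringEquals and StringEqualsIgnoreCase functions is that candidates are compared with strings.EqualFold, so "AES256" and "aes256" are treated as the same value. The stand-alone sketch below mirrors that check outside the package.

// Stand-alone sketch of the case-insensitive match in the removed
// stringEqualsIgnoreCaseFunc.evaluate().
package main

import (
	"fmt"
	"strings"
)

// anyFoldMatch mirrors the FuncMatch(strings.EqualFold, v) test in the removed code.
func anyFoldMatch(requestValues, conditionValues []string) bool {
	for _, rv := range requestValues {
		for _, cv := range conditionValues {
			if strings.EqualFold(cv, rv) {
				return true
			}
		}
	}
	return false
}

func main() {
	cond := []string{"AES256"}
	fmt.Println(anyFoldMatch([]string{"aes256"}, cond))  // true: case-insensitive
	fmt.Println(anyFoldMatch([]string{"AES-KMS"}, cond)) // false
}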
-func newStringNotEqualsIgnoreCaseFunc(key Key, values ValueSet) (Function, error) { - valueStrings, err := valuesToStringSlice(stringNotEqualsIgnoreCase, values) - if err != nil { - return nil, err - } - - return NewStringNotEqualsIgnoreCaseFunc(key, valueStrings...) -} - -// NewStringNotEqualsIgnoreCaseFunc - returns new StringNotEqualsIgnoreCase function. -func NewStringNotEqualsIgnoreCaseFunc(key Key, values ...string) (Function, error) { - sset := set.CreateStringSet(values...) - if err := validateStringEqualsIgnoreCaseValues(stringNotEqualsIgnoreCase, key, sset); err != nil { - return nil, err - } - - return &stringNotEqualsIgnoreCaseFunc{stringEqualsIgnoreCaseFunc{key, sset}}, nil -} diff --git a/pkg/bucket/policy/condition/stringequalsignorecasefunc_test.go b/pkg/bucket/policy/condition/stringequalsignorecasefunc_test.go deleted file mode 100644 index e223701b..00000000 --- a/pkg/bucket/policy/condition/stringequalsignorecasefunc_test.go +++ /dev/null @@ -1,710 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "reflect" - "testing" -) - -func TestStringEqualsIgnoreCaseFuncEvaluate(t *testing.T) { - case1Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true}, - {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false}, - {case1Function, map[string][]string{}, false}, - {case1Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true}, - {case2Function, map[string][]string{"x-amz-server-side-encryption": {"aes256"}}, true}, - {case2Function, map[string][]string{}, false}, - {case2Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true}, - {case3Function, map[string][]string{"x-amz-metadata-directive": {"replace"}}, true}, - {case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false}, - {case3Function, map[string][]string{}, false}, - {case3Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true}, - {case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false}, - {case4Function, map[string][]string{}, false}, - {case4Function, map[string][]string{"delimiter": {"/"}}, false}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringEqualsIgnoreCaseFuncKey(t *testing.T) { - case1Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, S3XAmzCopySource}, - {case2Function, S3XAmzServerSideEncryption}, - {case3Function, S3XAmzMetadataDirective}, - {case4Function, S3LocationConstraint}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringEqualsIgnoreCaseFuncToMap(t *testing.T) { - case1Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")), - } - - case2Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case2Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - } - - case3Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")), - } - - case4Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet( - NewStringValue("AES256"), - ), - } - - case5Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")), - } - - case6Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - } - - case7Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")), - } - - case8Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {case3Function, case3Result}, - {case4Function, case4Result}, - {case5Function, case5Result}, - {case6Function, case6Result}, - {case7Function, case7Result}, - {case8Function, case8Result}, - {&stringEqualsFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringNotEqualsIgnoreCaseFuncEvaluate(t *testing.T) { - case1Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false}, - {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true}, - {case1Function, map[string][]string{}, true}, - {case1Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false}, - {case2Function, map[string][]string{}, true}, - {case2Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false}, - {case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true}, - {case3Function, map[string][]string{}, true}, - {case3Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false}, - {case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true}, - {case4Function, map[string][]string{}, true}, - {case4Function, map[string][]string{"delimiter": {"/"}}, true}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringNotEqualsIgnoreCaseFuncKey(t *testing.T) { - case1Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, S3XAmzCopySource}, - {case2Function, S3XAmzServerSideEncryption}, - {case3Function, S3XAmzMetadataDirective}, - {case4Function, S3LocationConstraint}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringNotEqualsIgnoreCaseFuncToMap(t *testing.T) { - case1Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")), - } - - case2Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case2Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - } - - case3Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")), - } - - case4Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet( - NewStringValue("AES256"), - ), - } - - case5Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")), - } - - case6Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - } - - case7Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")), - } - - case8Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {case3Function, case3Result}, - {case4Function, case4Result}, - {case5Function, case5Result}, - {case6Function, case6Result}, - {case7Function, case7Result}, - {case8Function, case8Result}, - {&stringNotEqualsFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNewStringEqualsIgnoreCaseFunc(t *testing.T) { - case1Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case4Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false}, - {S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), case2Function, false}, - - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false}, - {S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), case4Function, false}, - - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false}, - {S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), case6Function, false}, - - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false}, - {S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), case8Function, false}, - - // Unsupported value error. - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, - - // Invalid value error. - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true}, - } - - for i, testCase := range testCases { - result, err := newStringEqualsIgnoreCaseFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestNewStringNotEqualsIgnoreCaseFunc(t *testing.T) { - case1Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case2Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false}, - {S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/myobject"), - NewStringValue("yourbucket/myobject"), - ), case2Function, false}, - - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false}, - {S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES256"), - ), case4Function, false}, - - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false}, - {S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPLACE"), - NewStringValue("COPY"), - ), case6Function, false}, - - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false}, - {S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-1"), - NewStringValue("us-west-1"), - ), case8Function, false}, - - // Unsupported value error. - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, - - // Invalid value error. 
- {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true}, - } - - for i, testCase := range testCases { - result, err := newStringNotEqualsIgnoreCaseFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/stringlikefunc.go b/pkg/bucket/policy/condition/stringlikefunc.go deleted file mode 100644 index 7506f9f2..00000000 --- a/pkg/bucket/policy/condition/stringlikefunc.go +++ /dev/null @@ -1,174 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "fmt" - "net/http" - "sort" - - "github.com/minio/minio-go/v6/pkg/s3utils" - "github.com/minio/minio-go/v6/pkg/set" - "github.com/minio/minio/pkg/wildcard" -) - -func toStringLikeFuncString(n name, key Key, values set.StringSet) string { - valueStrings := values.ToSlice() - sort.Strings(valueStrings) - - return fmt.Sprintf("%v:%v:%v", n, key, valueStrings) -} - -// stringLikeFunc - String like function. It checks whether value by Key in given -// values map is widcard matching in condition values. -// For example, -// - if values = ["mybucket/foo*"], at evaluate() it returns whether string -// in value map for Key is wildcard matching in values. -type stringLikeFunc struct { - k Key - values set.StringSet -} - -// evaluate() - evaluates to check whether value by Key in given values is wildcard -// matching in condition values. -func (f stringLikeFunc) evaluate(values map[string][]string) bool { - requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())] - if !ok { - requestValue = values[f.k.Name()] - } - - fvalues := f.values.ApplyFunc(substFuncFromValues(values)) - - for _, v := range requestValue { - if !fvalues.FuncMatch(wildcard.Match, v).IsEmpty() { - return true - } - } - - return false -} - -// key() - returns condition key which is used by this condition function. -func (f stringLikeFunc) key() Key { - return f.k -} - -// name() - returns "StringLike" function name. -func (f stringLikeFunc) name() name { - return stringLike -} - -func (f stringLikeFunc) String() string { - return toStringLikeFuncString(stringLike, f.k, f.values) -} - -// toMap - returns map representation of this function. 
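Before the toMap implementation that follows, a minimal hedged sketch of the wildcard matching described above, written as if it lived inside the condition package (the unexported evaluate method and the newStringLikeFunc constructor are only reachable there); the helper name exampleStringLikeEvaluate is illustrative and not part of the deleted source.

package condition

import "fmt"

// exampleStringLikeEvaluate shows how a StringLike condition on x-amz-copy-source
// wildcard-matches incoming request values, mirroring the table-driven tests
// later in this diff.
func exampleStringLikeEvaluate() error {
	fn, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/foo*")))
	if err != nil {
		return err
	}

	// Request values are keyed the same way the tests below key them.
	fmt.Println(fn.evaluate(map[string][]string{"x-amz-copy-source": {"mybucket/foo/object.png"}})) // true
	fmt.Println(fn.evaluate(map[string][]string{"x-amz-copy-source": {"yourbucket/foo"}}))          // false

	return nil
}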
-func (f stringLikeFunc) toMap() map[Key]ValueSet { - if !f.k.IsValid() { - return nil - } - - values := NewValueSet() - for _, value := range f.values.ToSlice() { - values.Add(NewStringValue(value)) - } - - return map[Key]ValueSet{ - f.k: values, - } -} - -// stringNotLikeFunc - String not like function. It checks whether value by Key in given -// values map is NOT widcard matching in condition values. -// For example, -// - if values = ["mybucket/foo*"], at evaluate() it returns whether string -// in value map for Key is NOT wildcard matching in values. -type stringNotLikeFunc struct { - stringLikeFunc -} - -// evaluate() - evaluates to check whether value by Key in given values is NOT wildcard -// matching in condition values. -func (f stringNotLikeFunc) evaluate(values map[string][]string) bool { - return !f.stringLikeFunc.evaluate(values) -} - -// name() - returns "StringNotLike" function name. -func (f stringNotLikeFunc) name() name { - return stringNotLike -} - -func (f stringNotLikeFunc) String() string { - return toStringLikeFuncString(stringNotLike, f.stringLikeFunc.k, f.stringLikeFunc.values) -} - -func validateStringLikeValues(n name, key Key, values set.StringSet) error { - for _, s := range values.ToSlice() { - switch key { - case S3XAmzCopySource: - bucket, object := path2BucketAndObject(s) - if object == "" { - return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzCopySource, n) - } - if err := s3utils.CheckValidBucketName(bucket); err != nil { - return err - } - } - } - - return nil -} - -// newStringLikeFunc - returns new StringLike function. -func newStringLikeFunc(key Key, values ValueSet) (Function, error) { - valueStrings, err := valuesToStringSlice(stringLike, values) - if err != nil { - return nil, err - } - - return NewStringLikeFunc(key, valueStrings...) -} - -// NewStringLikeFunc - returns new StringLike function. -func NewStringLikeFunc(key Key, values ...string) (Function, error) { - sset := set.CreateStringSet(values...) - if err := validateStringLikeValues(stringLike, key, sset); err != nil { - return nil, err - } - - return &stringLikeFunc{key, sset}, nil -} - -// newStringNotLikeFunc - returns new StringNotLike function. -func newStringNotLikeFunc(key Key, values ValueSet) (Function, error) { - valueStrings, err := valuesToStringSlice(stringNotLike, values) - if err != nil { - return nil, err - } - - return NewStringNotLikeFunc(key, valueStrings...) -} - -// NewStringNotLikeFunc - returns new StringNotLike function. -func NewStringNotLikeFunc(key Key, values ...string) (Function, error) { - sset := set.CreateStringSet(values...) - if err := validateStringLikeValues(stringNotLike, key, sset); err != nil { - return nil, err - } - - return &stringNotLikeFunc{stringLikeFunc{key, sset}}, nil -} diff --git a/pkg/bucket/policy/condition/stringlikefunc_test.go b/pkg/bucket/policy/condition/stringlikefunc_test.go deleted file mode 100644 index 010d99f0..00000000 --- a/pkg/bucket/policy/condition/stringlikefunc_test.go +++ /dev/null @@ -1,798 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "reflect" - "testing" -) - -func TestStringLikeFuncEvaluate(t *testing.T) { - case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true}, - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, true}, - {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false}, - {case1Function, map[string][]string{}, false}, - {case1Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true}, - {case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, false}, - {case2Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false}, - {case2Function, map[string][]string{}, false}, - {case2Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true}, - {case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, true}, - {case3Function, map[string][]string{}, false}, - {case3Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true}, - {case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, false}, - {case4Function, map[string][]string{}, false}, - {case4Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true}, - {case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, true}, - {case5Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false}, - {case5Function, map[string][]string{}, false}, - {case5Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true}, - {case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, false}, - {case6Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false}, - {case6Function, map[string][]string{}, false}, - {case6Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case7Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true}, - {case7Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, true}, - {case7Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false}, - {case7Function, map[string][]string{}, false}, - {case7Function, map[string][]string{"delimiter": {"/"}}, false}, - - {case8Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true}, - {case8Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, false}, - {case8Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false}, - {case8Function, map[string][]string{}, false}, - {case8Function, map[string][]string{"delimiter": {"/"}}, false}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringLikeFuncKey(t *testing.T) { - case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case2Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, S3XAmzCopySource}, - {case2Function, S3XAmzServerSideEncryption}, - {case3Function, S3XAmzMetadataDirective}, - {case4Function, S3LocationConstraint}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringLikeFuncToMap(t *testing.T) { - case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/*")), - } - - case2Function, err := newStringLikeFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/*"), - NewStringValue("yourbucket/myobject*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet( - NewStringValue("mybucket/*"), - NewStringValue("yourbucket/myobject*"), - ), - } - - case3Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES*")), - } - - case4Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet( - NewStringValue("AES*"), - ), - } - - case5Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPL*")), - } - - case6Function, err := newStringLikeFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPL*"), - NewStringValue("COPY*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet( - NewStringValue("REPL*"), - NewStringValue("COPY*"), - ), - } - - case7Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet(NewStringValue("eu-west-*")), - } - - case8Function, err := newStringLikeFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-*"), - NewStringValue("us-west-*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case8Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet( - NewStringValue("eu-west-*"), - NewStringValue("us-west-*"), - ), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {case3Function, case3Result}, - {case4Function, case4Result}, - {case5Function, case5Result}, - {case6Function, case6Result}, - {case7Function, case7Result}, - {case8Function, case8Result}, - {&stringLikeFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringNotLikeFuncEvaluate(t *testing.T) { - case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - testCases := []struct { - function Function - values map[string][]string - expectedResult bool - }{ - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false}, - {case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, false}, - {case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true}, - {case1Function, map[string][]string{}, true}, - {case1Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false}, - {case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, true}, - {case2Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true}, - {case2Function, map[string][]string{}, true}, - {case2Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false}, - {case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, false}, - {case3Function, map[string][]string{}, true}, - {case3Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false}, - {case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, true}, - {case4Function, map[string][]string{}, true}, - {case4Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false}, - {case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, false}, - {case5Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true}, - {case5Function, map[string][]string{}, true}, - {case5Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false}, - {case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, true}, - {case6Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true}, - {case6Function, map[string][]string{}, true}, - {case6Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case7Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false}, - {case7Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, false}, - {case7Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true}, - {case7Function, map[string][]string{}, true}, - {case7Function, map[string][]string{"delimiter": {"/"}}, true}, - - {case8Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false}, - {case8Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, true}, - {case8Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true}, - {case8Function, map[string][]string{}, true}, - {case8Function, map[string][]string{"delimiter": {"/"}}, true}, - } - - for i, testCase := range testCases { - result := testCase.function.evaluate(testCase.values) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringNotLikeFuncKey(t *testing.T) { - case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"))) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case2Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - function Function - expectedResult Key - }{ - {case1Function, S3XAmzCopySource}, - {case2Function, S3XAmzServerSideEncryption}, - {case3Function, S3XAmzMetadataDirective}, - {case4Function, S3LocationConstraint}, - } - - for i, testCase := range testCases { - result := testCase.function.key() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStringNotLikeFuncToMap(t *testing.T) { - case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case1Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/*")), - } - - case2Function, err := newStringNotLikeFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/*"), - NewStringValue("yourbucket/myobject*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Result := map[Key]ValueSet{ - S3XAmzCopySource: NewValueSet( - NewStringValue("mybucket/*"), - NewStringValue("yourbucket/myobject*"), - ), - } - - case3Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES*")), - } - - case4Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Result := map[Key]ValueSet{ - S3XAmzServerSideEncryption: NewValueSet( - NewStringValue("AES*"), - ), - } - - case5Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPL*")), - } - - case6Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPL*"), - NewStringValue("COPY*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Result := map[Key]ValueSet{ - S3XAmzMetadataDirective: NewValueSet( - NewStringValue("REPL*"), - NewStringValue("COPY*"), - ), - } - - case7Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet(NewStringValue("eu-west-*")), - } - - case8Function, err := newStringNotLikeFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-*"), - NewStringValue("us-west-*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case8Result := map[Key]ValueSet{ - S3LocationConstraint: NewValueSet( - NewStringValue("eu-west-*"), - NewStringValue("us-west-*"), - ), - } - - testCases := []struct { - f Function - expectedResult map[Key]ValueSet - }{ - {case1Function, case1Result}, - {case2Function, case2Result}, - {case3Function, case3Result}, - {case4Function, case4Result}, - {case5Function, case5Result}, - {case6Function, case6Result}, - {case7Function, case7Result}, - {case8Function, case8Result}, - {&stringNotLikeFunc{}, nil}, - } - - for i, testCase := range testCases { - result := testCase.f.toMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestNewStringLikeFunc(t *testing.T) { - case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringLikeFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/*"), - NewStringValue("yourbucket/myobject*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Function, err := newStringLikeFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPL*"), - NewStringValue("COPY*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Function, err := newStringLikeFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-*"), - NewStringValue("us-west-*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")), case1Function, false}, - {S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/*"), - NewStringValue("yourbucket/myobject*"), - ), case2Function, false}, - - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")), case3Function, false}, - {S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES*"), - ), case4Function, false}, - - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")), case5Function, false}, - {S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPL*"), - NewStringValue("COPY*"), - ), case6Function, false}, - - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")), case7Function, false}, - {S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-*"), - NewStringValue("us-west-*"), - ), case8Function, false}, - - // Unsupported value error. 
- {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, - - // Invalid value error. - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, - } - - for i, testCase := range testCases { - result, err := newStringLikeFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestNewStringNotLikeFunc(t *testing.T) { - case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Function, err := newStringNotLikeFunc(S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/*"), - NewStringValue("yourbucket/myobject*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case4Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case5Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPL*"), - NewStringValue("COPY*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case7Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*"))) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case8Function, err := newStringNotLikeFunc(S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-*"), - NewStringValue("us-west-*"), - ), - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - key Key - values ValueSet - expectedResult Function - expectErr bool - }{ - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")), case1Function, false}, - {S3XAmzCopySource, - NewValueSet( - NewStringValue("mybucket/*"), - NewStringValue("yourbucket/myobject*"), - ), case2Function, false}, - - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")), case3Function, false}, - {S3XAmzServerSideEncryption, - NewValueSet( - NewStringValue("AES*"), - ), case4Function, false}, - - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")), case5Function, false}, - {S3XAmzMetadataDirective, - NewValueSet( - NewStringValue("REPL*"), - NewStringValue("COPY*"), - ), case6Function, false}, - - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")), case7Function, false}, - {S3LocationConstraint, - NewValueSet( - NewStringValue("eu-west-*"), - NewStringValue("us-west-*"), - ), case8Function, false}, - - // Unsupported value error. 
- {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true}, - {S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true}, - {S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true}, - {S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true}, - - // Invalid value error. - {S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true}, - } - - for i, testCase := range testCases { - result, err := newStringNotLikeFunc(testCase.key, testCase.values) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/value.go b/pkg/bucket/policy/condition/value.go deleted file mode 100644 index 8a73963b..00000000 --- a/pkg/bucket/policy/condition/value.go +++ /dev/null @@ -1,175 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - "strings" -) - -// Splits an incoming path into bucket and object components. -func path2BucketAndObject(path string) (bucket, object string) { - // Skip the first element if it is '/', split the rest. - path = strings.TrimPrefix(path, "/") - pathComponents := strings.SplitN(path, "/", 2) - - // Save the bucket and object extracted from path. - switch len(pathComponents) { - case 1: - bucket = pathComponents[0] - case 2: - bucket = pathComponents[0] - object = pathComponents[1] - } - return bucket, object -} - -// Value - is enum type of string, int or bool. -type Value struct { - t reflect.Kind - s string - i int - b bool -} - -// GetBool - gets stored bool value. -func (v Value) GetBool() (bool, error) { - var err error - - if v.t != reflect.Bool { - err = fmt.Errorf("not a bool Value") - } - - return v.b, err -} - -// GetInt - gets stored int value. -func (v Value) GetInt() (int, error) { - var err error - - if v.t != reflect.Int { - err = fmt.Errorf("not a int Value") - } - - return v.i, err -} - -// GetString - gets stored string value. -func (v Value) GetString() (string, error) { - var err error - - if v.t != reflect.String { - err = fmt.Errorf("not a string Value") - } - - return v.s, err -} - -// GetType - gets enum type. -func (v Value) GetType() reflect.Kind { - return v.t -} - -// MarshalJSON - encodes Value to JSON data. -func (v Value) MarshalJSON() ([]byte, error) { - switch v.t { - case reflect.String: - return json.Marshal(v.s) - case reflect.Int: - return json.Marshal(v.i) - case reflect.Bool: - return json.Marshal(v.b) - } - - return nil, fmt.Errorf("unknown value kind %v", v.t) -} - -// StoreBool - stores bool value. 
-func (v *Value) StoreBool(b bool) { - *v = Value{t: reflect.Bool, b: b} -} - -// StoreInt - stores int value. -func (v *Value) StoreInt(i int) { - *v = Value{t: reflect.Int, i: i} -} - -// StoreString - stores string value. -func (v *Value) StoreString(s string) { - *v = Value{t: reflect.String, s: s} -} - -// String - returns string representation of value. -func (v Value) String() string { - switch v.t { - case reflect.String: - return v.s - case reflect.Int: - return strconv.Itoa(v.i) - case reflect.Bool: - return strconv.FormatBool(v.b) - } - - return "" -} - -// UnmarshalJSON - decodes JSON data. -func (v *Value) UnmarshalJSON(data []byte) error { - var b bool - if err := json.Unmarshal(data, &b); err == nil { - v.StoreBool(b) - return nil - } - - var i int - if err := json.Unmarshal(data, &i); err == nil { - v.StoreInt(i) - return nil - } - - var s string - if err := json.Unmarshal(data, &s); err == nil { - v.StoreString(s) - return nil - } - - return fmt.Errorf("unknown json data '%v'", data) -} - -// NewBoolValue - returns new bool value. -func NewBoolValue(b bool) Value { - value := &Value{} - value.StoreBool(b) - return *value -} - -// NewIntValue - returns new int value. -func NewIntValue(i int) Value { - value := &Value{} - value.StoreInt(i) - return *value -} - -// NewStringValue - returns new string value. -func NewStringValue(s string) Value { - value := &Value{} - value.StoreString(s) - return *value -} diff --git a/pkg/bucket/policy/condition/value_test.go b/pkg/bucket/policy/condition/value_test.go deleted file mode 100644 index edd779ea..00000000 --- a/pkg/bucket/policy/condition/value_test.go +++ /dev/null @@ -1,260 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package condition - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestValueGetBool(t *testing.T) { - testCases := []struct { - value Value - expectedResult bool - expectErr bool - }{ - {NewBoolValue(true), true, false}, - {NewIntValue(7), false, true}, - {Value{}, false, true}, - } - - for i, testCase := range testCases { - result, err := testCase.value.GetBool() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if result != testCase.expectedResult { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestValueGetInt(t *testing.T) { - testCases := []struct { - value Value - expectedResult int - expectErr bool - }{ - {NewIntValue(7), 7, false}, - {NewBoolValue(true), 0, true}, - {Value{}, 0, true}, - } - - for i, testCase := range testCases { - result, err := testCase.value.GetInt() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if result != testCase.expectedResult { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestValueGetString(t *testing.T) { - testCases := []struct { - value Value - expectedResult string - expectErr bool - }{ - {NewStringValue("foo"), "foo", false}, - {NewBoolValue(true), "", true}, - {Value{}, "", true}, - } - - for i, testCase := range testCases { - result, err := testCase.value.GetString() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if result != testCase.expectedResult { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestValueGetType(t *testing.T) { - testCases := []struct { - value Value - expectedResult reflect.Kind - }{ - {NewBoolValue(true), reflect.Bool}, - {NewIntValue(7), reflect.Int}, - {NewStringValue("foo"), reflect.String}, - {Value{}, reflect.Invalid}, - } - - for i, testCase := range testCases { - result := testCase.value.GetType() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestValueMarshalJSON(t *testing.T) { - testCases := []struct { - value Value - expectedResult []byte - expectErr bool - }{ - {NewBoolValue(true), []byte("true"), false}, - {NewIntValue(7), []byte("7"), false}, - {NewStringValue("foo"), []byte(`"foo"`), false}, - {Value{}, nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.value) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestValueStoreBool(t *testing.T) { - testCases := []struct { - value bool - expectedResult Value - }{ - {false, NewBoolValue(false)}, - {true, NewBoolValue(true)}, - } - - for i, testCase := range testCases { - var result Value - result.StoreBool(testCase.value) - - if !reflect.DeepEqual(result, 
testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestValueStoreInt(t *testing.T) { - testCases := []struct { - value int - expectedResult Value - }{ - {0, NewIntValue(0)}, - {7, NewIntValue(7)}, - } - - for i, testCase := range testCases { - var result Value - result.StoreInt(testCase.value) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestValueStoreString(t *testing.T) { - testCases := []struct { - value string - expectedResult Value - }{ - {"", NewStringValue("")}, - {"foo", NewStringValue("foo")}, - } - - for i, testCase := range testCases { - var result Value - result.StoreString(testCase.value) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestValueString(t *testing.T) { - testCases := []struct { - value Value - expectedResult string - }{ - {NewBoolValue(true), "true"}, - {NewIntValue(7), "7"}, - {NewStringValue("foo"), "foo"}, - {Value{}, ""}, - } - - for i, testCase := range testCases { - result := testCase.value.String() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestValueUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult Value - expectErr bool - }{ - {[]byte("true"), NewBoolValue(true), false}, - {[]byte("7"), NewIntValue(7), false}, - {[]byte(`"foo"`), NewStringValue("foo"), false}, - {[]byte("True"), Value{}, true}, - {[]byte("7.1"), Value{}, true}, - {[]byte(`["foo"]`), Value{}, true}, - } - - for i, testCase := range testCases { - var result Value - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/condition/valueset.go b/pkg/bucket/policy/condition/valueset.go deleted file mode 100644 index 90e8b26a..00000000 --- a/pkg/bucket/policy/condition/valueset.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package condition - -import ( - "encoding/json" - "fmt" -) - -// ValueSet - unique list of values. -type ValueSet map[Value]struct{} - -// Add - adds given value to value set. -func (set ValueSet) Add(value Value) { - set[value] = struct{}{} -} - -// MarshalJSON - encodes ValueSet to JSON data. 
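Before the MarshalJSON implementation that follows, a hedged sketch of the set's JSON behaviour, assuming it sits in the condition package: a non-empty ValueSet encodes as a JSON array (an empty set is rejected), while decoding accepts either a bare scalar or an array, which is how single-valued policy conditions round-trip. The helper name exampleValueSetJSON is illustrative and not part of the deleted source.

package condition

import (
	"encoding/json"
	"fmt"
)

// exampleValueSetJSON encodes a one-element set and decodes a bare scalar back
// into a one-element set, matching the UnmarshalJSON fallback order below.
func exampleValueSetJSON() error {
	data, err := json.Marshal(NewValueSet(NewStringValue("REPLACE")))
	if err != nil {
		return err
	}
	fmt.Println(string(data)) // ["REPLACE"]

	var set ValueSet
	if err := json.Unmarshal([]byte(`"REPLACE"`), &set); err != nil {
		return err
	}
	fmt.Println(len(set)) // 1

	return nil
}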
-func (set ValueSet) MarshalJSON() ([]byte, error) { - var values []Value - for k := range set { - values = append(values, k) - } - - if len(values) == 0 { - return nil, fmt.Errorf("invalid value set %v", set) - } - - return json.Marshal(values) -} - -// UnmarshalJSON - decodes JSON data. -func (set *ValueSet) UnmarshalJSON(data []byte) error { - var v Value - if err := json.Unmarshal(data, &v); err == nil { - *set = make(ValueSet) - set.Add(v) - return nil - } - - var values []Value - if err := json.Unmarshal(data, &values); err != nil { - return err - } - - if len(values) < 1 { - return fmt.Errorf("invalid value") - } - - *set = make(ValueSet) - for _, v = range values { - if _, found := (*set)[v]; found { - return fmt.Errorf("duplicate value found '%v'", v) - } - - set.Add(v) - } - - return nil -} - -// NewValueSet - returns new value set containing given values. -func NewValueSet(values ...Value) ValueSet { - set := make(ValueSet) - - for _, value := range values { - set.Add(value) - } - - return set -} diff --git a/pkg/bucket/policy/condition/valueset_test.go b/pkg/bucket/policy/condition/valueset_test.go deleted file mode 100644 index c8f8b2af..00000000 --- a/pkg/bucket/policy/condition/valueset_test.go +++ /dev/null @@ -1,118 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package condition - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestValueSetAdd(t *testing.T) { - testCases := []struct { - value Value - expectedResult ValueSet - }{ - {NewBoolValue(true), NewValueSet(NewBoolValue(true))}, - {NewIntValue(7), NewValueSet(NewIntValue(7))}, - {NewStringValue("foo"), NewValueSet(NewStringValue("foo"))}, - } - - for i, testCase := range testCases { - result := NewValueSet() - result.Add(testCase.value) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestValueSetMarshalJSON(t *testing.T) { - testCases := []struct { - set ValueSet - expectedResult string - expectErr bool - }{ - {NewValueSet(NewBoolValue(true)), `[true]`, false}, - {NewValueSet(NewIntValue(7)), `[7]`, false}, - {NewValueSet(NewStringValue("foo")), `["foo"]`, false}, - {NewValueSet(NewBoolValue(true)), `[true]`, false}, - {NewValueSet(NewStringValue("7")), `["7"]`, false}, - {NewValueSet(NewStringValue("foo")), `["foo"]`, false}, - {make(ValueSet), "", true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.set) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if string(result) != testCase.expectedResult { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, string(result)) - } - } - } -} - -func TestValueSetUnmarshalJSON(t *testing.T) { - set1 := NewValueSet( - NewBoolValue(true), - NewStringValue("false"), - NewIntValue(7), - NewStringValue("7"), - NewStringValue("foo"), - NewStringValue("192.168.1.100/24"), - ) - - testCases := []struct { - data []byte - expectedResult ValueSet - expectErr bool - }{ - {[]byte(`true`), NewValueSet(NewBoolValue(true)), false}, - {[]byte(`7`), NewValueSet(NewIntValue(7)), false}, - {[]byte(`"foo"`), NewValueSet(NewStringValue("foo")), false}, - {[]byte(`[true]`), NewValueSet(NewBoolValue(true)), false}, - {[]byte(`[7]`), NewValueSet(NewIntValue(7)), false}, - {[]byte(`["foo"]`), NewValueSet(NewStringValue("foo")), false}, - {[]byte(`[true, "false", 7, "7", "foo", "192.168.1.100/24"]`), set1, false}, - {[]byte(`{}`), nil, true}, // Unsupported data. - {[]byte(`[]`), nil, true}, // Empty array. - {[]byte(`[7, 7, true]`), nil, true}, // Duplicate value. - } - - for i, testCase := range testCases { - result := make(ValueSet) - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/effect.go b/pkg/bucket/policy/effect.go deleted file mode 100644 index 31167587..00000000 --- a/pkg/bucket/policy/effect.go +++ /dev/null @@ -1,47 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -// Effect - policy statement effect Allow or Deny. -type Effect string - -const ( - // Allow - allow effect. - Allow Effect = "Allow" - - // Deny - deny effect. - Deny = "Deny" -) - -// IsAllowed - returns if given check is allowed or not. -func (effect Effect) IsAllowed(b bool) bool { - if effect == Allow { - return b - } - - return !b -} - -// IsValid - checks if Effect is valid or not -func (effect Effect) IsValid() bool { - switch effect { - case Allow, Deny: - return true - } - - return false -} diff --git a/pkg/bucket/policy/effect_test.go b/pkg/bucket/policy/effect_test.go deleted file mode 100644 index 5797aaa1..00000000 --- a/pkg/bucket/policy/effect_test.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "testing" -) - -func TestEffectIsAllowed(t *testing.T) { - testCases := []struct { - effect Effect - check bool - expectedResult bool - }{ - {Allow, false, false}, - {Allow, true, true}, - {Deny, false, true}, - {Deny, true, false}, - } - - for i, testCase := range testCases { - result := testCase.effect.IsAllowed(testCase.check) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - -} - -func TestEffectIsValid(t *testing.T) { - testCases := []struct { - effect Effect - expectedResult bool - }{ - {Allow, true}, - {Deny, true}, - {Effect(""), false}, - {Effect("foo"), false}, - } - - for i, testCase := range testCases { - result := testCase.effect.IsValid() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} diff --git a/pkg/bucket/policy/error.go b/pkg/bucket/policy/error.go deleted file mode 100644 index 545e6726..00000000 --- a/pkg/bucket/policy/error.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package policy - -import ( - "fmt" -) - -// Error is the generic type for any error happening during policy -// parsing. -type Error struct { - err error -} - -// Errorf - formats according to a format specifier and returns -// the string as a value that satisfies error of type policy.Error -func Errorf(format string, a ...interface{}) error { - return Error{err: fmt.Errorf(format, a...)} -} - -// Unwrap the internal error. -func (e Error) Unwrap() error { return e.err } - -// Error 'error' compatible method. -func (e Error) Error() string { - if e.err == nil { - return "policy: cause " - } - return e.err.Error() -} diff --git a/pkg/bucket/policy/id.go b/pkg/bucket/policy/id.go deleted file mode 100644 index 62e48e6c..00000000 --- a/pkg/bucket/policy/id.go +++ /dev/null @@ -1,29 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "unicode/utf8" -) - -// ID - policy ID. -type ID string - -// IsValid - checks if ID is valid or not. -func (id ID) IsValid() bool { - return utf8.ValidString(string(id)) -} diff --git a/pkg/bucket/policy/id_test.go b/pkg/bucket/policy/id_test.go deleted file mode 100644 index b1f7d5df..00000000 --- a/pkg/bucket/policy/id_test.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "testing" -) - -func TestIDIsValid(t *testing.T) { - testCases := []struct { - id ID - expectedResult bool - }{ - {ID("DenyEncryptionSt1"), true}, - {ID(""), true}, - {ID("aa\xe2"), false}, - } - - for i, testCase := range testCases { - result := testCase.id.IsValid() - - if result != testCase.expectedResult { - t.Errorf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} diff --git a/pkg/bucket/policy/policy.go b/pkg/bucket/policy/policy.go deleted file mode 100644 index 59bdcf99..00000000 --- a/pkg/bucket/policy/policy.go +++ /dev/null @@ -1,180 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - "io" -) - -// DefaultVersion - default policy version as per AWS S3 specification. -const DefaultVersion = "2012-10-17" - -// Args - arguments to policy to check whether it is allowed -type Args struct { - AccountName string `json:"account"` - Action Action `json:"action"` - BucketName string `json:"bucket"` - ConditionValues map[string][]string `json:"conditions"` - IsOwner bool `json:"owner"` - ObjectName string `json:"object"` -} - -// Policy - bucket policy. -type Policy struct { - ID ID `json:"ID,omitempty"` - Version string - Statements []Statement `json:"Statement"` -} - -// IsAllowed - checks given policy args is allowed to continue the Rest API. -func (policy Policy) IsAllowed(args Args) bool { - // Check all deny statements. If any one statement denies, return false. - for _, statement := range policy.Statements { - if statement.Effect == Deny { - if !statement.IsAllowed(args) { - return false - } - } - } - - // For owner, its allowed by default. - if args.IsOwner { - return true - } - - // Check all allow statements. If any one statement allows, return true. - for _, statement := range policy.Statements { - if statement.Effect == Allow { - if statement.IsAllowed(args) { - return true - } - } - } - - return false -} - -// IsEmpty - returns whether policy is empty or not. -func (policy Policy) IsEmpty() bool { - return len(policy.Statements) == 0 -} - -// isValid - checks if Policy is valid or not. -func (policy Policy) isValid() error { - if policy.Version != DefaultVersion && policy.Version != "" { - return Errorf("invalid version '%v'", policy.Version) - } - - for _, statement := range policy.Statements { - if err := statement.isValid(); err != nil { - return err - } - } - - return nil -} - -// MarshalJSON - encodes Policy to JSON data. -func (policy Policy) MarshalJSON() ([]byte, error) { - if err := policy.isValid(); err != nil { - return nil, err - } - - // subtype to avoid recursive call to MarshalJSON() - type subPolicy Policy - return json.Marshal(subPolicy(policy)) -} - -func (policy *Policy) dropDuplicateStatements() { -redo: - for i := range policy.Statements { - for j, statement := range policy.Statements[i+1:] { - if policy.Statements[i].Effect != statement.Effect { - continue - } - - if !policy.Statements[i].Principal.Equals(statement.Principal) { - continue - } - - if !policy.Statements[i].Actions.Equals(statement.Actions) { - continue - } - - if !policy.Statements[i].Resources.Equals(statement.Resources) { - continue - } - - if policy.Statements[i].Conditions.String() != statement.Conditions.String() { - continue - } - policy.Statements = append(policy.Statements[:j], policy.Statements[j+1:]...) - goto redo - } - } -} - -// UnmarshalJSON - decodes JSON data to Policy. 
-func (policy *Policy) UnmarshalJSON(data []byte) error { - // subtype to avoid recursive call to UnmarshalJSON() - type subPolicy Policy - var sp subPolicy - if err := json.Unmarshal(data, &sp); err != nil { - return err - } - - p := Policy(sp) - if err := p.isValid(); err != nil { - return err - } - - p.dropDuplicateStatements() - - *policy = p - - return nil -} - -// Validate - validates all statements are for given bucket or not. -func (policy Policy) Validate(bucketName string) error { - if err := policy.isValid(); err != nil { - return err - } - - for _, statement := range policy.Statements { - if err := statement.Validate(bucketName); err != nil { - return err - } - } - - return nil -} - -// ParseConfig - parses data in given reader to Policy. -func ParseConfig(reader io.Reader, bucketName string) (*Policy, error) { - var policy Policy - - decoder := json.NewDecoder(reader) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&policy); err != nil { - return nil, Errorf("%w", err) - } - - err := policy.Validate(bucketName) - return &policy, err -} diff --git a/pkg/bucket/policy/policy_test.go b/pkg/bucket/policy/policy_test.go deleted file mode 100644 index 039dd612..00000000 --- a/pkg/bucket/policy/policy_test.go +++ /dev/null @@ -1,1146 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - "net" - "reflect" - "testing" - - "github.com/minio/minio/pkg/bucket/policy/condition" -) - -func TestPolicyIsAllowed(t *testing.T) { - case1Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - )}, - } - - case2Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - )}, - } - - _, IPNet, err := net.ParseCIDR("192.168.1.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func1, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case3Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - )}, - } - - case4Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - )}, - } - - anonGetBucketLocationArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetBucketLocationAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - } - - anonPutObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: PutObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - }, - ObjectName: "myobject", - } - - anonGetObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - ObjectName: "myobject", - } - - getBucketLocationArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetBucketLocationAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - IsOwner: true, - } - - putObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: PutObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - }, - IsOwner: true, - ObjectName: "myobject", - } - - getObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - IsOwner: true, - ObjectName: "myobject", - } - - testCases := []struct { - policy Policy - args Args - expectedResult bool - }{ - {case1Policy, anonGetBucketLocationArgs, true}, - {case1Policy, anonPutObjectActionArgs, true}, - {case1Policy, anonGetObjectActionArgs, false}, - {case1Policy, getBucketLocationArgs, true}, - {case1Policy, putObjectActionArgs, true}, - {case1Policy, getObjectActionArgs, true}, - - {case2Policy, anonGetBucketLocationArgs, false}, - {case2Policy, anonPutObjectActionArgs, true}, - {case2Policy, anonGetObjectActionArgs, true}, - {case2Policy, getBucketLocationArgs, true}, - {case2Policy, putObjectActionArgs, true}, - {case2Policy, getObjectActionArgs, true}, - - {case3Policy, anonGetBucketLocationArgs, false}, - {case3Policy, anonPutObjectActionArgs, true}, - {case3Policy, anonGetObjectActionArgs, false}, - {case3Policy, getBucketLocationArgs, true}, - {case3Policy, putObjectActionArgs, true}, - {case3Policy, getObjectActionArgs, true}, - - {case4Policy, anonGetBucketLocationArgs, false}, - {case4Policy, anonPutObjectActionArgs, false}, - {case4Policy, anonGetObjectActionArgs, false}, - {case4Policy, getBucketLocationArgs, true}, - {case4Policy, putObjectActionArgs, false}, - {case4Policy, getObjectActionArgs, true}, - } - - for i, testCase := range testCases { - result := testCase.policy.IsAllowed(testCase.args) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestPolicyIsEmpty(t *testing.T) { - case1Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - 
NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case2Policy := Policy{ - ID: "MyPolicyForMyBucket", - Version: DefaultVersion, - } - - testCases := []struct { - policy Policy - expectedResult bool - }{ - {case1Policy, false}, - {case2Policy, true}, - } - - for i, testCase := range testCases { - result := testCase.policy.IsEmpty() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestPolicyIsValid(t *testing.T) { - case1Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case2Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(GetObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case3Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/yourobject*")), - condition.NewFunctions(), - ), - }, - } - - func1, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func2, err := condition.NewNullFunc( - condition.S3XAmzServerSideEncryption, - false, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case4Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ), - NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func2), - ), - }, - } - - case5Policy := Policy{ - Version: "17-10-2012", - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case6Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2), - ), - }, - } - - case7Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case8Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - testCases := []struct { - policy Policy - expectErr bool - }{ - {case1Policy, false}, - // allowed duplicate principal. - {case2Policy, false}, - // allowed duplicate principal and action. - {case3Policy, false}, - // allowed duplicate principal, action and resource. - {case4Policy, false}, - // Invalid version error. - {case5Policy, true}, - // Invalid statement error. - {case6Policy, true}, - // Duplicate statement success different effects. - {case7Policy, false}, - // Duplicate statement success, duplicate statement dropped. - {case8Policy, false}, - } - - for i, testCase := range testCases { - err := testCase.policy.isValid() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestPolicyMarshalJSON(t *testing.T) { - case1Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - case1Policy.Statements[0].SID = "SomeId1" - case1Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Sid":"SomeId1","Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]}]}`) - - _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func1, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet1, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case2Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(GetObjectAction), - NewResourceSet(NewResource("mybucket", "/yourobject*")), - condition.NewFunctions(func1), - ), - }, - } - case2Data := []byte(`{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]},{"Effect":"Deny","Principal":{"AWS":["*"]},"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/yourobject*"],"Condition":{"IpAddress":{"aws:SourceIp":["192.168.1.0/24"]}}}]}`) - - case3Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("Q3AM3UQ867SPQQA43P2F"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - case3Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["Q3AM3UQ867SPQQA43P2F"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]},{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]}]}`) - - case4Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - case4Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]},{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]}]}`) - - case5Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/yourobject*")), - condition.NewFunctions(), - ), - }, - } - case5Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]},{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/yourobject*"]}]}`) - - _, IPNet2, err := net.ParseCIDR("192.168.2.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func2, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet2, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case6Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ), - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func2), - ), - }, - } - case6Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"IpAddress":{"aws:SourceIp":["192.168.1.0/24"]}}},{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"IpAddress":{"aws:SourceIp":["192.168.2.0/24"]}}}]}`) - - case7Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction), - NewResourceSet(NewResource("mybucket", "")), - condition.NewFunctions(), - ), - }, - } - case7Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetBucketLocation"],"Resource":["arn:aws:s3:::mybucket"]}]}`) - - case8Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ), - }, - } - case8Data := []byte(`{"ID":"MyPolicyForMyBucket1","Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetBucketLocation"],"Resource":["arn:aws:s3:::*"]}]}`) - - func3, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - case9Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2, func3), - ), - }, - } - - testCases := []struct { - policy Policy - expectedResult []byte - expectErr bool - }{ - {case1Policy, case1Data, false}, - {case2Policy, case2Data, false}, - {case3Policy, case3Data, false}, - {case4Policy, case4Data, false}, - {case5Policy, case5Data, false}, - {case6Policy, case6Data, false}, - {case7Policy, case7Data, false}, - {case8Policy, case8Data, false}, - {case9Policy, nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.policy) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestPolicyUnmarshalJSON(t *testing.T) { - case1Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "SomeId1", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - case1Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - case1Policy.Statements[0].SID = "SomeId1" - - case2Data := []byte(`{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Deny", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::mybucket/yourobject*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.1.0/24" - } - } - } - ] -}`) - _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func1, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet1, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case2Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(GetObjectAction), - NewResourceSet(NewResource("mybucket", "/yourobject*")), - condition.NewFunctions(func1), - ), - }, - } - - case3Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": { - "AWS": [ - "Q3AM3UQ867SPQQA43P2F" - ] - }, - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - case3Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("Q3AM3UQ867SPQQA43P2F"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case4Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - case4Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case5Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/yourobject*" - } - ] -}`) - case5Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/yourobject*")), - condition.NewFunctions(), - ), - }, - } - - case6Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.1.0/24" - } - } - }, - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.2.0/24" - } - } - } - ] -}`) - _, IPNet2, err := net.ParseCIDR("192.168.2.0/24") - if err != 
nil { - t.Fatalf("unexpected error. %v\n", err) - } - func2, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet2, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ), - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func2), - ), - }, - } - - case7Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetBucketLocation", - "Resource": "arn:aws:s3:::mybucket" - } - ] -}`) - - case7Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction), - NewResourceSet(NewResource("mybucket", "")), - condition.NewFunctions(), - ), - }, - } - - case8Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:GetBucketLocation", - "Resource": "arn:aws:s3:::*" - } - ] -}`) - - case8Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ), - }, - } - - case9Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "17-10-2012", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - - case10Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - - case10Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(), - ), - }, - } - - case11Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Deny", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - - case11Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(), - ), - NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(), - ), - }, - } - - testCases := []struct { - data []byte - expectedResult Policy - expectErr bool - }{ - {case1Data, case1Policy, false}, - {case2Data, 
case2Policy, false}, - {case3Data, case3Policy, false}, - {case4Data, case4Policy, false}, - {case5Data, case5Policy, false}, - {case6Data, case6Policy, false}, - {case7Data, case7Policy, false}, - {case8Data, case8Policy, false}, - // Invalid version error. - {case9Data, Policy{}, true}, - // Duplicate statement success, duplicate statement removed. - {case10Data, case10Policy, false}, - // Duplicate statement success (Effect differs). - {case11Data, case11Policy, false}, - } - - for i, testCase := range testCases { - var result Policy - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Errorf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Errorf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestPolicyValidate(t *testing.T) { - case1Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - func1, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func2, err := condition.NewNullFunc( - condition.S3XAmzServerSideEncryption, - false, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - case2Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2), - ), - }, - } - - testCases := []struct { - policy Policy - bucketName string - expectErr bool - }{ - {case1Policy, "mybucket", false}, - {case2Policy, "yourbucket", true}, - {case1Policy, "yourbucket", true}, - } - - for i, testCase := range testCases { - err := testCase.policy.Validate(testCase.bucketName) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} diff --git a/pkg/bucket/policy/principal.go b/pkg/bucket/policy/principal.go deleted file mode 100644 index 47d17e03..00000000 --- a/pkg/bucket/policy/principal.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - - "github.com/minio/minio-go/v6/pkg/set" - "github.com/minio/minio/pkg/wildcard" -) - -// Principal - policy principal. -type Principal struct { - AWS set.StringSet -} - -// IsValid - checks whether Principal is valid or not. -func (p Principal) IsValid() bool { - return len(p.AWS) != 0 -} - -// Equals - returns true if principals are equal. 
-func (p Principal) Equals(pp Principal) bool { - return p.AWS.Equals(pp.AWS) -} - -// Intersection - returns principals available in both Principal. -func (p Principal) Intersection(principal Principal) set.StringSet { - return p.AWS.Intersection(principal.AWS) -} - -// MarshalJSON - encodes Principal to JSON data. -func (p Principal) MarshalJSON() ([]byte, error) { - if !p.IsValid() { - return nil, Errorf("invalid principal %v", p) - } - - // subtype to avoid recursive call to MarshalJSON() - type subPrincipal Principal - sp := subPrincipal(p) - return json.Marshal(sp) -} - -// Match - matches given principal is wildcard matching with Principal. -func (p Principal) Match(principal string) bool { - for _, pattern := range p.AWS.ToSlice() { - if wildcard.MatchSimple(pattern, principal) { - return true - } - } - - return false -} - -// UnmarshalJSON - decodes JSON data to Principal. -func (p *Principal) UnmarshalJSON(data []byte) error { - // subtype to avoid recursive call to UnmarshalJSON() - type subPrincipal Principal - var sp subPrincipal - - if err := json.Unmarshal(data, &sp); err != nil { - var s string - if err = json.Unmarshal(data, &s); err != nil { - return err - } - - if s != "*" { - return Errorf("invalid principal '%v'", s) - } - - sp.AWS = set.CreateStringSet("*") - } - - *p = Principal(sp) - - return nil -} - -// NewPrincipal - creates new Principal. -func NewPrincipal(principals ...string) Principal { - return Principal{AWS: set.CreateStringSet(principals...)} -} diff --git a/pkg/bucket/policy/principal_test.go b/pkg/bucket/policy/principal_test.go deleted file mode 100644 index 791445c8..00000000 --- a/pkg/bucket/policy/principal_test.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package policy - -import ( - "encoding/json" - "reflect" - "testing" - - "github.com/minio/minio-go/v6/pkg/set" -) - -func TestPrincipalIsValid(t *testing.T) { - testCases := []struct { - principal Principal - expectedResult bool - }{ - {NewPrincipal("*"), true}, - {NewPrincipal("arn:aws:iam::AccountNumber:root"), true}, - {NewPrincipal(), false}, - } - - for i, testCase := range testCases { - result := testCase.principal.IsValid() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestPrincipalIntersection(t *testing.T) { - testCases := []struct { - principal Principal - principalToIntersect Principal - expectedResult set.StringSet - }{ - {NewPrincipal("*"), NewPrincipal("*"), set.CreateStringSet("*")}, - {NewPrincipal("arn:aws:iam::AccountNumber:root"), NewPrincipal("arn:aws:iam::AccountNumber:myuser"), set.CreateStringSet()}, - {NewPrincipal(), NewPrincipal("*"), set.CreateStringSet()}, - } - - for i, testCase := range testCases { - result := testCase.principal.Intersection(testCase.principalToIntersect) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestPrincipalMarshalJSON(t *testing.T) { - testCases := []struct { - principal Principal - expectedResult []byte - expectErr bool - }{ - {NewPrincipal("*"), []byte(`{"AWS":["*"]}`), false}, - {NewPrincipal("arn:aws:iam::AccountNumber:*"), []byte(`{"AWS":["arn:aws:iam::AccountNumber:*"]}`), false}, - {NewPrincipal(), nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.principal) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestPrincipalMatch(t *testing.T) { - testCases := []struct { - principals Principal - principal string - expectedResult bool - }{ - {NewPrincipal("*"), "AccountNumber", true}, - {NewPrincipal("arn:aws:iam::*"), "arn:aws:iam::AccountNumber:root", true}, - {NewPrincipal("arn:aws:iam::AccountNumber:*"), "arn:aws:iam::TestAccountNumber:root", false}, - } - - for i, testCase := range testCases { - result := testCase.principals.Match(testCase.principal) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestPrincipalUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult Principal - expectErr bool - }{ - {[]byte(`"*"`), NewPrincipal("*"), false}, - {[]byte(`{"AWS": "*"}`), NewPrincipal("*"), false}, - {[]byte(`{"AWS": "arn:aws:iam::AccountNumber:*"}`), NewPrincipal("arn:aws:iam::AccountNumber:*"), false}, - {[]byte(`"arn:aws:iam::AccountNumber:*"`), NewPrincipal(), true}, - {[]byte(`["arn:aws:iam::AccountNumber:*", "arn:aws:iam:AnotherAccount:*"]`), NewPrincipal(), true}, - } - - for i, testCase := range testCases { - var result Principal - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, 
testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/bucket/policy/resource.go b/pkg/bucket/policy/resource.go deleted file mode 100644 index ca892f46..00000000 --- a/pkg/bucket/policy/resource.go +++ /dev/null @@ -1,139 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - "strings" - - "github.com/minio/minio/pkg/bucket/policy/condition" - "github.com/minio/minio/pkg/wildcard" -) - -// ResourceARNPrefix - resource ARN prefix as per AWS S3 specification. -const ResourceARNPrefix = "arn:aws:s3:::" - -// Resource - resource in policy statement. -type Resource struct { - BucketName string - Pattern string -} - -func (r Resource) isBucketPattern() bool { - return !strings.Contains(r.Pattern, "/") -} - -func (r Resource) isObjectPattern() bool { - return strings.Contains(r.Pattern, "/") || strings.Contains(r.BucketName, "*") -} - -// IsValid - checks whether Resource is valid or not. -func (r Resource) IsValid() bool { - return r.BucketName != "" && r.Pattern != "" -} - -// Match - matches object name with resource pattern. -func (r Resource) Match(resource string, conditionValues map[string][]string) bool { - pattern := r.Pattern - for _, key := range condition.CommonKeys { - // Empty values are not supported for policy variables. - if rvalues, ok := conditionValues[key.Name()]; ok && rvalues[0] != "" { - pattern = strings.Replace(pattern, key.VarName(), rvalues[0], -1) - } - } - - return wildcard.Match(pattern, resource) -} - -// MarshalJSON - encodes Resource to JSON data. -func (r Resource) MarshalJSON() ([]byte, error) { - if !r.IsValid() { - return nil, Errorf("invalid resource %v", r) - } - - return json.Marshal(r.String()) -} - -func (r Resource) String() string { - return ResourceARNPrefix + r.Pattern -} - -// UnmarshalJSON - decodes JSON data to Resource. -func (r *Resource) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - parsedResource, err := parseResource(s) - if err != nil { - return err - } - - *r = parsedResource - - return nil -} - -// Validate - validates Resource is for given bucket or not. -func (r Resource) Validate(bucketName string) error { - if !r.IsValid() { - return Errorf("invalid resource") - } - - if !wildcard.Match(r.BucketName, bucketName) { - return Errorf("bucket name does not match") - } - - return nil -} - -// parseResource - parses string to Resource. 
-func parseResource(s string) (Resource, error) { - if !strings.HasPrefix(s, ResourceARNPrefix) { - return Resource{}, Errorf("invalid resource '%v'", s) - } - - pattern := strings.TrimPrefix(s, ResourceARNPrefix) - tokens := strings.SplitN(pattern, "/", 2) - bucketName := tokens[0] - if bucketName == "" { - return Resource{}, Errorf("invalid resource format '%v'", s) - } - - return Resource{ - BucketName: bucketName, - Pattern: pattern, - }, nil -} - -// NewResource - creates new resource. -func NewResource(bucketName, keyName string) Resource { - pattern := bucketName - if keyName != "" { - if !strings.HasPrefix(keyName, "/") { - pattern += "/" - } - - pattern += keyName - } - - return Resource{ - BucketName: bucketName, - Pattern: pattern, - } -} diff --git a/pkg/bucket/policy/resource_test.go b/pkg/bucket/policy/resource_test.go deleted file mode 100644 index 93ae5c28..00000000 --- a/pkg/bucket/policy/resource_test.go +++ /dev/null @@ -1,221 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestResourceIsBucketPattern(t *testing.T) { - testCases := []struct { - resource Resource - expectedResult bool - }{ - {NewResource("*", ""), true}, - {NewResource("mybucket", ""), true}, - {NewResource("mybucket*", ""), true}, - {NewResource("mybucket?0", ""), true}, - {NewResource("", "*"), false}, - {NewResource("*", "*"), false}, - {NewResource("mybucket", "*"), false}, - {NewResource("mybucket*", "/myobject"), false}, - {NewResource("mybucket?0", "/2010/photos/*"), false}, - } - - for i, testCase := range testCases { - result := testCase.resource.isBucketPattern() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceIsObjectPattern(t *testing.T) { - testCases := []struct { - resource Resource - expectedResult bool - }{ - {NewResource("*", ""), true}, - {NewResource("mybucket*", ""), true}, - {NewResource("", "*"), true}, - {NewResource("*", "*"), true}, - {NewResource("mybucket", "*"), true}, - {NewResource("mybucket*", "/myobject"), true}, - {NewResource("mybucket?0", "/2010/photos/*"), true}, - {NewResource("mybucket", ""), false}, - {NewResource("mybucket?0", ""), false}, - } - - for i, testCase := range testCases { - result := testCase.resource.isObjectPattern() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceIsValid(t *testing.T) { - testCases := []struct { - resource Resource - expectedResult bool - }{ - {NewResource("*", ""), true}, - {NewResource("mybucket*", ""), true}, - {NewResource("*", "*"), true}, - {NewResource("mybucket", "*"), true}, - {NewResource("mybucket*", "/myobject"), true}, - {NewResource("mybucket?0", "/2010/photos/*"), true}, - {NewResource("mybucket", ""), true}, - {NewResource("mybucket?0", ""), true}, - 
{NewResource("", ""), false}, - {NewResource("", "*"), false}, - } - - for i, testCase := range testCases { - result := testCase.resource.IsValid() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceMatch(t *testing.T) { - testCases := []struct { - resource Resource - objectName string - expectedResult bool - }{ - {NewResource("*", ""), "mybucket", true}, - {NewResource("*", ""), "mybucket/myobject", true}, - {NewResource("mybucket*", ""), "mybucket", true}, - {NewResource("mybucket*", ""), "mybucket/myobject", true}, - {NewResource("", "*"), "/myobject", true}, - {NewResource("*", "*"), "mybucket/myobject", true}, - {NewResource("mybucket", "*"), "mybucket/myobject", true}, - {NewResource("mybucket*", "/myobject"), "mybucket/myobject", true}, - {NewResource("mybucket*", "/myobject"), "mybucket100/myobject", true}, - {NewResource("mybucket?0", "/2010/photos/*"), "mybucket20/2010/photos/1.jpg", true}, - {NewResource("mybucket", ""), "mybucket", true}, - {NewResource("mybucket?0", ""), "mybucket30", true}, - {NewResource("", "*"), "mybucket/myobject", false}, - {NewResource("*", "*"), "mybucket", false}, - {NewResource("mybucket", "*"), "mybucket10/myobject", false}, - {NewResource("mybucket?0", "/2010/photos/*"), "mybucket0/2010/photos/1.jpg", false}, - {NewResource("mybucket", ""), "mybucket/myobject", false}, - } - - for i, testCase := range testCases { - result := testCase.resource.Match(testCase.objectName, nil) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceMarshalJSON(t *testing.T) { - testCases := []struct { - resource Resource - expectedResult []byte - expectErr bool - }{ - {NewResource("*", ""), []byte(`"arn:aws:s3:::*"`), false}, - {NewResource("mybucket*", ""), []byte(`"arn:aws:s3:::mybucket*"`), false}, - {NewResource("mybucket", ""), []byte(`"arn:aws:s3:::mybucket"`), false}, - {NewResource("*", "*"), []byte(`"arn:aws:s3:::*/*"`), false}, - {NewResource("mybucket", "*"), []byte(`"arn:aws:s3:::mybucket/*"`), false}, - {NewResource("mybucket*", "myobject"), []byte(`"arn:aws:s3:::mybucket*/myobject"`), false}, - {NewResource("mybucket?0", "/2010/photos/*"), []byte(`"arn:aws:s3:::mybucket?0/2010/photos/*"`), false}, - {Resource{}, nil, true}, - {NewResource("", "*"), nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.resource) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestResourceUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult Resource - expectErr bool - }{ - {[]byte(`"arn:aws:s3:::*"`), NewResource("*", ""), false}, - {[]byte(`"arn:aws:s3:::mybucket*"`), NewResource("mybucket*", ""), false}, - {[]byte(`"arn:aws:s3:::mybucket"`), NewResource("mybucket", ""), false}, - {[]byte(`"arn:aws:s3:::*/*"`), NewResource("*", "*"), false}, - {[]byte(`"arn:aws:s3:::mybucket/*"`), NewResource("mybucket", "*"), false}, - {[]byte(`"arn:aws:s3:::mybucket*/myobject"`), NewResource("mybucket*", "myobject"), false}, - {[]byte(`"arn:aws:s3:::mybucket?0/2010/photos/*"`), 
NewResource("mybucket?0", "/2010/photos/*"), false}, - {[]byte(`"mybucket/myobject*"`), Resource{}, true}, - {[]byte(`"arn:aws:s3:::/*"`), Resource{}, true}, - } - - for i, testCase := range testCases { - var result Resource - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestResourceValidate(t *testing.T) { - testCases := []struct { - resource Resource - bucketName string - expectErr bool - }{ - {NewResource("mybucket", "/myobject*"), "mybucket", false}, - {NewResource("", "/myobject*"), "yourbucket", true}, - {NewResource("mybucket", "/myobject*"), "yourbucket", true}, - } - - for i, testCase := range testCases { - err := testCase.resource.Validate(testCase.bucketName) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} diff --git a/pkg/bucket/policy/resourceset.go b/pkg/bucket/policy/resourceset.go deleted file mode 100644 index cab7dbbb..00000000 --- a/pkg/bucket/policy/resourceset.go +++ /dev/null @@ -1,165 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - "fmt" - "sort" - - "github.com/minio/minio-go/v6/pkg/set" -) - -// ResourceSet - set of resources in policy statement. -type ResourceSet map[Resource]struct{} - -// bucketResourceExists - checks if at least one bucket resource exists in the set. -func (resourceSet ResourceSet) bucketResourceExists() bool { - for resource := range resourceSet { - if resource.isBucketPattern() { - return true - } - } - - return false -} - -// objectResourceExists - checks if at least one object resource exists in the set. -func (resourceSet ResourceSet) objectResourceExists() bool { - for resource := range resourceSet { - if resource.isObjectPattern() { - return true - } - } - - return false -} - -// Add - adds resource to resource set. -func (resourceSet ResourceSet) Add(resource Resource) { - resourceSet[resource] = struct{}{} -} - -// Equals - checks whether given resource set is equal to current resource set or not. -func (resourceSet ResourceSet) Equals(sresourceSet ResourceSet) bool { - // If length of set is not equal to length of given set, the - // set is not equal to given set. - if len(resourceSet) != len(sresourceSet) { - return false - } - - // As both sets are equal in length, check each element is equal. - for k := range resourceSet { - if _, ok := sresourceSet[k]; !ok { - return false - } - } - - return true -} - -// Intersection - returns resources available in both ResourceSets. 
-func (resourceSet ResourceSet) Intersection(sset ResourceSet) ResourceSet { - nset := NewResourceSet() - for k := range resourceSet { - if _, ok := sset[k]; ok { - nset.Add(k) - } - } - - return nset -} - -// MarshalJSON - encodes ResourceSet to JSON data. -func (resourceSet ResourceSet) MarshalJSON() ([]byte, error) { - if len(resourceSet) == 0 { - return nil, Errorf("empty resources not allowed") - } - - resources := []Resource{} - for resource := range resourceSet { - resources = append(resources, resource) - } - - return json.Marshal(resources) -} - -// Match - matches object name with anyone of resource pattern in resource set. -func (resourceSet ResourceSet) Match(resource string, conditionValues map[string][]string) bool { - for r := range resourceSet { - if r.Match(resource, conditionValues) { - return true - } - } - - return false -} - -func (resourceSet ResourceSet) String() string { - resources := []string{} - for resource := range resourceSet { - resources = append(resources, resource.String()) - } - sort.Strings(resources) - - return fmt.Sprintf("%v", resources) -} - -// UnmarshalJSON - decodes JSON data to ResourceSet. -func (resourceSet *ResourceSet) UnmarshalJSON(data []byte) error { - var sset set.StringSet - if err := json.Unmarshal(data, &sset); err != nil { - return err - } - - *resourceSet = make(ResourceSet) - for _, s := range sset.ToSlice() { - resource, err := parseResource(s) - if err != nil { - return err - } - - if _, found := (*resourceSet)[resource]; found { - return Errorf("duplicate resource '%v' found", s) - } - - resourceSet.Add(resource) - } - - return nil -} - -// Validate - validates ResourceSet is for given bucket or not. -func (resourceSet ResourceSet) Validate(bucketName string) error { - for resource := range resourceSet { - if err := resource.Validate(bucketName); err != nil { - return err - } - } - - return nil -} - -// NewResourceSet - creates new resource set. -func NewResourceSet(resources ...Resource) ResourceSet { - resourceSet := make(ResourceSet) - for _, resource := range resources { - resourceSet.Add(resource) - } - - return resourceSet -} diff --git a/pkg/bucket/policy/resourceset_test.go b/pkg/bucket/policy/resourceset_test.go deleted file mode 100644 index 3f5fc102..00000000 --- a/pkg/bucket/policy/resourceset_test.go +++ /dev/null @@ -1,240 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package policy - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestResourceSetBucketResourceExists(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - expectedResult bool - }{ - {NewResourceSet(NewResource("*", "")), true}, - {NewResourceSet(NewResource("mybucket", "")), true}, - {NewResourceSet(NewResource("mybucket*", "")), true}, - {NewResourceSet(NewResource("mybucket?0", "")), true}, - {NewResourceSet(NewResource("mybucket", "/2010/photos/*"), NewResource("mybucket", "")), true}, - {NewResourceSet(NewResource("", "*")), false}, - {NewResourceSet(NewResource("*", "*")), false}, - {NewResourceSet(NewResource("mybucket", "*")), false}, - {NewResourceSet(NewResource("mybucket*", "/myobject")), false}, - {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), false}, - } - - for i, testCase := range testCases { - result := testCase.resourceSet.bucketResourceExists() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceSetObjectResourceExists(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - expectedResult bool - }{ - {NewResourceSet(NewResource("*", "")), true}, - {NewResourceSet(NewResource("mybucket*", "")), true}, - {NewResourceSet(NewResource("", "*")), true}, - {NewResourceSet(NewResource("*", "*")), true}, - {NewResourceSet(NewResource("mybucket", "*")), true}, - {NewResourceSet(NewResource("mybucket*", "/myobject")), true}, - {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), true}, - {NewResourceSet(NewResource("mybucket", ""), NewResource("mybucket", "/2910/photos/*")), true}, - {NewResourceSet(NewResource("mybucket", "")), false}, - {NewResourceSet(NewResource("mybucket?0", "")), false}, - } - - for i, testCase := range testCases { - result := testCase.resourceSet.objectResourceExists() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceSetAdd(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - resource Resource - expectedResult ResourceSet - }{ - {NewResourceSet(), NewResource("mybucket", "/myobject*"), - NewResourceSet(NewResource("mybucket", "/myobject*"))}, - {NewResourceSet(NewResource("mybucket", "/myobject*")), - NewResource("mybucket", "/yourobject*"), - NewResourceSet(NewResource("mybucket", "/myobject*"), - NewResource("mybucket", "/yourobject*"))}, - {NewResourceSet(NewResource("mybucket", "/myobject*")), - NewResource("mybucket", "/myobject*"), - NewResourceSet(NewResource("mybucket", "/myobject*"))}, - } - - for i, testCase := range testCases { - testCase.resourceSet.Add(testCase.resource) - - if !reflect.DeepEqual(testCase.resourceSet, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.resourceSet) - } - } -} - -func TestResourceSetIntersection(t *testing.T) { - testCases := []struct { - set ResourceSet - setToIntersect ResourceSet - expectedResult ResourceSet - }{ - {NewResourceSet(), NewResourceSet(NewResource("mybucket", "/myobject*")), NewResourceSet()}, - {NewResourceSet(NewResource("mybucket", "/myobject*")), NewResourceSet(), NewResourceSet()}, - {NewResourceSet(NewResource("mybucket", "/myobject*")), - NewResourceSet(NewResource("mybucket", "/myobject*"), NewResource("mybucket", "/yourobject*")), - NewResourceSet(NewResource("mybucket", "/myobject*"))}, - } - - for i, testCase := range testCases { 
- result := testCase.set.Intersection(testCase.setToIntersect) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) - } - } -} - -func TestResourceSetMarshalJSON(t *testing.T) { - testCases := []struct { - resoruceSet ResourceSet - expectedResult []byte - expectErr bool - }{ - {NewResourceSet(NewResource("mybucket", "/myobject*")), - []byte(`["arn:aws:s3:::mybucket/myobject*"]`), false}, - {NewResourceSet(NewResource("mybucket", "/photos/myobject*")), - []byte(`["arn:aws:s3:::mybucket/photos/myobject*"]`), false}, - {NewResourceSet(), nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.resoruceSet) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestResourceSetMatch(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - resource string - expectedResult bool - }{ - {NewResourceSet(NewResource("*", "")), "mybucket", true}, - {NewResourceSet(NewResource("*", "")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("mybucket*", "")), "mybucket", true}, - {NewResourceSet(NewResource("mybucket*", "")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("", "*")), "/myobject", true}, - {NewResourceSet(NewResource("*", "*")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("mybucket", "*")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("mybucket*", "/myobject")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("mybucket*", "/myobject")), "mybucket100/myobject", true}, - {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), "mybucket20/2010/photos/1.jpg", true}, - {NewResourceSet(NewResource("mybucket", "")), "mybucket", true}, - {NewResourceSet(NewResource("mybucket?0", "")), "mybucket30", true}, - {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*"), - NewResource("mybucket", "/2010/photos/*")), "mybucket/2010/photos/1.jpg", true}, - {NewResourceSet(NewResource("", "*")), "mybucket/myobject", false}, - {NewResourceSet(NewResource("*", "*")), "mybucket", false}, - {NewResourceSet(NewResource("mybucket", "*")), "mybucket10/myobject", false}, - {NewResourceSet(NewResource("mybucket", "")), "mybucket/myobject", false}, - {NewResourceSet(), "mybucket/myobject", false}, - } - - for i, testCase := range testCases { - result := testCase.resourceSet.Match(testCase.resource, nil) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceSetUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult ResourceSet - expectErr bool - }{ - {[]byte(`"arn:aws:s3:::mybucket/myobject*"`), - NewResourceSet(NewResource("mybucket", "/myobject*")), false}, - {[]byte(`"arn:aws:s3:::mybucket/photos/myobject*"`), - NewResourceSet(NewResource("mybucket", "/photos/myobject*")), false}, - {[]byte(`"arn:aws:s3:::mybucket"`), NewResourceSet(NewResource("mybucket", "")), false}, - {[]byte(`"mybucket/myobject*"`), nil, true}, - } - - for i, testCase := range testCases { - var result ResourceSet - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != 
nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestResourceSetValidate(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - bucketName string - expectErr bool - }{ - {NewResourceSet(NewResource("mybucket", "/myobject*")), "mybucket", false}, - {NewResourceSet(NewResource("", "/myobject*")), "yourbucket", true}, - {NewResourceSet(NewResource("mybucket", "/myobject*")), "yourbucket", true}, - } - - for i, testCase := range testCases { - err := testCase.resourceSet.Validate(testCase.bucketName) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} diff --git a/pkg/bucket/policy/statement.go b/pkg/bucket/policy/statement.go deleted file mode 100644 index 5b34f98a..00000000 --- a/pkg/bucket/policy/statement.go +++ /dev/null @@ -1,155 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package policy - -import ( - "encoding/json" - "strings" - - "github.com/minio/minio/pkg/bucket/policy/condition" -) - -// Statement - policy statement. -type Statement struct { - SID ID `json:"Sid,omitempty"` - Effect Effect `json:"Effect"` - Principal Principal `json:"Principal"` - Actions ActionSet `json:"Action"` - Resources ResourceSet `json:"Resource"` - Conditions condition.Functions `json:"Condition,omitempty"` -} - -// IsAllowed - checks given policy args is allowed to continue the Rest API. -func (statement Statement) IsAllowed(args Args) bool { - check := func() bool { - if !statement.Principal.Match(args.AccountName) { - return false - } - - if !statement.Actions.Contains(args.Action) { - return false - } - - resource := args.BucketName - if args.ObjectName != "" { - if !strings.HasPrefix(args.ObjectName, "/") { - resource += "/" - } - - resource += args.ObjectName - } - - if !statement.Resources.Match(resource, args.ConditionValues) { - return false - } - - return statement.Conditions.Evaluate(args.ConditionValues) - } - - return statement.Effect.IsAllowed(check()) -} - -// isValid - checks whether statement is valid or not. 
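// A statement passes validation only when Effect, Principal, Actions and Resources are
// all present, every action is paired with a compatible resource (object actions need an
// object resource such as arn:aws:s3:::mybucket/myobject*, bucket actions a bucket
// resource such as arn:aws:s3:::mybucket), and every condition key is supported by each
// action. For how a statement is then evaluated at request time via IsAllowed above, a
// minimal sketch using identifiers from this package (the access key, bucket and object
// names are illustrative):
//
//	s := NewStatement(
//		Allow,
//		NewPrincipal("*"),
//		NewActionSet(GetObjectAction),
//		NewResourceSet(NewResource("mybucket", "/public/*")),
//		condition.NewFunctions(),
//	)
//	allowed := s.IsAllowed(Args{
//		AccountName:     "Q3AM3UQ867SPQQA43P2F",
//		Action:          GetObjectAction,
//		BucketName:      "mybucket",
//		ObjectName:      "public/index.html",
//		ConditionValues: map[string][]string{},
//	}) // expected true: principal, action and resource all match, no conditions apply.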
-func (statement Statement) isValid() error { - if !statement.Effect.IsValid() { - return Errorf("invalid Effect %v", statement.Effect) - } - - if !statement.Principal.IsValid() { - return Errorf("invalid Principal %v", statement.Principal) - } - - if len(statement.Actions) == 0 { - return Errorf("Action must not be empty") - } - - if len(statement.Resources) == 0 { - return Errorf("Resource must not be empty") - } - - for action := range statement.Actions { - if action.isObjectAction() { - if !statement.Resources.objectResourceExists() { - return Errorf("unsupported Resource found %v for action %v", statement.Resources, action) - } - } else { - if !statement.Resources.bucketResourceExists() { - return Errorf("unsupported Resource found %v for action %v", statement.Resources, action) - } - } - - keys := statement.Conditions.Keys() - keyDiff := keys.Difference(actionConditionKeyMap[action]) - if !keyDiff.IsEmpty() { - return Errorf("unsupported condition keys '%v' used for action '%v'", keyDiff, action) - } - } - - return nil -} - -// MarshalJSON - encodes JSON data to Statement. -func (statement Statement) MarshalJSON() ([]byte, error) { - if err := statement.isValid(); err != nil { - return nil, err - } - - // subtype to avoid recursive call to MarshalJSON() - type subStatement Statement - ss := subStatement(statement) - return json.Marshal(ss) -} - -// UnmarshalJSON - decodes JSON data to Statement. -func (statement *Statement) UnmarshalJSON(data []byte) error { - // subtype to avoid recursive call to UnmarshalJSON() - type subStatement Statement - var ss subStatement - - if err := json.Unmarshal(data, &ss); err != nil { - return err - } - - s := Statement(ss) - if err := s.isValid(); err != nil { - return err - } - - *statement = s - - return nil -} - -// Validate - validates Statement is for given bucket or not. -func (statement Statement) Validate(bucketName string) error { - if err := statement.isValid(); err != nil { - return err - } - - return statement.Resources.Validate(bucketName) -} - -// NewStatement - creates new statement. -func NewStatement(effect Effect, principal Principal, actionSet ActionSet, resourceSet ResourceSet, conditions condition.Functions) Statement { - return Statement{ - Effect: effect, - Principal: principal, - Actions: actionSet, - Resources: resourceSet, - Conditions: conditions, - } -} diff --git a/pkg/bucket/policy/statement_test.go b/pkg/bucket/policy/statement_test.go deleted file mode 100644 index c53d7b82..00000000 --- a/pkg/bucket/policy/statement_test.go +++ /dev/null @@ -1,571 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package policy - -import ( - "encoding/json" - "net" - "reflect" - "testing" - - "github.com/minio/minio/pkg/bucket/policy/condition" -) - -func TestStatementIsAllowed(t *testing.T) { - case1Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ) - - case2Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ) - - _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func1, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet1, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ) - - case4Statement := NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ) - - anonGetBucketLocationArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetBucketLocationAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - } - - anonPutObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: PutObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - }, - ObjectName: "myobject", - } - - anonGetObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - ObjectName: "myobject", - } - - getBucketLocationArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetBucketLocationAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - IsOwner: true, - } - - putObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: PutObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - }, - IsOwner: true, - ObjectName: "myobject", - } - - getObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - IsOwner: true, - ObjectName: "myobject", - } - - testCases := []struct { - statement Statement - args Args - expectedResult bool - }{ - {case1Statement, anonGetBucketLocationArgs, true}, - {case1Statement, anonPutObjectActionArgs, true}, - {case1Statement, anonGetObjectActionArgs, false}, - {case1Statement, getBucketLocationArgs, true}, - {case1Statement, putObjectActionArgs, true}, - {case1Statement, getObjectActionArgs, false}, - - {case2Statement, anonGetBucketLocationArgs, false}, - {case2Statement, anonPutObjectActionArgs, true}, - {case2Statement, anonGetObjectActionArgs, true}, - {case2Statement, getBucketLocationArgs, false}, - {case2Statement, putObjectActionArgs, true}, - {case2Statement, getObjectActionArgs, true}, - - {case3Statement, anonGetBucketLocationArgs, false}, - {case3Statement, anonPutObjectActionArgs, true}, - {case3Statement, anonGetObjectActionArgs, false}, - {case3Statement, getBucketLocationArgs, false}, - 
{case3Statement, putObjectActionArgs, true}, - {case3Statement, getObjectActionArgs, false}, - - {case4Statement, anonGetBucketLocationArgs, true}, - {case4Statement, anonPutObjectActionArgs, false}, - {case4Statement, anonGetObjectActionArgs, true}, - {case4Statement, getBucketLocationArgs, true}, - {case4Statement, putObjectActionArgs, false}, - {case4Statement, getObjectActionArgs, true}, - } - - for i, testCase := range testCases { - result := testCase.statement.IsAllowed(testCase.args) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStatementIsValid(t *testing.T) { - _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func1, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet1, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func2, err := condition.NewStringEqualsFunc( - condition.S3XAmzCopySource, - "mybucket/myobject", - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - statement Statement - expectErr bool - }{ - // Invalid effect error. - {NewStatement( - Effect("foo"), - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ), true}, - // Invalid principal error. - {NewStatement( - Allow, - NewPrincipal(), - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ), true}, - // Empty actions error. - {NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ), true}, - // Empty resources error. - {NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(), - condition.NewFunctions(), - ), true}, - // Unsupported resource found for object action. - {NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "")), - condition.NewFunctions(), - ), true}, - // Unsupported resource found for bucket action. - {NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(), - ), true}, - // Unsupported condition key for action. 
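// The next case is rejected by isValid because s3:x-amz-copy-source is not a condition
// key registered for s3:GetObject in actionConditionKeyMap, so pairing that condition
// with GetObjectAction yields an "unsupported condition keys" error, even though the
// same condition is accepted for a statement limited to PutObjectAction (compare
// case2Statement in TestStatementMarshalJSON below).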
- {NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2), - ), true}, - {NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1), - ), false}, - } - - for i, testCase := range testCases { - err := testCase.statement.isValid() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestStatementMarshalJSON(t *testing.T) { - case1Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ) - case1Statement.SID = "SomeId1" - case1Data := []byte(`{"Sid":"SomeId1","Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]}`) - - func1, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - case2Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ) - case2Data := []byte(`{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"Null":{"s3:x-amz-copy-source":[true]}}}`) - - func2, err := condition.NewNullFunc( - condition.S3XAmzServerSideEncryption, - false, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - case3Statement := NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func2), - ) - case3Data := []byte(`{"Effect":"Deny","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"Null":{"s3:x-amz-server-side-encryption":[false]}}}`) - - case4Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2), - ) - - testCases := []struct { - statement Statement - expectedResult []byte - expectErr bool - }{ - {case1Statement, case1Data, false}, - {case2Statement, case2Data, false}, - {case3Statement, case3Data, false}, - // Invalid statement error. 
- {case4Statement, nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.statement) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestStatementUnmarshalJSON(t *testing.T) { - case1Data := []byte(`{ - "Sid": "SomeId1", - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" -}`) - case1Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ) - case1Statement.SID = "SomeId1" - - case2Data := []byte(`{ - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "Null": { - "s3:x-amz-copy-source": true - } - } -}`) - func1, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - case2Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ) - - case3Data := []byte(`{ - "Effect": "Deny", - "Principal": { - "AWS": "*" - }, - "Action": [ - "s3:PutObject", - "s3:GetObject" - ], - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "Null": { - "s3:x-amz-server-side-encryption": "false" - } - } -}`) - func2, err := condition.NewNullFunc( - condition.S3XAmzServerSideEncryption, - false, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - case3Statement := NewStatement( - Deny, - NewPrincipal("*"), - NewActionSet(PutObjectAction, GetObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func2), - ) - - case4Data := []byte(`{ - "Effect": "Allow", - "Principal": "Q3AM3UQ867SPQQA43P2F", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" -}`) - - case5Data := []byte(`{ - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" -}`) - - case6Data := []byte(`{ - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" -}`) - - case7Data := []byte(`{ - "Effect": "Allow", - "Principal": "*", - "Resource": "arn:aws:s3:::mybucket/myobject*" -}`) - - case8Data := []byte(`{ - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject" -}`) - - case9Data := []byte(`{ - "Effect": "Allow", - "Principal": "*", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - } -}`) - - case10Data := []byte(`{ - "Effect": "Deny", - "Principal": { - "AWS": "*" - }, - "Action": [ - "s3:PutObject", - "s3:GetObject" - ], - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "StringEquals": { - "s3:x-amz-copy-source": "yourbucket/myobject*" - } - } -}`) - - testCases := []struct { - data []byte - expectedResult Statement - expectErr bool - }{ - {case1Data, case1Statement, false}, - {case2Data, case2Statement, false}, - {case3Data, case3Statement, false}, - // JSON unmarshaling error. - {case4Data, Statement{}, true}, - // Invalid effect error. 
- {case5Data, Statement{}, true}, - // empty principal error. - {case6Data, Statement{}, true}, - // Empty action error. - {case7Data, Statement{}, true}, - // Empty resource error. - {case8Data, Statement{}, true}, - // Empty condition error. - {case9Data, Statement{}, true}, - // Unsupported condition key error. - {case10Data, Statement{}, true}, - } - - for i, testCase := range testCases { - var result Statement - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestStatementValidate(t *testing.T) { - case1Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ) - - func1, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func2, err := condition.NewNullFunc( - condition.S3XAmzServerSideEncryption, - false, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - case2Statement := NewStatement( - Allow, - NewPrincipal("*"), - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2), - ) - - testCases := []struct { - statement Statement - bucketName string - expectErr bool - }{ - {case1Statement, "mybucket", false}, - {case2Statement, "mybucket", true}, - {case1Statement, "yourbucket", true}, - } - - for i, testCase := range testCases { - err := testCase.statement.Validate(testCase.bucketName) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} diff --git a/pkg/certs/certs.go b/pkg/certs/certs.go deleted file mode 100644 index 1888db90..00000000 --- a/pkg/certs/certs.go +++ /dev/null @@ -1,202 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package certs - -import ( - "crypto/tls" - "os" - "path/filepath" - "sync" - "time" - - "github.com/rjeczalik/notify" -) - -// A Certs represents a certificate manager able to watch certificate -// and key pairs for changes. -type Certs struct { - sync.RWMutex - // user input params. - certFile string - keyFile string - loadCert LoadX509KeyPairFunc - - // points to the latest certificate. - cert *tls.Certificate - - // internal param to track for events, also - // used to close the watcher. - e chan notify.EventInfo -} - -// LoadX509KeyPairFunc - provides a type for custom cert loader function. 
-type LoadX509KeyPairFunc func(certFile, keyFile string) (tls.Certificate, error) - -// New initializes a new certs monitor. -func New(certFile, keyFile string, loadCert LoadX509KeyPairFunc) (*Certs, error) { - certFileIsLink, err := checkSymlink(certFile) - if err != nil { - return nil, err - } - keyFileIsLink, err := checkSymlink(keyFile) - if err != nil { - return nil, err - } - c := &Certs{ - certFile: certFile, - keyFile: keyFile, - loadCert: loadCert, - // Make the channel buffered to ensure no event is dropped. Notify will drop - // an event if the receiver is not able to keep up the sending pace. - e: make(chan notify.EventInfo, 1), - } - - if certFileIsLink && keyFileIsLink { - if err := c.watchSymlinks(); err != nil { - return nil, err - } - } else { - if err := c.watch(); err != nil { - return nil, err - } - } - - return c, nil -} - -func checkSymlink(file string) (bool, error) { - st, err := os.Lstat(file) - if err != nil { - return false, err - } - return st.Mode()&os.ModeSymlink == os.ModeSymlink, nil -} - -// watchSymlinks reloads symlinked files since fsnotify cannot watch -// on symbolic links. -func (c *Certs) watchSymlinks() (err error) { - cert, err := c.loadCert(c.certFile, c.keyFile) - if err != nil { - return err - } - c.Lock() - c.cert = &cert - c.Unlock() - go func() { - for { - select { - case <-c.e: - // Once stopped exits this routine. - return - case <-time.After(24 * time.Hour): - cert, cerr := c.loadCert(c.certFile, c.keyFile) - if cerr != nil { - continue - } - c.Lock() - c.cert = &cert - c.Unlock() - } - } - }() - return nil -} - -// watch starts watching for changes to the certificate -// and key files. On any change the certificate and key -// are reloaded. If there is an issue the loading will fail -// and the old (if any) certificates and keys will continue -// to be used. -func (c *Certs) watch() (err error) { - defer func() { - if err != nil { - // Stop any watches previously setup after an error. - notify.Stop(c.e) - } - }() - - // Windows doesn't allow for watching file changes but instead allows - // for directory changes only, while we can still watch for changes - // on files on other platforms. Watch parent directory on all platforms - // for simplicity. - if err = notify.Watch(filepath.Dir(c.certFile), c.e, eventWrite...); err != nil { - return err - } - if err = notify.Watch(filepath.Dir(c.keyFile), c.e, eventWrite...); err != nil { - return err - } - cert, err := c.loadCert(c.certFile, c.keyFile) - if err != nil { - return err - } - c.Lock() - c.cert = &cert - c.Unlock() - if err != nil { - return err - } - go c.run() - return nil -} - -func (c *Certs) run() { - for event := range c.e { - base := filepath.Base(event.Path()) - if isWriteEvent(event.Event()) { - certChanged := base == filepath.Base(c.certFile) - keyChanged := base == filepath.Base(c.keyFile) - if certChanged || keyChanged { - cert, err := c.loadCert(c.certFile, c.keyFile) - if err != nil { - // ignore the error continue to use - // old certificates. - continue - } - c.Lock() - c.cert = &cert - c.Unlock() - } - } - } -} - -// GetCertificateFunc provides a GetCertificate type for custom client implementations. -type GetCertificateFunc func(hello *tls.ClientHelloInfo) (*tls.Certificate, error) - -// GetCertificate returns the loaded certificate for use by -// the TLSConfig fields GetCertificate field in a http.Server. 
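// A minimal usage sketch (paths, address and imports of net/http, crypto/tls and log are
// assumed, not taken from this package): wire the manager into a TLS server so that a
// certificate rotated on disk is picked up without restarting the process:
//
//	c, err := certs.New("/etc/minio/certs/server.crt", "/etc/minio/certs/server.key", tls.LoadX509KeyPair)
//	if err != nil {
//		log.Fatalln(err)
//	}
//	defer c.Stop()
//	srv := &http.Server{
//		Addr:      ":9000",
//		TLSConfig: &tls.Config{GetCertificate: c.GetCertificate},
//	}
//	// Empty cert/key arguments make the server rely on TLSConfig.GetCertificate.
//	log.Fatalln(srv.ListenAndServeTLS("", ""))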
-func (c *Certs) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) { - c.RLock() - defer c.RUnlock() - return c.cert, nil -} - -// GetClientCertificate returns the loaded certificate for use by -// the TLSConfig fields GetClientCertificate field in a http.Server. -func (c *Certs) GetClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) { - c.RLock() - defer c.RUnlock() - return c.cert, nil -} - -// Stop tells loader to stop watching for changes to the -// certificate and key files. -func (c *Certs) Stop() { - if c != nil { - notify.Stop(c.e) - } -} diff --git a/pkg/certs/certs_test.go b/pkg/certs/certs_test.go deleted file mode 100644 index 2eeaac44..00000000 --- a/pkg/certs/certs_test.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package certs_test - -import ( - "crypto/tls" - "io" - "os" - "reflect" - "testing" - "time" - - "github.com/minio/minio/pkg/certs" -) - -func updateCerts(crt, key string) { - // ignore error handling - crtSource, _ := os.Open(crt) - defer crtSource.Close() - crtDest, _ := os.Create("server.crt") - defer crtDest.Close() - io.Copy(crtDest, crtSource) - - keySource, _ := os.Open(key) - defer keySource.Close() - keyDest, _ := os.Create("server.key") - defer keyDest.Close() - io.Copy(keyDest, keySource) -} - -func TestCertNew(t *testing.T) { - c, err := certs.New("server.crt", "server.key", tls.LoadX509KeyPair) - if err != nil { - t.Fatal(err) - } - defer c.Stop() - hello := &tls.ClientHelloInfo{} - gcert, err := c.GetCertificate(hello) - if err != nil { - t.Fatal(err) - } - expectedCert, err := tls.LoadX509KeyPair("server.crt", "server.key") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) { - t.Error("certificate doesn't match expected certificate") - } - _, err = certs.New("server.crt", "server2.key", tls.LoadX509KeyPair) - if err == nil { - t.Fatal("Expected to fail but got success") - } -} - -func TestValidPairAfterWrite(t *testing.T) { - expectedCert, err := tls.LoadX509KeyPair("server2.crt", "server2.key") - if err != nil { - t.Fatal(err) - } - - c, err := certs.New("server.crt", "server.key", tls.LoadX509KeyPair) - if err != nil { - t.Fatal(err) - } - defer c.Stop() - - updateCerts("server2.crt", "server2.key") - defer updateCerts("server1.crt", "server1.key") - - // Wait for the write event.. 
- time.Sleep(200 * time.Millisecond) - - hello := &tls.ClientHelloInfo{} - gcert, err := c.GetCertificate(hello) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) { - t.Error("certificate doesn't match expected certificate") - } - - rInfo := &tls.CertificateRequestInfo{} - gcert, err = c.GetClientCertificate(rInfo) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) { - t.Error("client certificate doesn't match expected certificate") - } -} - -func TestStop(t *testing.T) { - expectedCert, err := tls.LoadX509KeyPair("server2.crt", "server2.key") - if err != nil { - t.Fatal(err) - } - - c, err := certs.New("server.crt", "server.key", tls.LoadX509KeyPair) - if err != nil { - t.Fatal(err) - } - c.Stop() - - // No one is listening on the event, will be ignored and - // certificate will not be reloaded. - updateCerts("server2.crt", "server2.key") - defer updateCerts("server1.crt", "server1.key") - - hello := &tls.ClientHelloInfo{} - gcert, err := c.GetCertificate(hello) - if err != nil { - t.Fatal(err) - } - - if reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) { - t.Error("certificate shouldn't match, but matched") - } -} diff --git a/pkg/certs/event.go b/pkg/certs/event.go deleted file mode 100644 index 3a266c65..00000000 --- a/pkg/certs/event.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package certs - -import ( - "github.com/rjeczalik/notify" -) - -// isWriteEvent checks if the event returned is a write event -func isWriteEvent(event notify.Event) bool { - for _, ev := range eventWrite { - if event&ev != 0 { - return true - } - } - return false -} diff --git a/pkg/certs/event_linux.go b/pkg/certs/event_linux.go deleted file mode 100644 index e277ea67..00000000 --- a/pkg/certs/event_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package certs - -import "github.com/rjeczalik/notify" - -var ( - // eventWrite contains the notify events that will cause a write - eventWrite = []notify.Event{notify.InCloseWrite} -) diff --git a/pkg/certs/event_others.go b/pkg/certs/event_others.go deleted file mode 100644 index 486e06fa..00000000 --- a/pkg/certs/event_others.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !linux - -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package certs - -import "github.com/rjeczalik/notify" - -var ( - // eventWrite contains the notify events that will cause a write - eventWrite = []notify.Event{notify.Create, notify.Write} -) diff --git a/pkg/certs/server.crt b/pkg/certs/server.crt deleted file mode 100644 index f0d8d2d9..00000000 --- a/pkg/certs/server.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDqjCCApKgAwIBAgIJAOcv4FsrflS4MA0GCSqGSIb3DQEBCwUAMGoxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTEVMBMGA1UEBwwMUmVkd29vZCBDaXR5MQ4wDAYD -VQQKDAVNaW5pbzEUMBIGA1UECwwLRW5naW5lZXJpbmcxETAPBgNVBAMMCG1pbmlv -LmlvMB4XDTE4MDUyMDA4NDc0MFoXDTE5MDUyMDA4NDc0MFowajELMAkGA1UEBhMC -VVMxCzAJBgNVBAgMAkNBMRUwEwYDVQQHDAxSZWR3b29kIENpdHkxDjAMBgNVBAoM -BU1pbmlvMRQwEgYDVQQLDAtFbmdpbmVlcmluZzERMA8GA1UEAwwIbWluaW8uaW8w -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPszxaYwn+mIz6IGuUlmvW -wUs/yWTH4MC17qey2N5MqcxlfIWHUugcBsbGhi/e1druFW0s7YGMxp+G+Q1IezxX -+VmVaJCN8AgSowbYgpRdpRQ+mhGeQby0JcvO16fyPnUJBz3GGel2bcK8fcQyT0TV -apCiD9oURVmdvDSsRXz+EoPlOve8AWciHHgm1ItO5qdPRP5YtcJfLiwKnoYnpda2 -d9SzmYk+Q2JFArooF7/A1DYz9bXCMo3qp0gQlMpSMDR+MCbxHBzBBr+fQG8QdDrz -WQ2slhniBhFDk0LuPCBLlSeIzkp+DoAGDXf3hWYhechlabZ7nfngg5erEz776WCF -AgMBAAGjUzBRMB0GA1UdDgQWBBRzC09a+3AlbFDg6BsvELolmO8jYjAfBgNVHSME -GDAWgBRzC09a+3AlbFDg6BsvELolmO8jYjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG -SIb3DQEBCwUAA4IBAQBl0cx7qbidKjhoZ1Iv4pCD8xHZgtuWEDApPoGuMtVS66jJ -+oj0ncD5xCtv9XqXtshE65FIsEWnDOIwa+kyjMnxHbFwxveWBT4W0twtqwbVs7NE -I0So6cEmSx4+rB0XorY6mIbD3O9YAStelNhB1jVfQfIMSByYkcGq2Fh+B1LHlOrz -06LJdwYMiILzK0c5fvjZvsDq/9EK+Xo66hphKjs5cl1t9WK7wKOCoZDt2lOTZqEq -UWYGPWlTAxSWQxO4WnvSKqFdsRi8fOO3KlDq1eNqeDSGGCI0DTGgJxidHIpfOPEF -s/zojgc5npE32/1n8og6gLcv7LIKelBfMhUrFTp7 ------END CERTIFICATE----- diff --git a/pkg/certs/server.key b/pkg/certs/server.key deleted file mode 100644 index 25c46cef..00000000 --- a/pkg/certs/server.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDPszxaYwn+mIz6 -IGuUlmvWwUs/yWTH4MC17qey2N5MqcxlfIWHUugcBsbGhi/e1druFW0s7YGMxp+G -+Q1IezxX+VmVaJCN8AgSowbYgpRdpRQ+mhGeQby0JcvO16fyPnUJBz3GGel2bcK8 -fcQyT0TVapCiD9oURVmdvDSsRXz+EoPlOve8AWciHHgm1ItO5qdPRP5YtcJfLiwK -noYnpda2d9SzmYk+Q2JFArooF7/A1DYz9bXCMo3qp0gQlMpSMDR+MCbxHBzBBr+f -QG8QdDrzWQ2slhniBhFDk0LuPCBLlSeIzkp+DoAGDXf3hWYhechlabZ7nfngg5er -Ez776WCFAgMBAAECggEBAJcHRyCWmcLm3MRY5MF0K9BKV9R3NnBdTuQ8OPdE2Ui3 -w6gcRuBi+eK/TrU3CAIqUXsEW5Hq1mQuXfwAh5cn/XYfG/QXx91eKBCdOTIgqY/6 -pODsmVkRhg0c2rl6eWYd4m6BNHsjhm8WWx9C+HJ4z528UpV1n2dUElkvbMHD+aKp 
-Ndwd0W+0PCn/BjMn/sdyy01f8sfaK2Zoy7HBw/fGeBDNLFFj3Iz7BqXYeS+OyfLN -B4xD5I5fFqt1iJeyqVPzGkOAYSqisijbM1GtZJCeVp37/+IDylCKTO3l8Xd8x73U -qTYcYT3heSHyUC2xCM6Va2YkSrOHeqbq91QgHh9LVrUCgYEA9t/wE2S8TE2l1IG9 -68SXdhyaXTnB2qSL7ggY0uazPzBNLQpNMOxicZ6/4QGEi3hSuCqGxxGo9UEoTsVd -pk8oIeDULdPVi4NQxSmkxUyArs/dzOMygUPyosOiEc8z6jWFFKDcQ7mnZnay8dZ4 -e4j+/hZDONtDrJ+zH2xu98ZrJPcCgYEA12CbSRbCkTiRj/dq8Qvgp6+ceTVcAbnk -MWpAhZQaXHrG3XP0L7QTIHG/7a09Mln92zjuAFXDp/Vc5NdxeXcnj9j6oUAxq+0I -dq+vibzjROemmvnmQvXGY9tc0ns6u7GjM0+Sicmas+IH4vuum/aRasABfVe2XBwe -4fVs0n7yU2MCgYA7KevFGg0uVCV7yiQTzqdlvPEZim/00B5gyzv3vyYR7KdyNdfN -87ib9imR6OU0738Td82ZA5h0PktEpXQOGUZK6DCxUuUIbE39Ej/UsMLeIh7LrV87 -L2eErlG25utQI8di7DIdYO7HVYcJAhcZs/k4N2mgxJtxUUyCKWBmrPycfQKBgAo7 -0uUUKcaQs4ntra0qbVBKbdrsiCSk2ozmiY5PTTlbtBtNqSqjGc2O2hnHA4Ni90b1 -W4m0iYlvhSxyeDfXS4/wNWh4DmQm7SIGkwaubPYXM7llamWAHB8eiziNFmtYs3J6 -s3HMnIczlEBayR8sBhjWaruz8TxLMcR2zubplUYVAoGBAItxeC9IT8BGJoZB++qM -f2LXCqJ383x0sDHhwPMFPtwUTzAwc5BJgQe9zFktW5CBxsER+MnUZjlrarT1HQfH -1Y1mJQXtwuBKG4pPPZphH0yoVlYcWkBTMw/KmlVlwRclEzRQwV3TPD+i6ieKeZhz -9eZwhS3H+Zb/693WbBDyH8L+ ------END PRIVATE KEY----- diff --git a/pkg/certs/server1.crt b/pkg/certs/server1.crt deleted file mode 100644 index f0d8d2d9..00000000 --- a/pkg/certs/server1.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDqjCCApKgAwIBAgIJAOcv4FsrflS4MA0GCSqGSIb3DQEBCwUAMGoxCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJDQTEVMBMGA1UEBwwMUmVkd29vZCBDaXR5MQ4wDAYD -VQQKDAVNaW5pbzEUMBIGA1UECwwLRW5naW5lZXJpbmcxETAPBgNVBAMMCG1pbmlv -LmlvMB4XDTE4MDUyMDA4NDc0MFoXDTE5MDUyMDA4NDc0MFowajELMAkGA1UEBhMC -VVMxCzAJBgNVBAgMAkNBMRUwEwYDVQQHDAxSZWR3b29kIENpdHkxDjAMBgNVBAoM -BU1pbmlvMRQwEgYDVQQLDAtFbmdpbmVlcmluZzERMA8GA1UEAwwIbWluaW8uaW8w -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPszxaYwn+mIz6IGuUlmvW -wUs/yWTH4MC17qey2N5MqcxlfIWHUugcBsbGhi/e1druFW0s7YGMxp+G+Q1IezxX -+VmVaJCN8AgSowbYgpRdpRQ+mhGeQby0JcvO16fyPnUJBz3GGel2bcK8fcQyT0TV -apCiD9oURVmdvDSsRXz+EoPlOve8AWciHHgm1ItO5qdPRP5YtcJfLiwKnoYnpda2 -d9SzmYk+Q2JFArooF7/A1DYz9bXCMo3qp0gQlMpSMDR+MCbxHBzBBr+fQG8QdDrz -WQ2slhniBhFDk0LuPCBLlSeIzkp+DoAGDXf3hWYhechlabZ7nfngg5erEz776WCF -AgMBAAGjUzBRMB0GA1UdDgQWBBRzC09a+3AlbFDg6BsvELolmO8jYjAfBgNVHSME -GDAWgBRzC09a+3AlbFDg6BsvELolmO8jYjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG -SIb3DQEBCwUAA4IBAQBl0cx7qbidKjhoZ1Iv4pCD8xHZgtuWEDApPoGuMtVS66jJ -+oj0ncD5xCtv9XqXtshE65FIsEWnDOIwa+kyjMnxHbFwxveWBT4W0twtqwbVs7NE -I0So6cEmSx4+rB0XorY6mIbD3O9YAStelNhB1jVfQfIMSByYkcGq2Fh+B1LHlOrz -06LJdwYMiILzK0c5fvjZvsDq/9EK+Xo66hphKjs5cl1t9WK7wKOCoZDt2lOTZqEq -UWYGPWlTAxSWQxO4WnvSKqFdsRi8fOO3KlDq1eNqeDSGGCI0DTGgJxidHIpfOPEF -s/zojgc5npE32/1n8og6gLcv7LIKelBfMhUrFTp7 ------END CERTIFICATE----- diff --git a/pkg/certs/server1.key b/pkg/certs/server1.key deleted file mode 100644 index 25c46cef..00000000 --- a/pkg/certs/server1.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDPszxaYwn+mIz6 -IGuUlmvWwUs/yWTH4MC17qey2N5MqcxlfIWHUugcBsbGhi/e1druFW0s7YGMxp+G -+Q1IezxX+VmVaJCN8AgSowbYgpRdpRQ+mhGeQby0JcvO16fyPnUJBz3GGel2bcK8 -fcQyT0TVapCiD9oURVmdvDSsRXz+EoPlOve8AWciHHgm1ItO5qdPRP5YtcJfLiwK -noYnpda2d9SzmYk+Q2JFArooF7/A1DYz9bXCMo3qp0gQlMpSMDR+MCbxHBzBBr+f -QG8QdDrzWQ2slhniBhFDk0LuPCBLlSeIzkp+DoAGDXf3hWYhechlabZ7nfngg5er -Ez776WCFAgMBAAECggEBAJcHRyCWmcLm3MRY5MF0K9BKV9R3NnBdTuQ8OPdE2Ui3 -w6gcRuBi+eK/TrU3CAIqUXsEW5Hq1mQuXfwAh5cn/XYfG/QXx91eKBCdOTIgqY/6 -pODsmVkRhg0c2rl6eWYd4m6BNHsjhm8WWx9C+HJ4z528UpV1n2dUElkvbMHD+aKp -Ndwd0W+0PCn/BjMn/sdyy01f8sfaK2Zoy7HBw/fGeBDNLFFj3Iz7BqXYeS+OyfLN -B4xD5I5fFqt1iJeyqVPzGkOAYSqisijbM1GtZJCeVp37/+IDylCKTO3l8Xd8x73U 
-qTYcYT3heSHyUC2xCM6Va2YkSrOHeqbq91QgHh9LVrUCgYEA9t/wE2S8TE2l1IG9 -68SXdhyaXTnB2qSL7ggY0uazPzBNLQpNMOxicZ6/4QGEi3hSuCqGxxGo9UEoTsVd -pk8oIeDULdPVi4NQxSmkxUyArs/dzOMygUPyosOiEc8z6jWFFKDcQ7mnZnay8dZ4 -e4j+/hZDONtDrJ+zH2xu98ZrJPcCgYEA12CbSRbCkTiRj/dq8Qvgp6+ceTVcAbnk -MWpAhZQaXHrG3XP0L7QTIHG/7a09Mln92zjuAFXDp/Vc5NdxeXcnj9j6oUAxq+0I -dq+vibzjROemmvnmQvXGY9tc0ns6u7GjM0+Sicmas+IH4vuum/aRasABfVe2XBwe -4fVs0n7yU2MCgYA7KevFGg0uVCV7yiQTzqdlvPEZim/00B5gyzv3vyYR7KdyNdfN -87ib9imR6OU0738Td82ZA5h0PktEpXQOGUZK6DCxUuUIbE39Ej/UsMLeIh7LrV87 -L2eErlG25utQI8di7DIdYO7HVYcJAhcZs/k4N2mgxJtxUUyCKWBmrPycfQKBgAo7 -0uUUKcaQs4ntra0qbVBKbdrsiCSk2ozmiY5PTTlbtBtNqSqjGc2O2hnHA4Ni90b1 -W4m0iYlvhSxyeDfXS4/wNWh4DmQm7SIGkwaubPYXM7llamWAHB8eiziNFmtYs3J6 -s3HMnIczlEBayR8sBhjWaruz8TxLMcR2zubplUYVAoGBAItxeC9IT8BGJoZB++qM -f2LXCqJ383x0sDHhwPMFPtwUTzAwc5BJgQe9zFktW5CBxsER+MnUZjlrarT1HQfH -1Y1mJQXtwuBKG4pPPZphH0yoVlYcWkBTMw/KmlVlwRclEzRQwV3TPD+i6ieKeZhz -9eZwhS3H+Zb/693WbBDyH8L+ ------END PRIVATE KEY----- diff --git a/pkg/certs/server2.crt b/pkg/certs/server2.crt deleted file mode 100644 index aa988c0c..00000000 --- a/pkg/certs/server2.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDYDCCAkigAwIBAgIJALIHkFXjtZ2yMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV -BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX -aWRnaXRzIFB0eSBMdGQwHhcNMTgwNTIwMDg1MzI3WhcNMTkwNTIwMDg1MzI3WjBF -MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 -ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEA+LZ8+eqDHyoCt7HGQIhZK+ZagDxXzJ67a2V88s/rHB3zhi1d6ha6q5sc -ljmCNqj250fjSWpDQ4hssfqyNDmY/IUaphnT9eMBPZX6RXZVFXGtpUUFvGik5hed -2g7j5Jhy+luz5QHn9zR6E7rkqTPl3WJZ2fe4LEfij6/bzZ2CMUFrKyt/uqn4laTl -m4DO+wjoOUGAHmaHbkpkhYTb/qbWzV0qMh0Zy4gQuFYcBVbATcdAjV4bRNkHd0CL -Ekd3A9ae5ZaeOrg2HkPVcinxg1ln5jBe2LBqDFqKkWudzm6jeNw+oE4lKKxDfHH8 -AD08N8qFbfs1YxZAjL3wKpcYVw2pzQIDAQABo1MwUTAdBgNVHQ4EFgQU2Yywgv8p -WfyZxYVx+MnH+VQ5TTUwHwYDVR0jBBgwFoAU2Yywgv8pWfyZxYVx+MnH+VQ5TTUw -DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEA2maF7DQ7CMpCho9B -9gjGxvt8HqY1pCyuQwcSPb4PTyoKUZ/ZuIDhVOaBX+ox1RzlfGtYs2BUM63/QUDs -dP0GO7/IL/XEqJi1flrFvM7LNSs89qAbPJ440m6jJDzsuL2VeyUX/M72IEsBK2uS -ajtS1+HFQjPMvt7wR6fDPCP7wHPOrkTN4hcHlgzVJShKUnFaHtb2lOnWaoM/Sk91 -IsiyAhKRuCM9et7/bnOj7G8448QDVtQNniT8V/HpqQ7ltSuIGvs3QYTLDTege/74 -Q8Ph1oH7shyRE/PqPfyIuLq3p0N9Sah3oRMHLohYjJL0zAGt0jxSsnhrBSNUUD/v -bAd5VQ== ------END CERTIFICATE----- diff --git a/pkg/certs/server2.key b/pkg/certs/server2.key deleted file mode 100644 index 89a19685..00000000 --- a/pkg/certs/server2.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4tnz56oMfKgK3 -scZAiFkr5lqAPFfMnrtrZXzyz+scHfOGLV3qFrqrmxyWOYI2qPbnR+NJakNDiGyx -+rI0OZj8hRqmGdP14wE9lfpFdlUVca2lRQW8aKTmF53aDuPkmHL6W7PlAef3NHoT -uuSpM+XdYlnZ97gsR+KPr9vNnYIxQWsrK3+6qfiVpOWbgM77COg5QYAeZoduSmSF -hNv+ptbNXSoyHRnLiBC4VhwFVsBNx0CNXhtE2Qd3QIsSR3cD1p7llp46uDYeQ9Vy -KfGDWWfmMF7YsGoMWoqRa53ObqN43D6gTiUorEN8cfwAPTw3yoVt+zVjFkCMvfAq -lxhXDanNAgMBAAECggEBAIGAI5rNbPCxIzEas6uuUx/0lXLn+J9mlxfYhDK56CV/ -wuk+fgQBSblIzp252/8yAz1xxPrZBaUIR/B0JI3k36+8bp/GGwOQ63hxuxqn/q1n -v46qXc44foQAEAUWc7r3Vgbd8NFxKKMjA916Fs2zZCDdsQM5ZQBJfcJrQvvQ45VY -//UtXdNeIBQOb5Wg4o9fHJolKzCHWRaD2ExoIHZ5Fa6JpBmk9JBHcUbrHrlbOeep -/SkbSa0ma9j3k3jqV970XRoQUCJf+K1Li49jmaYPPGXBUAp6AfU+yiAJ1aups38m -BClLAV9g6vgE3xK2xozGPI1+j9lkruYbvGbPNkXexdECgYEA/47XnKITSnxtV+NK -nDbWNOgpeaRbxAdjp1P0b4VI0S0SuRvKUOCp1UlPg5BjGL0JLPQpGlPzEfLlGWAa -68vhyj0V6HL2+PAJNib1eu6yyRBsSbPdrAD5nydHpbxRcdShhVwb2MHMyBeYH5Al 
-kL+ed5wCF32kXOOGzhoGzJEKNEcCgYEA+SSdcdbuVpQFkAecIoABwdx/qeOAeS19 -FsvVSTmWlhal8m2Mn8RWZ0IKXT9AoZJ0KQBIKHViPtyV7UQey05uRgLRHZapHpe8 -dhm6SsGYtU3BhLdHJBP0kI79qm2kzqsHp6ghSzaxT9CkRfMniN+TD+w8p7lrOaxv -vV46UHoGX0sCgYB4LlCvVHkF+mXhgv4/YHpz/woiLm0JTwBKXG0DVQbdd/jqHGuU -hVLY/tTp5ij0JVH/VgNOYlRZCIU83blLUmIonXmECyyh/SAX21JuMXram2KRdoi0 -rvC1K9/BzUHv6jLbaGmgEeOf5Zign0VLQRHg5fkF2wxEsqtemVbBNSQ7WQKBgBFk -Y/VRervig2zlixnBc93zpZnXft12tnfD7PS6p298z0LYMOvqSdnVe2G9C6b70U4X -bfIdF6mpvnGcwsWQiRQsGCsHnHC9SPO5og6b6ywk7HB2VuoG1pjM0pp2Iv4mZFdo -3kIg5EndF8qmSck9SkffRvCyefDBv98pV8rMaet3AoGBALjlN2hLoNE5Cs5vTYH8 -W0AN4lEOaTlBRKG8a1h7Fm2vPgzGGkiwU6bVzsh0oTfytc8v8MW9lNQZpE3dBKne -ms3FrNsnBbTczX+xJmndRnVRocdyON6u476VxAuz/dHSFFnZGXX+2lJse9xnWHUz -OpSHUPq3TrUzhgZClE2ZKpNm ------END PRIVATE KEY----- diff --git a/pkg/cgroup/linux.go b/pkg/cgroup/linux.go deleted file mode 100644 index 8fbb57d7..00000000 --- a/pkg/cgroup/linux.go +++ /dev/null @@ -1,177 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package cgroup implements parsing for all the cgroup -// categories and functionality in a simple way. -package cgroup - -import ( - "bufio" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" -) - -// DO NOT EDIT following constants are chosen defaults for any kernel -// after 3.x, please open a github issue https://github.com/minio/minio/issues -// and discuss first if you wish to change this. -const ( - // Default string for looking for kernel memory param. - memoryLimitKernelParam = "memory.limit_in_bytes" - - // Points to sys path memory path. - cgroupMemSysPath = "/sys/fs/cgroup/memory" - - // Default docker prefix. - dockerPrefixName = "/docker/" - - // Proc controller group path. - cgroupFileTemplate = "/proc/%d/cgroup" -) - -// CGEntries - represents all the entries in a process cgroup file -// at /proc//cgroup as key value pairs. -type CGEntries map[string]string - -// GetEntries reads and parses all the cgroup entries for a given process. -func GetEntries(pid int) (CGEntries, error) { - r, err := os.Open(fmt.Sprintf(cgroupFileTemplate, pid)) - if err != nil { - return nil, err - } - defer r.Close() - return parseProcCGroup(r) -} - -// parseProcCGroup - cgroups are always in the following -// format once enabled you need to know the pid of the -// application you are looking for so that the the -// following parsing logic only parses the file located -// at /proc//cgroup. -// -// CGROUP entries id, component and path are always in -// the following format. ``ID:COMPONENT:PATH`` -// -// Following code block parses this information and -// returns a procCGroup which is a parsed list of all -// the line by line entires from /proc//cgroup. -func parseProcCGroup(r io.Reader) (CGEntries, error) { - var cgEntries = CGEntries{} - - // Start reading cgroup categories line by line - // and process them into procCGroup structure. 
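// For example, the line "1:name=systemd:/user.slice/user-1000.slice/session-1.scope"
// splits into the ID "1", the component "name=systemd" and the path
// "/user.slice/user-1000.slice/session-1.scope"; the "name=" prefix is trimmed so the
// entry ends up keyed as "systemd". A line such as "8:net_cls,net_prio:/" produces two
// entries, "net_cls" and "net_prio", both mapped to "/".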
- scanner := bufio.NewScanner(r) - for scanner.Scan() { - line := scanner.Text() - - tokens := strings.SplitN(line, ":", 3) - if len(tokens) < 3 { - continue - } - - name, path := tokens[1], tokens[2] - for _, token := range strings.Split(name, ",") { - name = strings.TrimPrefix(token, "name=") - cgEntries[name] = path - } - } - - // Return upon any error while reading the cgroup categories. - if err := scanner.Err(); err != nil { - return nil, err - } - - return cgEntries, nil -} - -// Fetch value of the cgroup kernel param from the cgroup manager, -// if cgroup manager is configured we should just rely on `cgm` cli -// to fetch all the values for us. -func getManagerKernValue(cname, path, kernParam string) (limit uint64, err error) { - - cmd := exec.Command("cgm", "getvalue", cname, path, kernParam) - var out bytes.Buffer - cmd.Stdout = &out - if err = cmd.Run(); err != nil { - return 0, err - } - - // Parse the cgm output. - limit, err = strconv.ParseUint(strings.TrimSpace(out.String()), 10, 64) - return limit, err -} - -// Get cgroup memory limit file path. -func getMemoryLimitFilePath(cgPath string) string { - path := cgroupMemSysPath - - // Docker generates weird cgroup paths that don't - // really exist on the file system. - // - // For example on regular Linux OS : - // `/user.slice/user-1000.slice/session-1.scope` - // - // But they exist as a bind mount on Docker and - // are not accessible : `/docker/` - // - // We we will just ignore if there is `/docker` in the - // path ignore and fall back to : - // `/sys/fs/cgroup/memory/memory.limit_in_bytes` - if !strings.HasPrefix(cgPath, dockerPrefixName) { - path = filepath.Join(path, cgPath) - } - - // Final path. - return filepath.Join(path, memoryLimitKernelParam) -} - -// GetMemoryLimit - Fetches cgroup memory limit either from -// a file path at '/sys/fs/cgroup/memory', if path fails then -// fallback to querying cgroup manager. -func GetMemoryLimit(pid int) (limit uint64, err error) { - var cg CGEntries - cg, err = GetEntries(pid) - if err != nil { - return 0, err - } - - path := cg["memory"] - - limit, err = getManagerKernValue("memory", path, memoryLimitKernelParam) - if err != nil { - - // Upon any failure returned from `cgm`, on some systems cgm - // might not be installed. We fallback to using the the sysfs - // path instead to lookup memory limits. - var b []byte - b, err = ioutil.ReadFile(getMemoryLimitFilePath(path)) - if err != nil { - return 0, err - } - - limit, err = strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64) - } - - return limit, err -} diff --git a/pkg/cgroup/linux_test.go b/pkg/cgroup/linux_test.go deleted file mode 100644 index 9cf64bba..00000000 --- a/pkg/cgroup/linux_test.go +++ /dev/null @@ -1,140 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cgroup - -import ( - "io/ioutil" - "os" - "testing" -) - -// Testing parsing correctness for various process cgroup files. 
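// For callers of this package, a minimal sketch of reading the current process's cgroup
// memory limit (imports of "os" and "log" assumed; error handling is illustrative):
//
//	limit, err := cgroup.GetMemoryLimit(os.Getpid())
//	if err != nil {
//		log.Printf("unable to determine cgroup memory limit: %v", err)
//	} else {
//		log.Printf("cgroup memory limit: %d bytes", limit)
//	}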
-func TestProcCGroup(t *testing.T) { - tmpPath, err := ioutil.TempFile("", "cgroup") - if err != nil { - t.Fatal(err) - } - defer os.Remove(tmpPath.Name()) - - cgroup := ` -11:memory:/user.slice -10:blkio:/user.slice -9:hugetlb:/ -8:net_cls,net_prio:/ -7:perf_event:/ -6:pids:/user.slice/user-1000.slice -5:devices:/user.slice -4:cpuset:/ -3:cpu,cpuacct:/user.slice -2:freezer:/ -1:name=systemd:/user.slice/user-1000.slice/session-1.scope -` - _, err = tmpPath.WriteString(cgroup) - if err != nil { - t.Fatal(err) - } - - // Seek back to read from the beginning. - tmpPath.Seek(0, 0) - - cg, err := parseProcCGroup(tmpPath) - if err != nil { - t.Fatal(err) - } - - path := cg["memory"] - if len(path) == 0 { - t.Fatal("Path component cannot be empty") - } - - if path != "/user.slice" { - t.Fatal("Path component cannot be empty") - } - - path = cg["systemd"] - if path != "/user.slice/user-1000.slice/session-1.scope" { - t.Fatal("Path component cannot be empty") - } - - // Mixed cgroups with different group names. - cgroup = ` -11:memory:/newtest/newtest -10:blkio:/user.slice -9:hugetlb:/ -8:net_cls,net_prio:/ -7:perf_event:/ -6:pids:/user.slice/user-1000.slice -5:devices:/user.slice -4:cpuset:/ -3:cpu,cpuacct:/newtest/newtest -2:freezer:/ -1:name=systemd:/user.slice/user-1000.slice/session-1.scope -` - - // Seek back to read from the beginning. - tmpPath.Seek(0, 0) - - _, err = tmpPath.WriteString(cgroup) - if err != nil { - t.Fatal(err) - } - - // Seek back to read from the beginning. - tmpPath.Seek(0, 0) - - cg, err = parseProcCGroup(tmpPath) - if err != nil { - t.Fatal(err) - } - - path = cg["memory"] - if path != "/newtest/newtest" { - t.Fatal("Path component cannot be empty") - } - - path = cg["systemd"] - if path != "/user.slice/user-1000.slice/session-1.scope" { - t.Fatal("Path component cannot be empty") - } - -} - -// Tests cgroup memory limit path construction. -func TestMemoryLimitPath(t *testing.T) { - testCases := []struct { - cgroupPath string - expectedPath string - }{ - { - cgroupPath: "/user.slice", - expectedPath: "/sys/fs/cgroup/memory/user.slice/memory.limit_in_bytes", - }, - { - cgroupPath: "/docker/testing", - expectedPath: "/sys/fs/cgroup/memory/memory.limit_in_bytes", - }, - } - - for i, testCase := range testCases { - actualPath := getMemoryLimitFilePath(testCase.cgroupPath) - if actualPath != testCase.expectedPath { - t.Fatalf("Test: %d: Expected: %s, got %s", i+1, testCase.expectedPath, actualPath) - } - } -} diff --git a/pkg/cgroup/others.go b/pkg/cgroup/others.go deleted file mode 100644 index ade1f0e6..00000000 --- a/pkg/cgroup/others.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !linux - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cgroup diff --git a/pkg/color/color.go b/pkg/color/color.go deleted file mode 100644 index f5353a91..00000000 --- a/pkg/color/color.go +++ /dev/null @@ -1,137 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package color - -import ( - "fmt" - - "github.com/fatih/color" -) - -// global colors. -var ( - // Check if we stderr, stdout are dumb terminals, we do not apply - // ansi coloring on dumb terminals. - IsTerminal = func() bool { - return !color.NoColor - } - - Bold = func() func(a ...interface{}) string { - if IsTerminal() { - return color.New(color.Bold).SprintFunc() - } - return fmt.Sprint - }() - - RedBold = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgRed, color.Bold).SprintfFunc() - } - return fmt.Sprintf - }() - - Red = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgRed).SprintfFunc() - } - return fmt.Sprintf - }() - - Blue = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgBlue).SprintfFunc() - } - return fmt.Sprintf - }() - - Yellow = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgYellow).SprintfFunc() - } - return fmt.Sprintf - }() - - Green = func() func(a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgGreen).SprintFunc() - } - return fmt.Sprint - }() - - GreenBold = func() func(a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgGreen, color.Bold).SprintFunc() - } - return fmt.Sprint - }() - - CyanBold = func() func(a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgCyan, color.Bold).SprintFunc() - } - return fmt.Sprint - }() - - YellowBold = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgYellow, color.Bold).SprintfFunc() - } - return fmt.Sprintf - }() - - BlueBold = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgBlue, color.Bold).SprintfFunc() - } - return fmt.Sprintf - }() - - BgYellow = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.BgYellow).SprintfFunc() - } - return fmt.Sprintf - }() - - Black = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgBlack).SprintfFunc() - } - return fmt.Sprintf - }() - - FgRed = func() func(a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgRed).SprintFunc() - } - return fmt.Sprint - }() - - BgRed = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.BgRed).SprintfFunc() - } - return fmt.Sprintf - }() - - FgWhite = func() func(format string, a ...interface{}) string { - if IsTerminal() { - return color.New(color.FgWhite).SprintfFunc() - } - return fmt.Sprintf - }() -) diff --git a/pkg/console/console.go b/pkg/console/console.go deleted file mode 100644 index 3d5d3d1d..00000000 --- a/pkg/console/console.go +++ /dev/null @@ -1,436 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package console implements console printing helpers -package console - -import ( - "fmt" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/fatih/color" - "github.com/mattn/go-colorable" - "github.com/mattn/go-isatty" -) - -var ( - // DebugPrint enables/disables console debug printing. - DebugPrint = false - - // Used by the caller to print multiple lines atomically. Exposed by Lock/Unlock methods. - publicMutex sync.Mutex - - // Used internally by console. - privateMutex sync.Mutex - - stderrColoredOutput = colorable.NewColorableStderr() - - // Print prints a message. - Print = func(data ...interface{}) { - consolePrint("Print", Theme["Print"], data...) - } - - // PrintC prints a message with color. - PrintC = func(data ...interface{}) { - consolePrint("PrintC", Theme["PrintC"], data...) - } - - // Printf prints a formatted message. - Printf = func(format string, data ...interface{}) { - consolePrintf("Print", Theme["Print"], format, data...) - } - - // Println prints a message with a newline. - Println = func(data ...interface{}) { - consolePrintln("Print", Theme["Print"], data...) - } - - // Fatal print a error message and exit. - Fatal = func(data ...interface{}) { - consolePrint("Fatal", Theme["Fatal"], data...) - os.Exit(1) - } - - // Fatalf print a error message with a format specified and exit. - Fatalf = func(format string, data ...interface{}) { - consolePrintf("Fatal", Theme["Fatal"], format, data...) - os.Exit(1) - } - - // Fatalln print a error message with a new line and exit. - Fatalln = func(data ...interface{}) { - consolePrintln("Fatal", Theme["Fatal"], data...) - os.Exit(1) - } - - // Error prints a error message. - Error = func(data ...interface{}) { - consolePrint("Error", Theme["Error"], data...) - } - - // Errorf print a error message with a format specified. - Errorf = func(format string, data ...interface{}) { - consolePrintf("Error", Theme["Error"], format, data...) - } - - // Errorln prints a error message with a new line. - Errorln = func(data ...interface{}) { - consolePrintln("Error", Theme["Error"], data...) - } - - // Info prints a informational message. - Info = func(data ...interface{}) { - consolePrint("Info", Theme["Info"], data...) - } - - // Infof prints a informational message in custom format. - Infof = func(format string, data ...interface{}) { - consolePrintf("Info", Theme["Info"], format, data...) - } - - // Infoln prints a informational message with a new line. - Infoln = func(data ...interface{}) { - consolePrintln("Info", Theme["Info"], data...) - } - - // Debug prints a debug message without a new line - // Debug prints a debug message. - Debug = func(data ...interface{}) { - if DebugPrint { - consolePrint("Debug", Theme["Debug"], data...) - } - } - - // Debugf prints a debug message with a new line. - Debugf = func(format string, data ...interface{}) { - if DebugPrint { - consolePrintf("Debug", Theme["Debug"], format, data...) 
- } - } - - // Debugln prints a debug message with a new line. - Debugln = func(data ...interface{}) { - if DebugPrint { - consolePrintln("Debug", Theme["Debug"], data...) - } - } - - // Colorize prints message in a colorized form, dictated by the corresponding tag argument. - Colorize = func(tag string, data interface{}) string { - if isatty.IsTerminal(os.Stdout.Fd()) { - colorized, ok := Theme[tag] - if ok { - return colorized.SprintFunc()(data) - } // else: No theme found. Return as string. - } - return fmt.Sprint(data) - } - - // Eraseline Print in new line and adjust to top so that we don't print over the ongoing progress bar. - Eraseline = func() { - consolePrintf("Print", Theme["Print"], "%c[2K\n", 27) - consolePrintf("Print", Theme["Print"], "%c[A", 27) - } -) - -// wrap around standard fmt functions. -// consolePrint prints a message prefixed with message type and program name. -func consolePrint(tag string, c *color.Color, a ...interface{}) { - privateMutex.Lock() - defer privateMutex.Unlock() - - switch tag { - case "Debug": - // if no arguments are given do not invoke debug printer. - if len(a) == 0 { - return - } - output := color.Output - color.Output = stderrColoredOutput - if isatty.IsTerminal(os.Stderr.Fd()) { - c.Print(ProgramName() + ": ") - c.Print(a...) - } else { - fmt.Fprint(color.Output, ProgramName()+": ") - fmt.Fprint(color.Output, a...) - } - color.Output = output - case "Fatal": - fallthrough - case "Error": - // if no arguments are given do not invoke fatal and error printer. - if len(a) == 0 { - return - } - output := color.Output - color.Output = stderrColoredOutput - if isatty.IsTerminal(os.Stderr.Fd()) { - c.Print(ProgramName() + ": ") - c.Print(a...) - } else { - fmt.Fprint(color.Output, ProgramName()+": ") - fmt.Fprint(color.Output, a...) - } - color.Output = output - case "Info": - // if no arguments are given do not invoke info printer. - if len(a) == 0 { - return - } - if isatty.IsTerminal(os.Stdout.Fd()) { - c.Print(ProgramName() + ": ") - c.Print(a...) - } else { - fmt.Fprint(color.Output, ProgramName()+": ") - fmt.Fprint(color.Output, a...) - } - default: - if isatty.IsTerminal(os.Stdout.Fd()) { - c.Print(a...) - } else { - fmt.Fprint(color.Output, a...) - } - } -} - -// consolePrintf - same as print with a new line. -func consolePrintf(tag string, c *color.Color, format string, a ...interface{}) { - privateMutex.Lock() - defer privateMutex.Unlock() - - switch tag { - case "Debug": - // if no arguments are given do not invoke debug printer. - if len(a) == 0 { - return - } - output := color.Output - color.Output = stderrColoredOutput - if isatty.IsTerminal(os.Stderr.Fd()) { - c.Print(ProgramName() + ": ") - c.Printf(format, a...) - } else { - fmt.Fprint(color.Output, ProgramName()+": ") - fmt.Fprintf(color.Output, format, a...) - } - color.Output = output - case "Fatal": - fallthrough - case "Error": - // if no arguments are given do not invoke fatal and error printer. - if len(a) == 0 { - return - } - output := color.Output - color.Output = stderrColoredOutput - if isatty.IsTerminal(os.Stderr.Fd()) { - c.Print(ProgramName() + ": ") - c.Printf(format, a...) - } else { - fmt.Fprint(color.Output, ProgramName()+": ") - fmt.Fprintf(color.Output, format, a...) - } - color.Output = output - case "Info": - // if no arguments are given do not invoke info printer. - if len(a) == 0 { - return - } - if isatty.IsTerminal(os.Stdout.Fd()) { - c.Print(ProgramName() + ": ") - c.Printf(format, a...) 
- } else { - fmt.Fprint(color.Output, ProgramName()+": ") - fmt.Fprintf(color.Output, format, a...) - } - default: - if isatty.IsTerminal(os.Stdout.Fd()) { - c.Printf(format, a...) - } else { - fmt.Fprintf(color.Output, format, a...) - } - } -} - -// consolePrintln - same as print with a new line. -func consolePrintln(tag string, c *color.Color, a ...interface{}) { - privateMutex.Lock() - defer privateMutex.Unlock() - - switch tag { - case "Debug": - // if no arguments are given do not invoke debug printer. - if len(a) == 0 { - return - } - output := color.Output - color.Output = stderrColoredOutput - if isatty.IsTerminal(os.Stderr.Fd()) { - c.Print(ProgramName() + ": ") - c.Println(a...) - } else { - fmt.Fprint(color.Output, ProgramName()+": ") - fmt.Fprintln(color.Output, a...) - } - color.Output = output - case "Fatal": - fallthrough - case "Error": - // if no arguments are given do not invoke fatal and error printer. - if len(a) == 0 { - return - } - output := color.Output - color.Output = stderrColoredOutput - if isatty.IsTerminal(os.Stderr.Fd()) { - c.Print(ProgramName() + ": ") - c.Println(a...) - } else { - fmt.Fprint(color.Output, ProgramName()+": ") - fmt.Fprintln(color.Output, a...) - } - color.Output = output - case "Info": - // if no arguments are given do not invoke info printer. - if len(a) == 0 { - return - } - if isatty.IsTerminal(os.Stdout.Fd()) { - c.Print(ProgramName() + ": ") - c.Println(a...) - } else { - fmt.Fprint(color.Output, ProgramName()+": ") - fmt.Fprintln(color.Output, a...) - } - default: - if isatty.IsTerminal(os.Stdout.Fd()) { - c.Println(a...) - } else { - fmt.Fprintln(color.Output, a...) - } - } -} - -// Lock console. -func Lock() { - publicMutex.Lock() -} - -// Unlock locked console. -func Unlock() { - publicMutex.Unlock() -} - -// ProgramName - return the name of the executable program. -func ProgramName() string { - _, progName := filepath.Split(os.Args[0]) - return progName -} - -// Table - data to print in table format with fixed row widths. -type Table struct { - // per-row colors - RowColors []*color.Color - - // per-column align-right flag (aligns left by default) - AlignRight []bool - - // Left margin width for table - TableIndentWidth int -} - -// NewTable - create a new Table instance. Takes per-row colors and -// per-column right-align flags and table indentation width (i.e. left -// margin width) -func NewTable(rowColors []*color.Color, alignRight []bool, indentWidth int) *Table { - return &Table{rowColors, alignRight, indentWidth} -} - -// DisplayTable - prints the table -func (t *Table) DisplayTable(rows [][]string) error { - numRows := len(rows) - numCols := len(rows[0]) - if numRows != len(t.RowColors) { - return fmt.Errorf("row count and row-colors mismatch") - } - - // Compute max. column widths - maxColWidths := make([]int, numCols) - for _, row := range rows { - if len(row) != len(t.AlignRight) { - return fmt.Errorf("col count and align-right mismatch") - } - for i, v := range row { - if len([]rune(v)) > maxColWidths[i] { - maxColWidths[i] = len([]rune(v)) - } - } - } - - // Compute per-cell text with padding and alignment applied. 
- paddedText := make([][]string, numRows) - for r, row := range rows { - paddedText[r] = make([]string, numCols) - for c, cell := range row { - if t.AlignRight[c] { - fmtStr := fmt.Sprintf("%%%ds", maxColWidths[c]) - paddedText[r][c] = fmt.Sprintf(fmtStr, cell) - } else { - extraWidth := maxColWidths[c] - len([]rune(cell)) - fmtStr := fmt.Sprintf("%%s%%%ds", extraWidth) - paddedText[r][c] = fmt.Sprintf(fmtStr, cell, "") - } - } - } - - // Draw table top border - segments := make([]string, numCols) - for i, c := range maxColWidths { - segments[i] = strings.Repeat("─", c+2) - } - indentText := strings.Repeat(" ", t.TableIndentWidth) - border := fmt.Sprintf("%s┌%s┐", indentText, strings.Join(segments, "┬")) - fmt.Println(border) - - // Print the table with colors - for r, row := range paddedText { - fmt.Print(indentText + "│ ") - for c, text := range row { - t.RowColors[r].Print(text) - if c != numCols-1 { - fmt.Print(" │ ") - } - } - fmt.Println(" │") - } - - // Draw table bottom border - border = fmt.Sprintf("%s└%s┘", indentText, strings.Join(segments, "┴")) - fmt.Println(border) - - return nil -} - -// RewindLines - uses terminal escape symbols to clear and rewind -// upwards on the console for `n` lines. -func RewindLines(n int) { - for i := 0; i < n; i++ { - fmt.Printf("\033[1A\033[K") - } -} diff --git a/pkg/console/console_test.go b/pkg/console/console_test.go deleted file mode 100644 index 354b7d88..00000000 --- a/pkg/console/console_test.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package console - -import ( - "testing" - - "github.com/fatih/color" -) - -func TestSetColor(t *testing.T) { - SetColor("unknown", color.New(color.FgWhite)) - _, ok := Theme["unknown"] - if !ok { - t.Fatal("missing theme") - } -} - -func TestColorLock(t *testing.T) { - Lock() - Print("") // Test for deadlocks. - Unlock() -} diff --git a/pkg/console/themes.go b/pkg/console/themes.go deleted file mode 100644 index d7056ba0..00000000 --- a/pkg/console/themes.go +++ /dev/null @@ -1,54 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package console - -import "github.com/fatih/color" - -var ( - // Theme contains default color mapping. 
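A hypothetical usage sketch for the console helpers and the Table type above; the import path is the pre-removal one, and the messages, rows, and colors are made up.

package main

import (
	"github.com/fatih/color"

	"github.com/minio/minio/pkg/console"
)

func main() {
	console.DebugPrint = true
	console.Infof("starting %s\n", "server")
	console.Errorf("unable to read config: %s\n", "permission denied")
	console.Debugln("verbose details only shown when DebugPrint is true")

	// Two rows, right-align the second column, indent the table by two spaces.
	t := console.NewTable(
		[]*color.Color{color.New(color.FgGreen), color.New(color.FgYellow)},
		[]bool{false, true},
		2,
	)
	if err := t.DisplayTable([][]string{
		{"bucket", "objects"},
		{"photos", "10240"},
	}); err != nil {
		console.Fatalln(err)
	}
}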
- Theme = map[string]*color.Color{ - "Debug": color.New(color.FgWhite, color.Faint, color.Italic), - "Fatal": color.New(color.FgRed, color.Italic, color.Bold), - "Error": color.New(color.FgYellow, color.Italic), - "Info": color.New(color.FgGreen, color.Bold), - "Print": color.New(), - "PrintB": color.New(color.FgBlue, color.Bold), - "PrintC": color.New(color.FgGreen, color.Bold), - } -) - -// SetColorOff disables coloring for the entire session. -func SetColorOff() { - privateMutex.Lock() - defer privateMutex.Unlock() - color.NoColor = true -} - -// SetColorOn enables coloring for the entire session. -func SetColorOn() { - privateMutex.Lock() - defer privateMutex.Unlock() - color.NoColor = false -} - -// SetColor sets a color for a particular tag. -func SetColor(tag string, cl *color.Color) { - privateMutex.Lock() - defer privateMutex.Unlock() - // add new theme - Theme[tag] = cl -} diff --git a/pkg/csvparser/example_test.go b/pkg/csvparser/example_test.go deleted file mode 100644 index 6f20f1f5..00000000 --- a/pkg/csvparser/example_test.go +++ /dev/null @@ -1,131 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in https://golang.org/LICENSE - -package csv_test - -import ( - "encoding/csv" - "fmt" - "io" - "log" - "os" - "strings" -) - -func ExampleReader() { - in := `first_name,last_name,username -"Rob","Pike",rob -Ken,Thompson,ken -"Robert","Griesemer","gri" -` - r := csv.NewReader(strings.NewReader(in)) - - for { - record, err := r.Read() - if err == io.EOF { - break - } - if err != nil { - log.Fatal(err) - } - - fmt.Println(record) - } - // Output: - // [first_name last_name username] - // [Rob Pike rob] - // [Ken Thompson ken] - // [Robert Griesemer gri] -} - -// This example shows how csv.Reader can be configured to handle other -// types of CSV files. -func ExampleReader_options() { - in := `first_name;last_name;username -"Rob";"Pike";rob -# lines beginning with a # character are ignored -Ken;Thompson;ken -"Robert";"Griesemer";"gri" -` - r := csv.NewReader(strings.NewReader(in)) - r.Comma = ';' - r.Comment = '#' - - records, err := r.ReadAll() - if err != nil { - log.Fatal(err) - } - - fmt.Print(records) - // Output: - // [[first_name last_name username] [Rob Pike rob] [Ken Thompson ken] [Robert Griesemer gri]] -} - -func ExampleReader_ReadAll() { - in := `first_name,last_name,username -"Rob","Pike",rob -Ken,Thompson,ken -"Robert","Griesemer","gri" -` - r := csv.NewReader(strings.NewReader(in)) - - records, err := r.ReadAll() - if err != nil { - log.Fatal(err) - } - - fmt.Print(records) - // Output: - // [[first_name last_name username] [Rob Pike rob] [Ken Thompson ken] [Robert Griesemer gri]] -} - -func ExampleWriter() { - records := [][]string{ - {"first_name", "last_name", "username"}, - {"Rob", "Pike", "rob"}, - {"Ken", "Thompson", "ken"}, - {"Robert", "Griesemer", "gri"}, - } - - w := csv.NewWriter(os.Stdout) - - for _, record := range records { - if err := w.Write(record); err != nil { - log.Fatalln("error writing record to csv:", err) - } - } - - // Write any buffered data to the underlying writer (standard output). 
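A sketch of extending the theme map above with a custom tag via SetColor and printing through Colorize; the "Notice" tag and the messages are made up for illustration.

package main

import (
	"github.com/fatih/color"

	"github.com/minio/minio/pkg/console"
)

func main() {
	console.SetColor("Notice", color.New(color.FgCyan, color.Bold))
	console.Println(console.Colorize("Notice", "heal sequence completed"))

	// Disable coloring globally, e.g. when output goes to a log file.
	console.SetColorOff()
	console.Println(console.Colorize("Notice", "printed without ANSI codes"))
}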
- w.Flush() - - if err := w.Error(); err != nil { - log.Fatal(err) - } - // Output: - // first_name,last_name,username - // Rob,Pike,rob - // Ken,Thompson,ken - // Robert,Griesemer,gri -} - -func ExampleWriter_WriteAll() { - records := [][]string{ - {"first_name", "last_name", "username"}, - {"Rob", "Pike", "rob"}, - {"Ken", "Thompson", "ken"}, - {"Robert", "Griesemer", "gri"}, - } - - w := csv.NewWriter(os.Stdout) - w.WriteAll(records) // calls Flush internally - - if err := w.Error(); err != nil { - log.Fatalln("error writing csv:", err) - } - // Output: - // first_name,last_name,username - // Rob,Pike,rob - // Ken,Thompson,ken - // Robert,Griesemer,gri -} diff --git a/pkg/csvparser/fuzz.go b/pkg/csvparser/fuzz.go deleted file mode 100644 index 643f0336..00000000 --- a/pkg/csvparser/fuzz.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in https://golang.org/LICENSE - -// +build gofuzz - -package csv - -import ( - "bytes" - "fmt" - "reflect" -) - -func Fuzz(data []byte) int { - score := 0 - buf := new(bytes.Buffer) - - for _, tt := range []Reader{ - {}, - {Comma: ';'}, - {Comma: '\t'}, - {LazyQuotes: true}, - {TrimLeadingSpace: true}, - {Comment: '#'}, - {Comment: ';'}, - } { - r := NewReader(bytes.NewReader(data)) - r.Comma = tt.Comma - r.Comment = tt.Comment - r.LazyQuotes = tt.LazyQuotes - r.TrimLeadingSpace = tt.TrimLeadingSpace - - records, err := r.ReadAll() - if err != nil { - continue - } - score = 1 - - buf.Reset() - w := NewWriter(buf) - w.Comma = tt.Comma - err = w.WriteAll(records) - if err != nil { - fmt.Printf("writer = %#v\n", w) - fmt.Printf("records = %v\n", records) - panic(err) - } - - r = NewReader(buf) - r.Comma = tt.Comma - r.Comment = tt.Comment - r.LazyQuotes = tt.LazyQuotes - r.TrimLeadingSpace = tt.TrimLeadingSpace - result, err := r.ReadAll() - if err != nil { - fmt.Printf("reader = %#v\n", r) - fmt.Printf("records = %v\n", records) - panic(err) - } - - if !reflect.DeepEqual(records, result) { - fmt.Println("records = \n", records) - fmt.Println("result = \n", records) - panic("not equal") - } - } - - return score -} diff --git a/pkg/csvparser/reader.go b/pkg/csvparser/reader.go deleted file mode 100644 index c5d75c12..00000000 --- a/pkg/csvparser/reader.go +++ /dev/null @@ -1,445 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in https://golang.org/LICENSE - -// Package csv reads and writes comma-separated values (CSV) files. -// There are many kinds of CSV files; this package supports the format -// described in RFC 4180. -// -// A csv file contains zero or more records of one or more fields per record. -// Each record is separated by the newline character. The final record may -// optionally be followed by a newline character. -// -// field1,field2,field3 -// -// White space is considered part of a field. -// -// Carriage returns before newline characters are silently removed. -// -// Blank lines are ignored. A line with only whitespace characters (excluding -// the ending newline character) is not considered a blank line. -// -// Fields which start and stop with the quote character " are called -// quoted-fields. The beginning and ending quote are not part of the -// field. 
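The fuzz harness above asserts a read -> write -> read round trip. The same property can be spot-checked in an ordinary test; the sketch below is written as if it sat inside the csv package before its removal, with a made-up input.

package csv

import (
	"bytes"
	"reflect"
	"strings"
	"testing"
)

func TestRoundTripSketch(t *testing.T) {
	const input = "a,b,c\n\"multi\nline\",\"has \"\"quotes\"\"\",plain\n"

	r := NewReader(strings.NewReader(input))
	records, err := r.ReadAll()
	if err != nil {
		t.Fatal(err)
	}

	var buf bytes.Buffer
	if err := NewWriter(&buf).WriteAll(records); err != nil {
		t.Fatal(err)
	}

	again, err := NewReader(&buf).ReadAll()
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(records, again) {
		t.Fatalf("round trip mismatch: %v != %v", records, again)
	}
}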
-// -// The source: -// -// normal string,"quoted-field" -// -// results in the fields -// -// {`normal string`, `quoted-field`} -// -// Within a quoted-field a quote character followed by a second quote -// character is considered a single quote. -// -// "the ""word"" is true","a ""quoted-field""" -// -// results in -// -// {`the "word" is true`, `a "quoted-field"`} -// -// Newlines and commas may be included in a quoted-field -// -// "Multi-line -// field","comma is ," -// -// results in -// -// {`Multi-line -// field`, `comma is ,`} -package csv - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "unicode" - "unicode/utf8" -) - -// A ParseError is returned for parsing errors. -// Line numbers are 1-indexed and columns are 0-indexed. -type ParseError struct { - StartLine int // Line where the record starts - Line int // Line where the error occurred - Column int // Column (rune index) where the error occurred - Err error // The actual error -} - -func (e *ParseError) Error() string { - if e.Err == ErrFieldCount { - return fmt.Sprintf("record on line %d: %v", e.Line, e.Err) - } - if e.StartLine != e.Line { - return fmt.Sprintf("record on line %d; parse error on line %d, column %d: %v", e.StartLine, e.Line, e.Column, e.Err) - } - return fmt.Sprintf("parse error on line %d, column %d: %v", e.Line, e.Column, e.Err) -} - -// Unwrap returns the underlying error -func (e *ParseError) Unwrap() error { return e.Err } - -// These are the errors that can be returned in ParseError.Err. -var ( - ErrTrailingComma = errors.New("extra delimiter at end of line") // Deprecated: No longer used. - ErrBareQuote = errors.New("bare \" in non-quoted-field") - ErrQuote = errors.New("extraneous or missing \" in quoted-field") - ErrFieldCount = errors.New("wrong number of fields") -) - -var errInvalidDelim = errors.New("csv: invalid field or comment delimiter") - -func validDelim(r rune) bool { - return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError -} - -// A Reader reads records from a CSV-encoded file. -// -// As returned by NewReader, a Reader expects input conforming to RFC 4180. -// The exported fields can be changed to customize the details before the -// first call to Read or ReadAll. -// -// The Reader converts all \r\n sequences in its input to plain \n, -// including in multiline field values, so that the returned data does -// not depend on which line-ending convention an input file uses. -type Reader struct { - // Comma is the field delimiter. - // It is set to comma (',') by NewReader. - // Comma must be a valid rune and must not be \r, \n, - // or the Unicode replacement character (0xFFFD). - Comma rune - - // Quote is a single rune used for marking fields limits - Quote []rune - - // QuoteEscape is a single rune to escape the quote character - QuoteEscape rune - - // Comment, if not 0, is the comment character. Lines beginning with the - // Comment character without preceding whitespace are ignored. - // With leading whitespace the Comment character becomes part of the - // field, even if TrimLeadingSpace is true. - // Comment must be a valid rune and must not be \r, \n, - // or the Unicode replacement character (0xFFFD). - // It must also not be equal to Comma. - Comment rune - - // FieldsPerRecord is the number of expected fields per record. - // If FieldsPerRecord is positive, Read requires each record to - // have the given number of fields. 
If FieldsPerRecord is 0, Read sets it to - // the number of fields in the first record, so that future records must - // have the same field count. If FieldsPerRecord is negative, no check is - // made and records may have a variable number of fields. - FieldsPerRecord int - - // If LazyQuotes is true, a quote may appear in an unquoted field and a - // non-doubled quote may appear in a quoted field. - LazyQuotes bool - - // If TrimLeadingSpace is true, leading white space in a field is ignored. - // This is done even if the field delimiter, Comma, is white space. - TrimLeadingSpace bool - - // ReuseRecord controls whether calls to Read may return a slice sharing - // the backing array of the previous call's returned slice for performance. - // By default, each call to Read returns newly allocated memory owned by the caller. - ReuseRecord bool - - TrailingComma bool // Deprecated: No longer used. - - r *bufio.Reader - - // numLine is the current line being read in the CSV file. - numLine int - - // rawBuffer is a line buffer only used by the readLine method. - rawBuffer []byte - - // recordBuffer holds the unescaped fields, one after another. - // The fields can be accessed by using the indexes in fieldIndexes. - // E.g., For the row `a,"b","c""d",e`, recordBuffer will contain `abc"de` - // and fieldIndexes will contain the indexes [1, 2, 5, 6]. - recordBuffer []byte - - // fieldIndexes is an index of fields inside recordBuffer. - // The i'th field ends at offset fieldIndexes[i] in recordBuffer. - fieldIndexes []int - - // lastRecord is a record cache and only used when ReuseRecord == true. - lastRecord []string - - // Caching some values between Read() calls for performance gain - cached bool - cachedQuoteEscapeLen int - cachedQuoteLen int - cachedEncodedQuote []byte - cachedCommaLen int - cachedQuotes string -} - -// NewReader returns a new Reader that reads from r. -func NewReader(r io.Reader) *Reader { - return &Reader{ - Comma: ',', - Quote: []rune(`"`), - QuoteEscape: '"', - r: bufio.NewReader(r), - } -} - -// Read reads one record (a slice of fields) from r. -// If the record has an unexpected number of fields, -// Read returns the record along with the error ErrFieldCount. -// Except for that case, Read always returns either a non-nil -// record or a non-nil error, but not both. -// If there is no data left to be read, Read returns nil, io.EOF. -// If ReuseRecord is true, the returned slice may be shared -// between multiple calls to Read. -func (r *Reader) Read() (record []string, err error) { - if r.ReuseRecord { - record, err = r.readRecord(r.lastRecord) - r.lastRecord = record - } else { - record, err = r.readRecord(nil) - } - return record, err -} - -// ReadAll reads all the remaining records from r. -// Each record is a slice of fields. -// A successful call returns err == nil, not err == io.EOF. Because ReadAll is -// defined to read until EOF, it does not treat end of file as an error to be -// reported. -func (r *Reader) ReadAll() (records [][]string, err error) { - for { - record, err := r.readRecord(nil) - if err == io.EOF { - return records, nil - } - if err != nil { - return nil, err - } - records = append(records, record) - } -} - -// readLine reads the next line (with the trailing endline). -// If EOF is hit without a trailing endline, it will be omitted. -// If some bytes were read, then the error is never io.EOF. -// The result is only valid until the next call to readLine. 
-func (r *Reader) readLine() ([]byte, error) { - line, err := r.r.ReadSlice('\n') - if err == bufio.ErrBufferFull { - r.rawBuffer = append(r.rawBuffer[:0], line...) - for err == bufio.ErrBufferFull { - line, err = r.r.ReadSlice('\n') - r.rawBuffer = append(r.rawBuffer, line...) - } - line = r.rawBuffer - } - if len(line) > 0 && err == io.EOF { - err = nil - // For backwards compatibility, drop trailing \r before EOF. - if line[len(line)-1] == '\r' { - line = line[:len(line)-1] - } - } - r.numLine++ - // Normalize \r\n to \n on all input lines. - if n := len(line); n >= 2 && line[n-2] == '\r' && line[n-1] == '\n' { - line[n-2] = '\n' - line = line[:n-1] - } - return line, err -} - -// lengthNL reports the number of bytes for the trailing \n. -func lengthNL(b []byte) int { - if len(b) > 0 && b[len(b)-1] == '\n' { - return 1 - } - return 0 -} - -// nextRune returns the next rune in b or utf8.RuneError. -func nextRune(b []byte) rune { - r, _ := utf8.DecodeRune(b) - return r -} - -func encodeRune(r rune) []byte { - rlen := utf8.RuneLen(r) - p := make([]byte, rlen) - _ = utf8.EncodeRune(p, r) - return p -} - -func (r *Reader) readRecord(dst []string) ([]string, error) { - if r.Comma == r.Comment || !validDelim(r.Comma) || (r.Comment != 0 && !validDelim(r.Comment)) { - return nil, errInvalidDelim - } - - // Read line (automatically skipping past empty lines and any comments). - var line, fullLine []byte - var errRead error - for errRead == nil { - line, errRead = r.readLine() - if r.Comment != 0 && nextRune(line) == r.Comment { - line = nil - continue // Skip comment lines - } - if errRead == nil && len(line) == lengthNL(line) { - line = nil - continue // Skip empty lines - } - fullLine = line - break - } - if errRead == io.EOF { - return nil, errRead - } - - if !r.cached { - r.cachedQuoteEscapeLen = utf8.RuneLen(r.QuoteEscape) - if len(r.Quote) > 0 { - r.cachedQuoteLen = utf8.RuneLen(r.Quote[0]) - r.cachedEncodedQuote = encodeRune(r.Quote[0]) - r.cachedQuotes += string(r.Quote[0]) - } - r.cachedCommaLen = utf8.RuneLen(r.Comma) - r.cachedQuotes += string(r.QuoteEscape) - r.cached = true - } - - // Parse each field in the record. - var err error - recLine := r.numLine // Starting line for record - r.recordBuffer = r.recordBuffer[:0] - r.fieldIndexes = r.fieldIndexes[:0] -parseField: - for { - if r.TrimLeadingSpace { - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - } - if len(line) == 0 || r.cachedQuoteLen == 0 || nextRune(line) != r.Quote[0] { - // Non-quoted string field - i := bytes.IndexRune(line, r.Comma) - field := line - if i >= 0 { - field = field[:i] - } else { - field = field[:len(field)-lengthNL(field)] - } - // Check to make sure a quote does not appear in field. - if !r.LazyQuotes { - if j := bytes.IndexRune(field, r.Quote[0]); j >= 0 { - col := utf8.RuneCount(fullLine[:len(fullLine)-len(line[j:])]) - err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrBareQuote} - break parseField - } - } - r.recordBuffer = append(r.recordBuffer, field...) - r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer)) - if i >= 0 { - line = line[i+r.cachedCommaLen:] - continue parseField - } - break parseField - } else { - // Quoted string field - line = line[r.cachedQuoteLen:] - for { - i := bytes.IndexAny(line, r.cachedQuotes) - if i >= 0 { - // Hit next quote or escape quote - r.recordBuffer = append(r.recordBuffer, line[:i]...) 
- - escape := nextRune(line[i:]) == r.QuoteEscape - if escape { - line = line[i+r.cachedQuoteEscapeLen:] - } else { - line = line[i+r.cachedQuoteLen:] - } - - switch rn := nextRune(line); { - case escape && r.QuoteEscape != r.Quote[0]: - r.recordBuffer = append(r.recordBuffer, encodeRune(rn)...) - line = line[utf8.RuneLen(rn):] - case rn == r.Quote[0]: - // `""` sequence (append quote). - r.recordBuffer = append(r.recordBuffer, r.cachedEncodedQuote...) - line = line[r.cachedQuoteLen:] - case rn == r.Comma: - // `",` sequence (end of field). - line = line[r.cachedCommaLen:] - r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer)) - continue parseField - case lengthNL(line) == len(line): - // `"\n` sequence (end of line). - r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer)) - break parseField - case r.LazyQuotes: - // `"` sequence (bare quote). - r.recordBuffer = append(r.recordBuffer, r.cachedEncodedQuote...) - default: - // `"*` sequence (invalid non-escaped quote). - col := utf8.RuneCount(fullLine[:len(fullLine)-len(line)-r.cachedQuoteLen]) - err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrQuote} - break parseField - } - } else if len(line) > 0 { - // Hit end of line (copy all data so far). - r.recordBuffer = append(r.recordBuffer, line...) - if errRead != nil { - break parseField - } - line, errRead = r.readLine() - if errRead == io.EOF { - errRead = nil - } - fullLine = line - } else { - // Abrupt end of file (EOF or error). - if !r.LazyQuotes && errRead == nil { - col := utf8.RuneCount(fullLine) - err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrQuote} - break parseField - } - r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer)) - break parseField - } - } - } - } - if err == nil { - err = errRead - } - - // Create a single string and create slices out of it. - // This pins the memory of the fields together, but allocates once. - str := string(r.recordBuffer) // Convert to string once to batch allocations - dst = dst[:0] - if cap(dst) < len(r.fieldIndexes) { - dst = make([]string, len(r.fieldIndexes)) - } - dst = dst[:len(r.fieldIndexes)] - var preIdx int - for i, idx := range r.fieldIndexes { - dst[i] = str[preIdx:idx] - preIdx = idx - } - - // Check or update the expected fields per record. - if r.FieldsPerRecord > 0 { - if len(dst) != r.FieldsPerRecord && err == nil { - err = &ParseError{StartLine: recLine, Line: recLine, Err: ErrFieldCount} - } - } else if r.FieldsPerRecord == 0 { - r.FieldsPerRecord = len(dst) - } - return dst, err -} diff --git a/pkg/csvparser/reader_test.go b/pkg/csvparser/reader_test.go deleted file mode 100644 index b129cfc6..00000000 --- a/pkg/csvparser/reader_test.go +++ /dev/null @@ -1,509 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
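A usage sketch for the Reader above: a semicolon delimiter, comment lines, a variable field count, and ParseError inspection via errors.As. The import path is the pre-removal location of this package and the input is made up.

package main

import (
	"errors"
	"fmt"
	"io"
	"log"
	"strings"

	csv "github.com/minio/minio/pkg/csvparser"
)

func main() {
	in := "name;size\n# sizes are in bytes\nobject.txt;1024\n\"bad;record\n"

	r := csv.NewReader(strings.NewReader(in))
	r.Comma = ';'
	r.Comment = '#'
	r.FieldsPerRecord = -1 // allow a variable number of fields per record

	for {
		rec, err := r.Read()
		if err == io.EOF {
			break
		}
		var pe *csv.ParseError
		if errors.As(err, &pe) {
			fmt.Printf("parse error at line %d, column %d: %v\n", pe.Line, pe.Column, pe.Err)
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(rec)
	}
}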
-// Use of this source code is governed by a BSD-style -// license that can be found in https://golang.org/LICENSE - -package csv - -import ( - "io" - "reflect" - "strings" - "testing" - "unicode/utf8" -) - -func TestRead(t *testing.T) { - tests := []struct { - Name string - Input string - Output [][]string - Error error - - // These fields are copied into the Reader - Comma rune - Comment rune - UseFieldsPerRecord bool // false (default) means FieldsPerRecord is -1 - FieldsPerRecord int - LazyQuotes bool - TrimLeadingSpace bool - ReuseRecord bool - }{{ - Name: "Simple", - Input: "a,b,c\n", - Output: [][]string{{"a", "b", "c"}}, - }, { - Name: "CRLF", - Input: "a,b\r\nc,d\r\n", - Output: [][]string{{"a", "b"}, {"c", "d"}}, - }, { - Name: "BareCR", - Input: "a,b\rc,d\r\n", - Output: [][]string{{"a", "b\rc", "d"}}, - }, { - Name: "RFC4180test", - Input: `#field1,field2,field3 -"aaa","bb -b","ccc" -"a,a","b""bb","ccc" -zzz,yyy,xxx -`, - Output: [][]string{ - {"#field1", "field2", "field3"}, - {"aaa", "bb\nb", "ccc"}, - {"a,a", `b"bb`, "ccc"}, - {"zzz", "yyy", "xxx"}, - }, - UseFieldsPerRecord: true, - FieldsPerRecord: 0, - }, { - Name: "NoEOLTest", - Input: "a,b,c", - Output: [][]string{{"a", "b", "c"}}, - }, { - Name: "Semicolon", - Input: "a;b;c\n", - Output: [][]string{{"a", "b", "c"}}, - Comma: ';', - }, { - Name: "MultiLine", - Input: `"two -line","one line","three -line -field"`, - Output: [][]string{{"two\nline", "one line", "three\nline\nfield"}}, - }, { - Name: "BlankLine", - Input: "a,b,c\n\nd,e,f\n\n", - Output: [][]string{ - {"a", "b", "c"}, - {"d", "e", "f"}, - }, - }, { - Name: "BlankLineFieldCount", - Input: "a,b,c\n\nd,e,f\n\n", - Output: [][]string{ - {"a", "b", "c"}, - {"d", "e", "f"}, - }, - UseFieldsPerRecord: true, - FieldsPerRecord: 0, - }, { - Name: "TrimSpace", - Input: " a, b, c\n", - Output: [][]string{{"a", "b", "c"}}, - TrimLeadingSpace: true, - }, { - Name: "LeadingSpace", - Input: " a, b, c\n", - Output: [][]string{{" a", " b", " c"}}, - }, { - Name: "Comment", - Input: "#1,2,3\na,b,c\n#comment", - Output: [][]string{{"a", "b", "c"}}, - Comment: '#', - }, { - Name: "NoComment", - Input: "#1,2,3\na,b,c", - Output: [][]string{{"#1", "2", "3"}, {"a", "b", "c"}}, - }, { - Name: "LazyQuotes", - Input: `a "word","1"2",a","b`, - Output: [][]string{{`a "word"`, `1"2`, `a"`, `b`}}, - LazyQuotes: true, - }, { - Name: "BareQuotes", - Input: `a "word","1"2",a"`, - Output: [][]string{{`a "word"`, `1"2`, `a"`}}, - LazyQuotes: true, - }, { - Name: "BareDoubleQuotes", - Input: `a""b,c`, - Output: [][]string{{`a""b`, `c`}}, - LazyQuotes: true, - }, { - Name: "BadDoubleQuotes", - Input: `a""b,c`, - Error: &ParseError{StartLine: 1, Line: 1, Column: 1, Err: ErrBareQuote}, - }, { - Name: "TrimQuote", - Input: ` "a"," b",c`, - Output: [][]string{{"a", " b", "c"}}, - TrimLeadingSpace: true, - }, { - Name: "BadBareQuote", - Input: `a "word","b"`, - Error: &ParseError{StartLine: 1, Line: 1, Column: 2, Err: ErrBareQuote}, - }, { - Name: "BadTrailingQuote", - Input: `"a word",b"`, - Error: &ParseError{StartLine: 1, Line: 1, Column: 10, Err: ErrBareQuote}, - }, { - Name: "ExtraneousQuote", - Input: `"a "word","b"`, - Error: &ParseError{StartLine: 1, Line: 1, Column: 3, Err: ErrQuote}, - }, { - Name: "BadFieldCount", - Input: "a,b,c\nd,e", - Error: &ParseError{StartLine: 2, Line: 2, Err: ErrFieldCount}, - UseFieldsPerRecord: true, - FieldsPerRecord: 0, - }, { - Name: "BadFieldCount1", - Input: `a,b,c`, - Error: &ParseError{StartLine: 1, Line: 1, Err: ErrFieldCount}, - UseFieldsPerRecord: 
true, - FieldsPerRecord: 2, - }, { - Name: "FieldCount", - Input: "a,b,c\nd,e", - Output: [][]string{{"a", "b", "c"}, {"d", "e"}}, - }, { - Name: "TrailingCommaEOF", - Input: "a,b,c,", - Output: [][]string{{"a", "b", "c", ""}}, - }, { - Name: "TrailingCommaEOL", - Input: "a,b,c,\n", - Output: [][]string{{"a", "b", "c", ""}}, - }, { - Name: "TrailingCommaSpaceEOF", - Input: "a,b,c, ", - Output: [][]string{{"a", "b", "c", ""}}, - TrimLeadingSpace: true, - }, { - Name: "TrailingCommaSpaceEOL", - Input: "a,b,c, \n", - Output: [][]string{{"a", "b", "c", ""}}, - TrimLeadingSpace: true, - }, { - Name: "TrailingCommaLine3", - Input: "a,b,c\nd,e,f\ng,hi,", - Output: [][]string{{"a", "b", "c"}, {"d", "e", "f"}, {"g", "hi", ""}}, - TrimLeadingSpace: true, - }, { - Name: "NotTrailingComma3", - Input: "a,b,c, \n", - Output: [][]string{{"a", "b", "c", " "}}, - }, { - Name: "CommaFieldTest", - Input: `x,y,z,w -x,y,z, -x,y,, -x,,, -,,, -"x","y","z","w" -"x","y","z","" -"x","y","","" -"x","","","" -"","","","" -`, - Output: [][]string{ - {"x", "y", "z", "w"}, - {"x", "y", "z", ""}, - {"x", "y", "", ""}, - {"x", "", "", ""}, - {"", "", "", ""}, - {"x", "y", "z", "w"}, - {"x", "y", "z", ""}, - {"x", "y", "", ""}, - {"x", "", "", ""}, - {"", "", "", ""}, - }, - }, { - Name: "TrailingCommaIneffective1", - Input: "a,b,\nc,d,e", - Output: [][]string{ - {"a", "b", ""}, - {"c", "d", "e"}, - }, - TrimLeadingSpace: true, - }, { - Name: "ReadAllReuseRecord", - Input: "a,b\nc,d", - Output: [][]string{ - {"a", "b"}, - {"c", "d"}, - }, - ReuseRecord: true, - }, { - Name: "StartLine1", // Issue 19019 - Input: "a,\"b\nc\"d,e", - Error: &ParseError{StartLine: 1, Line: 2, Column: 1, Err: ErrQuote}, - }, { - Name: "StartLine2", - Input: "a,b\n\"d\n\n,e", - Error: &ParseError{StartLine: 2, Line: 5, Column: 0, Err: ErrQuote}, - }, { - Name: "CRLFInQuotedField", // Issue 21201 - Input: "A,\"Hello\r\nHi\",B\r\n", - Output: [][]string{ - {"A", "Hello\nHi", "B"}, - }, - }, { - Name: "BinaryBlobField", // Issue 19410 - Input: "x09\x41\xb4\x1c,aktau", - Output: [][]string{{"x09A\xb4\x1c", "aktau"}}, - }, { - Name: "TrailingCR", - Input: "field1,field2\r", - Output: [][]string{{"field1", "field2"}}, - }, { - Name: "QuotedTrailingCR", - Input: "\"field\"\r", - Output: [][]string{{"field"}}, - }, { - Name: "QuotedTrailingCRCR", - Input: "\"field\"\r\r", - Error: &ParseError{StartLine: 1, Line: 1, Column: 6, Err: ErrQuote}, - }, { - Name: "FieldCR", - Input: "field\rfield\r", - Output: [][]string{{"field\rfield"}}, - }, { - Name: "FieldCRCR", - Input: "field\r\rfield\r\r", - Output: [][]string{{"field\r\rfield\r"}}, - }, { - Name: "FieldCRCRLF", - Input: "field\r\r\nfield\r\r\n", - Output: [][]string{{"field\r"}, {"field\r"}}, - }, { - Name: "FieldCRCRLFCR", - Input: "field\r\r\n\rfield\r\r\n\r", - Output: [][]string{{"field\r"}, {"\rfield\r"}}, - }, { - Name: "FieldCRCRLFCRCR", - Input: "field\r\r\n\r\rfield\r\r\n\r\r", - Output: [][]string{{"field\r"}, {"\r\rfield\r"}, {"\r"}}, - }, { - Name: "MultiFieldCRCRLFCRCR", - Input: "field1,field2\r\r\n\r\rfield1,field2\r\r\n\r\r,", - Output: [][]string{ - {"field1", "field2\r"}, - {"\r\rfield1", "field2\r"}, - {"\r\r", ""}, - }, - }, { - Name: "NonASCIICommaAndComment", - Input: "a£b,c£ \td,e\n€ comment\n", - Output: [][]string{{"a", "b,c", "d,e"}}, - TrimLeadingSpace: true, - Comma: '£', - Comment: '€', - }, { - Name: "NonASCIICommaAndCommentWithQuotes", - Input: "a€\" b,\"€ c\nλ comment\n", - Output: [][]string{{"a", " b,", " c"}}, - Comma: '€', - Comment: 'λ', - }, { - // λ and θ start 
with the same byte. - // This tests that the parser doesn't confuse such characters. - Name: "NonASCIICommaConfusion", - Input: "\"abθcd\"λefθgh", - Output: [][]string{{"abθcd", "efθgh"}}, - Comma: 'λ', - Comment: '€', - }, { - Name: "NonASCIICommentConfusion", - Input: "λ\nλ\nθ\nλ\n", - Output: [][]string{{"λ"}, {"λ"}, {"λ"}}, - Comment: 'θ', - }, { - Name: "QuotedFieldMultipleLF", - Input: "\"\n\n\n\n\"", - Output: [][]string{{"\n\n\n\n"}}, - }, { - Name: "MultipleCRLF", - Input: "\r\n\r\n\r\n\r\n", - }, { - // The implementation may read each line in several chunks if it doesn't fit entirely - // in the read buffer, so we should test the code to handle that condition. - Name: "HugeLines", - Input: strings.Repeat("#ignore\n", 10000) + strings.Repeat("@", 5000) + "," + strings.Repeat("*", 5000), - Output: [][]string{{strings.Repeat("@", 5000), strings.Repeat("*", 5000)}}, - Comment: '#', - }, { - Name: "QuoteWithTrailingCRLF", - Input: "\"foo\"bar\"\r\n", - Error: &ParseError{StartLine: 1, Line: 1, Column: 4, Err: ErrQuote}, - }, { - Name: "LazyQuoteWithTrailingCRLF", - Input: "\"foo\"bar\"\r\n", - Output: [][]string{{`foo"bar`}}, - LazyQuotes: true, - }, { - Name: "DoubleQuoteWithTrailingCRLF", - Input: "\"foo\"\"bar\"\r\n", - Output: [][]string{{`foo"bar`}}, - }, { - Name: "EvenQuotes", - Input: `""""""""`, - Output: [][]string{{`"""`}}, - }, { - Name: "OddQuotes", - Input: `"""""""`, - Error: &ParseError{StartLine: 1, Line: 1, Column: 7, Err: ErrQuote}, - }, { - Name: "LazyOddQuotes", - Input: `"""""""`, - Output: [][]string{{`"""`}}, - LazyQuotes: true, - }, { - Name: "BadComma1", - Comma: '\n', - Error: errInvalidDelim, - }, { - Name: "BadComma2", - Comma: '\r', - Error: errInvalidDelim, - }, { - Name: "BadComma3", - Comma: '"', - Error: errInvalidDelim, - }, { - Name: "BadComma4", - Comma: utf8.RuneError, - Error: errInvalidDelim, - }, { - Name: "BadComment1", - Comment: '\n', - Error: errInvalidDelim, - }, { - Name: "BadComment2", - Comment: '\r', - Error: errInvalidDelim, - }, { - Name: "BadComment3", - Comment: utf8.RuneError, - Error: errInvalidDelim, - }, { - Name: "BadCommaComment", - Comma: 'X', - Comment: 'X', - Error: errInvalidDelim, - }} - - for _, tt := range tests { - t.Run(tt.Name, func(t *testing.T) { - r := NewReader(strings.NewReader(tt.Input)) - - if tt.Comma != 0 { - r.Comma = tt.Comma - } - r.Comment = tt.Comment - if tt.UseFieldsPerRecord { - r.FieldsPerRecord = tt.FieldsPerRecord - } else { - r.FieldsPerRecord = -1 - } - r.LazyQuotes = tt.LazyQuotes - r.TrimLeadingSpace = tt.TrimLeadingSpace - r.ReuseRecord = tt.ReuseRecord - - out, err := r.ReadAll() - if !reflect.DeepEqual(err, tt.Error) { - t.Errorf("ReadAll() error:\ngot %v\nwant %v", err, tt.Error) - } else if !reflect.DeepEqual(out, tt.Output) { - t.Errorf("ReadAll() output:\ngot %q\nwant %q", out, tt.Output) - } - }) - } -} - -// nTimes is an io.Reader which yields the string s n times. -type nTimes struct { - s string - n int - off int -} - -func (r *nTimes) Read(p []byte) (n int, err error) { - for { - if r.n <= 0 || r.s == "" { - return n, io.EOF - } - n0 := copy(p, r.s[r.off:]) - p = p[n0:] - n += n0 - r.off += n0 - if r.off == len(r.s) { - r.off = 0 - r.n-- - } - if len(p) == 0 { - return - } - } -} - -// benchmarkRead measures reading the provided CSV rows data. -// initReader, if non-nil, modifies the Reader before it's used. 
-func benchmarkRead(b *testing.B, initReader func(*Reader), rows string) { - b.ReportAllocs() - r := NewReader(&nTimes{s: rows, n: b.N}) - if initReader != nil { - initReader(r) - } - for { - _, err := r.Read() - if err == io.EOF { - break - } - if err != nil { - b.Fatal(err) - } - } -} - -const benchmarkCSVData = `x,y,z,w -x,y,z, -x,y,, -x,,, -,,, -"x","y","z","w" -"x","y","z","" -"x","y","","" -"x","","","" -"","","","" -` - -func BenchmarkRead(b *testing.B) { - benchmarkRead(b, nil, benchmarkCSVData) -} - -func BenchmarkReadWithFieldsPerRecord(b *testing.B) { - benchmarkRead(b, func(r *Reader) { r.FieldsPerRecord = 4 }, benchmarkCSVData) -} - -func BenchmarkReadWithoutFieldsPerRecord(b *testing.B) { - benchmarkRead(b, func(r *Reader) { r.FieldsPerRecord = -1 }, benchmarkCSVData) -} - -func BenchmarkReadLargeFields(b *testing.B) { - benchmarkRead(b, nil, strings.Repeat(`xxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv -xxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvv -,,zzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv -xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv -`, 3)) -} - -func BenchmarkReadReuseRecord(b *testing.B) { - benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true }, benchmarkCSVData) -} - -func BenchmarkReadReuseRecordWithFieldsPerRecord(b *testing.B) { - benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true; r.FieldsPerRecord = 4 }, benchmarkCSVData) -} - -func BenchmarkReadReuseRecordWithoutFieldsPerRecord(b *testing.B) { - benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true; r.FieldsPerRecord = -1 }, benchmarkCSVData) -} - -func BenchmarkReadReuseRecordLargeFields(b *testing.B) { - benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true }, strings.Repeat(`xxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv -xxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvv -,,zzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv -xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv -`, 3)) -} diff --git a/pkg/csvparser/writer.go b/pkg/csvparser/writer.go 
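The benchmarks above compare ReuseRecord on and off. When ReuseRecord is enabled, the slice returned by Read is reused by the next call, so callers that retain records must copy them first. A short sketch, using the pre-removal import path:

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	csv "github.com/minio/minio/pkg/csvparser"
)

func main() {
	r := csv.NewReader(strings.NewReader("a,b\nc,d\n"))
	r.ReuseRecord = true

	var kept [][]string
	for {
		rec, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		// Copy before retaining: rec's backing array is overwritten by the next Read.
		kept = append(kept, append([]string(nil), rec...))
	}
	fmt.Println(kept) // [[a b] [c d]]
}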
deleted file mode 100644 index 2376a476..00000000 --- a/pkg/csvparser/writer.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in https://golang.org/LICENSE - -package csv - -import ( - "bufio" - "io" - "strings" - "unicode" - "unicode/utf8" -) - -// A Writer writes records using CSV encoding. -// -// As returned by NewWriter, a Writer writes records terminated by a -// newline and uses ',' as the field delimiter. The exported fields can be -// changed to customize the details before the first call to Write or WriteAll. -// -// Comma is the field delimiter. -// -// If UseCRLF is true, the Writer ends each output line with \r\n instead of \n. -// -// The writes of individual records are buffered. -// After all data has been written, the client should call the -// Flush method to guarantee all data has been forwarded to -// the underlying io.Writer. Any errors that occurred should -// be checked by calling the Error method. -type Writer struct { - Comma rune // Field delimiter (set to ',' by NewWriter) - Quote rune // Fields quote character - QuoteEscape rune - AlwaysQuote bool // True to quote all fields - UseCRLF bool // True to use \r\n as the line terminator - w *bufio.Writer -} - -// NewWriter returns a new Writer that writes to w. -func NewWriter(w io.Writer) *Writer { - return &Writer{ - Comma: ',', - Quote: '"', - QuoteEscape: '"', - w: bufio.NewWriter(w), - } -} - -// Write writes a single CSV record to w along with any necessary quoting. -// A record is a slice of strings with each string being one field. -// Writes are buffered, so Flush must eventually be called to ensure -// that the record is written to the underlying io.Writer. -func (w *Writer) Write(record []string) error { - if !validDelim(w.Comma) { - return errInvalidDelim - } - - for n, field := range record { - if n > 0 { - if _, err := w.w.WriteRune(w.Comma); err != nil { - return err - } - } - - // If we don't have to have a quoted field then just - // write out the field and continue to the next field. - if !w.AlwaysQuote && !w.fieldNeedsQuotes(field) { - if _, err := w.w.WriteString(field); err != nil { - return err - } - continue - } - - if _, err := w.w.WriteRune(w.Quote); err != nil { - return err - } - - specialChars := "\r\n" + string(w.Quote) - - for len(field) > 0 { - // Search for special characters. - i := strings.IndexAny(field, specialChars) - if i < 0 { - i = len(field) - } - - // Copy verbatim everything before the special character. - if _, err := w.w.WriteString(field[:i]); err != nil { - return err - } - field = field[i:] - - // Encode the special character. - if len(field) > 0 { - var err error - switch nextRune([]byte(field)) { - case w.Quote: - _, err = w.w.WriteRune(w.QuoteEscape) - if err != nil { - break - } - _, err = w.w.WriteRune(w.Quote) - case '\r': - if !w.UseCRLF { - err = w.w.WriteByte('\r') - } - case '\n': - if w.UseCRLF { - _, err = w.w.WriteString("\r\n") - } else { - err = w.w.WriteByte('\n') - } - } - field = field[1:] - if err != nil { - return err - } - } - } - if _, err := w.w.WriteRune(w.Quote); err != nil { - return err - } - } - var err error - if w.UseCRLF { - _, err = w.w.WriteString("\r\n") - } else { - err = w.w.WriteByte('\n') - } - return err -} - -// Flush writes any buffered data to the underlying io.Writer. -// To check if an error occurred during the Flush, call Error. 
-func (w *Writer) Flush() { - w.w.Flush() -} - -// Error reports any error that has occurred during a previous Write or Flush. -func (w *Writer) Error() error { - _, err := w.w.Write(nil) - return err -} - -// WriteAll writes multiple CSV records to w using Write and then calls Flush, -// returning any error from the Flush. -func (w *Writer) WriteAll(records [][]string) error { - for _, record := range records { - err := w.Write(record) - if err != nil { - return err - } - } - return w.w.Flush() -} - -// fieldNeedsQuotes reports whether our field must be enclosed in quotes. -// Fields with a Comma, fields with a quote or newline, and -// fields which start with a space must be enclosed in quotes. -// We used to quote empty strings, but we do not anymore (as of Go 1.4). -// The two representations should be equivalent, but Postgres distinguishes -// quoted vs non-quoted empty string during database imports, and it has -// an option to force the quoted behavior for non-quoted CSV but it has -// no option to force the non-quoted behavior for quoted CSV, making -// CSV with quoted empty strings strictly less useful. -// Not quoting the empty string also makes this package match the behavior -// of Microsoft Excel and Google Drive. -// For Postgres, quote the data terminating string `\.`. -func (w *Writer) fieldNeedsQuotes(field string) bool { - if field == "" { - return false - } - if field == `\.` || strings.ContainsAny(field, "\r\n"+string(w.Quote)+string(w.Comma)) { - return true - } - - r1, _ := utf8.DecodeRuneInString(field) - return unicode.IsSpace(r1) -} diff --git a/pkg/csvparser/writer_test.go b/pkg/csvparser/writer_test.go deleted file mode 100644 index e64c1540..00000000 --- a/pkg/csvparser/writer_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. 
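A sketch of the quoting rules fieldNeedsQuotes implements above: fields containing the delimiter, a quote, a newline, or a leading space are quoted, everything else is written bare unless AlwaysQuote is set. The import path is the pre-removal one and the expected output in the comments follows the writer tests below.

package main

import (
	"log"
	"os"

	csv "github.com/minio/minio/pkg/csvparser"
)

func main() {
	w := csv.NewWriter(os.Stdout)
	if err := w.WriteAll([][]string{
		{"plain", "has,comma", `has "quote"`, " leading space", ""},
	}); err != nil {
		log.Fatal(err)
	}
	// Output: plain,"has,comma","has ""quote"""," leading space",

	w = csv.NewWriter(os.Stdout)
	w.Comma = '|'
	w.AlwaysQuote = true
	if err := w.WriteAll([][]string{{"a", "", "b"}}); err != nil {
		log.Fatal(err)
	}
	// Output: "a"|""|"b"
}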
-// Use of this source code is governed by a BSD-style -// license that can be found in https://golang.org/LICENSE - -package csv - -import ( - "bytes" - "errors" - "testing" -) - -var writeTests = []struct { - Input [][]string - Output string - Error error - UseCRLF bool - Comma rune - Quote rune - AlwaysQuote bool -}{ - {Input: [][]string{{"abc"}}, Output: "abc\n"}, - {Input: [][]string{{"abc"}}, Output: "abc\r\n", UseCRLF: true}, - {Input: [][]string{{`"abc"`}}, Output: `"""abc"""` + "\n"}, - {Input: [][]string{{`a"b`}}, Output: `"a""b"` + "\n"}, - {Input: [][]string{{`"a"b"`}}, Output: `"""a""b"""` + "\n"}, - {Input: [][]string{{" abc"}}, Output: `" abc"` + "\n"}, - {Input: [][]string{{"abc,def"}}, Output: `"abc,def"` + "\n"}, - {Input: [][]string{{"abc", "def"}}, Output: "abc,def\n"}, - {Input: [][]string{{"abc"}, {"def"}}, Output: "abc\ndef\n"}, - {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\ndef\"\n"}, - {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\r\ndef\"\r\n", UseCRLF: true}, - {Input: [][]string{{"abc\rdef"}}, Output: "\"abcdef\"\r\n", UseCRLF: true}, - {Input: [][]string{{"abc\rdef"}}, Output: "\"abc\rdef\"\n", UseCRLF: false}, - {Input: [][]string{{""}}, Output: "\n"}, - {Input: [][]string{{"", ""}}, Output: ",\n"}, - {Input: [][]string{{"", "", ""}}, Output: ",,\n"}, - {Input: [][]string{{"", "", "a"}}, Output: ",,a\n"}, - {Input: [][]string{{"", "a", ""}}, Output: ",a,\n"}, - {Input: [][]string{{"", "a", "a"}}, Output: ",a,a\n"}, - {Input: [][]string{{"a", "", ""}}, Output: "a,,\n"}, - {Input: [][]string{{"a", "", "a"}}, Output: "a,,a\n"}, - {Input: [][]string{{"a", "a", ""}}, Output: "a,a,\n"}, - {Input: [][]string{{"a", "a", "a"}}, Output: "a,a,a\n"}, - {Input: [][]string{{`\.`}}, Output: "\"\\.\"\n"}, - {Input: [][]string{{"x09\x41\xb4\x1c", "aktau"}}, Output: "x09\x41\xb4\x1c,aktau\n"}, - {Input: [][]string{{",x09\x41\xb4\x1c", "aktau"}}, Output: "\",x09\x41\xb4\x1c\",aktau\n"}, - {Input: [][]string{{"a", "a", ""}}, Output: "a|a|\n", Comma: '|'}, - {Input: [][]string{{",", ",", ""}}, Output: ",|,|\n", Comma: '|'}, - {Input: [][]string{{"foo"}}, Comma: '"', Error: errInvalidDelim}, - {Input: [][]string{{"a", "a", ""}}, Quote: '"', AlwaysQuote: true, Output: "\"a\"|\"a\"|\"\"\n", Comma: '|'}, -} - -func TestWrite(t *testing.T) { - for n, tt := range writeTests { - b := &bytes.Buffer{} - f := NewWriter(b) - f.UseCRLF = tt.UseCRLF - if tt.Comma != 0 { - f.Comma = tt.Comma - } - if tt.Quote != 0 { - f.Quote = tt.Quote - } - f.AlwaysQuote = tt.AlwaysQuote - err := f.WriteAll(tt.Input) - if err != tt.Error { - t.Errorf("Unexpected error:\ngot %v\nwant %v", err, tt.Error) - } - out := b.String() - if out != tt.Output { - t.Errorf("#%d: out=%q want %q", n, out, tt.Output) - } - } -} - -type errorWriter struct{} - -func (e errorWriter) Write(b []byte) (int, error) { - return 0, errors.New("Test") -} - -func TestError(t *testing.T) { - b := &bytes.Buffer{} - f := NewWriter(b) - f.Write([]string{"abc"}) - f.Flush() - err := f.Error() - - if err != nil { - t.Errorf("Unexpected error: %s\n", err) - } - - f = NewWriter(errorWriter{}) - f.Write([]string{"abc"}) - f.Flush() - err = f.Error() - - if err == nil { - t.Error("Error should not be nil") - } -} diff --git a/pkg/disk/directio_darwin.go b/pkg/disk/directio_darwin.go deleted file mode 100644 index b6cd4a2b..00000000 --- a/pkg/disk/directio_darwin.go +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2019-2020 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import ( - "os" - - "github.com/ncw/directio" - "golang.org/x/sys/unix" -) - -// OpenFileDirectIO - bypass kernel cache. -func OpenFileDirectIO(filePath string, flag int, perm os.FileMode) (*os.File, error) { - return directio.OpenFile(filePath, flag, perm) -} - -// DisableDirectIO - disables directio mode. -func DisableDirectIO(f *os.File) error { - fd := f.Fd() - _, err := unix.FcntlInt(fd, unix.F_NOCACHE, 0) - return err -} - -// AlignedBlock - pass through to directio implementation. -func AlignedBlock(BlockSize int) []byte { - return directio.AlignedBlock(BlockSize) -} diff --git a/pkg/disk/directio_unix.go b/pkg/disk/directio_unix.go deleted file mode 100644 index 16c2cf11..00000000 --- a/pkg/disk/directio_unix.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build linux netbsd freebsd - -/* - * Minio Cloud Storage, (C) 2019-2020 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import ( - "os" - "syscall" - - "github.com/ncw/directio" - "golang.org/x/sys/unix" -) - -// OpenFileDirectIO - bypass kernel cache. -func OpenFileDirectIO(filePath string, flag int, perm os.FileMode) (*os.File, error) { - return directio.OpenFile(filePath, flag, perm) -} - -// DisableDirectIO - disables directio mode. -func DisableDirectIO(f *os.File) error { - fd := f.Fd() - flag, err := unix.FcntlInt(fd, unix.F_GETFL, 0) - if err != nil { - return err - } - flag = flag & ^(syscall.O_DIRECT) - _, err = unix.FcntlInt(fd, unix.F_SETFL, flag) - return err -} - -// AlignedBlock - pass through to directio implementation. -func AlignedBlock(BlockSize int) []byte { - return directio.AlignedBlock(BlockSize) -} diff --git a/pkg/disk/directio_unsupported.go b/pkg/disk/directio_unsupported.go deleted file mode 100644 index 7241f456..00000000 --- a/pkg/disk/directio_unsupported.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build !linux,!netbsd,!freebsd,!darwin,!openbsd - -/* - * Minio Cloud Storage, (C) 2019-2020 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
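A usage sketch for the direct I/O helpers above: open a file with the kernel cache bypassed, read into an aligned buffer, then switch the descriptor back to cached I/O. The path and block size are illustrative and the import path is the pre-removal one.

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/minio/minio/pkg/disk"
)

func main() {
	f, err := disk.OpenFileDirectIO("/tmp/sample.bin", os.O_RDONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Direct I/O generally requires sector-aligned buffers and offsets.
	buf := disk.AlignedBlock(4096)
	n, err := f.Read(buf)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Printf("read %d bytes with the kernel cache bypassed\n", n)

	// Switch the descriptor back to normal, cached reads if needed.
	if err := disk.DisableDirectIO(f); err != nil {
		log.Fatal(err)
	}
}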
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import ( - "os" -) - -// OpenBSD, Windows, and illumos do not support O_DIRECT. -// On Windows there is no documentation on disabling O_DIRECT. -// For these systems we do not attempt to build the 'directio' dependency since -// the O_DIRECT symbol may not be exposed resulting in a failed build. -// -// -// On illumos an explicit O_DIRECT flag is not necessary for two primary -// reasons. Note that ZFS is effectively the default filesystem on illumos -// systems. -// -// One benefit of using DirectIO on Linux is that the page cache will not be -// polluted with single-access data. The ZFS read cache (ARC) is scan-resistant -// so there is no risk of polluting the entire cache with data accessed once. -// Another goal of DirectIO is to minimize the mutation of data by the kernel -// before issuing IO to underlying devices. ZFS users often enable features like -// compression and checksumming which currently necessitates mutating data in -// the kernel. -// -// DirectIO semantics for a filesystem like ZFS would be quite different than -// the semantics on filesystems like XFS, and these semantics are not -// implemented at this time. -// For more information on why typical DirectIO semantics do not apply to ZFS -// see this ZFS-on-Linux commit message: -// https://github.com/openzfs/zfs/commit/a584ef26053065f486d46a7335bea222cb03eeea - -// OpenFileDirectIO wrapper around os.OpenFile nothing special -func OpenFileDirectIO(filePath string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(filePath, flag, perm) -} - -// DisableDirectIO is a no-op -func DisableDirectIO(f *os.File) error { - return nil -} - -// AlignedBlock simply returns an unaligned buffer -// for systems that do not support DirectIO. -func AlignedBlock(BlockSize int) []byte { - return make([]byte, BlockSize) -} - -// Fdatasync is a no-op -func Fdatasync(f *os.File) error { - return nil -} diff --git a/pkg/disk/disk.go b/pkg/disk/disk.go deleted file mode 100644 index 504b5fd6..00000000 --- a/pkg/disk/disk.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -// Info stat fs struct is container which holds following values -// Total - total size of the volume / disk -// Free - free size of the volume / disk -// Files - total inodes available -// Ffree - free inodes available -// FSType - file system type -type Info struct { - Total uint64 - Free uint64 - Files uint64 - Ffree uint64 - FSType string - - // Usage is calculated per tenant. - Usage uint64 -} diff --git a/pkg/disk/disk_test.go b/pkg/disk/disk_test.go deleted file mode 100644 index 678f2745..00000000 --- a/pkg/disk/disk_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build !netbsd,!solaris - -/* - * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. 
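Info is the common return type for the per-platform GetInfo implementations that follow. A small sketch of how a caller might turn it into a usage figure:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/pkg/disk"
)

func main() {
	di, err := disk.GetInfo("/")
	if err != nil {
		log.Fatal(err)
	}
	if di.Total == 0 { // e.g. the netbsd fallback returns an empty Info
		return
	}
	used := di.Total - di.Free
	fmt.Printf("fs=%s total=%d free=%d used=%.1f%%\n",
		di.FSType, di.Total, di.Free, 100*float64(used)/float64(di.Total))
}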
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk_test - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/minio/minio/pkg/disk" -) - -func TestFree(t *testing.T) { - path, err := ioutil.TempDir(os.TempDir(), "minio-") - defer os.RemoveAll(path) - if err != nil { - t.Fatal(err) - } - - di, err := disk.GetInfo(path) - if err != nil { - t.Fatal(err) - } - - if di.FSType == "UNKNOWN" { - t.Error("Unexpected FSType", di.FSType) - } -} diff --git a/pkg/disk/fdatasync_linux.go b/pkg/disk/fdatasync_linux.go deleted file mode 100644 index d88b1d53..00000000 --- a/pkg/disk/fdatasync_linux.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build linux - -/* - * Minio Cloud Storage, (C) 2020 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import ( - "os" - "syscall" -) - -// Fdatasync - fdatasync() is similar to fsync(), but does not flush modified metadata -// unless that metadata is needed in order to allow a subsequent data retrieval -// to be correctly handled. For example, changes to st_atime or st_mtime -// (respectively, time of last access and time of last modification; see inode(7)) -// do not require flushing because they are not necessary for a subsequent data -// read to be handled correctly. On the other hand, a change to the file size -// (st_size, as made by say ftruncate(2)), would require a metadata flush. -// -// The aim of fdatasync() is to reduce disk activity for applications that -// do not require all metadata to be synchronized with the disk. -func Fdatasync(f *os.File) error { - return syscall.Fdatasync(int(f.Fd())) -} diff --git a/pkg/disk/fdatasync_unix.go b/pkg/disk/fdatasync_unix.go deleted file mode 100644 index 44d7c620..00000000 --- a/pkg/disk/fdatasync_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build freebsd netbsd openbsd darwin - -/* - * Minio Cloud Storage, (C) 2020 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
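A sketch of the intended calling pattern: flush appended records with Fdatasync so the data (and the size change needed to read it back) reaches stable storage without paying for timestamp-only metadata flushes. The path is illustrative:

package main

import (
	"log"
	"os"

	"github.com/minio/minio/pkg/disk"
)

func main() {
	f, err := os.OpenFile("/tmp/wal.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if _, err := f.Write([]byte("record\n")); err != nil {
		log.Fatal(err)
	}
	// Data and the file-size change are flushed; pure atime/mtime updates are not.
	if err := disk.Fdatasync(f); err != nil {
		log.Fatal(err)
	}
}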
- */ - -package disk - -import ( - "os" - "syscall" -) - -// Fdatasync is fsync on freebsd/darwin -func Fdatasync(f *os.File) error { - return syscall.Fsync(int(f.Fd())) -} diff --git a/pkg/disk/obd.go b/pkg/disk/obd.go deleted file mode 100644 index 4580967b..00000000 --- a/pkg/disk/obd.go +++ /dev/null @@ -1,174 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package disk - -import ( - "context" - "fmt" - "os" - "time" - - "github.com/dustin/go-humanize" - "github.com/montanaflynn/stats" -) - -var globalLatency = map[string]Latency{} -var globalThroughput = map[string]Throughput{} - -// Latency holds latency information for write operations to the drive -type Latency struct { - Avg float64 `json:"avg_secs,omitempty"` - Percentile50 float64 `json:"percentile50_secs,omitempty"` - Percentile90 float64 `json:"percentile90_secs,omitempty"` - Percentile99 float64 `json:"percentile99_secs,omitempty"` - Min float64 `json:"min_secs,omitempty"` - Max float64 `json:"max_secs,omitempty"` -} - -// Throughput holds throughput information for write operations to the drive -type Throughput struct { - Avg float64 `json:"avg_bytes_per_sec,omitempty"` - Percentile50 float64 `json:"percentile50_bytes_per_sec,omitempty"` - Percentile90 float64 `json:"percentile90_bytes_per_sec,omitempty"` - Percentile99 float64 `json:"percentile99_bytes_per_sec,omitempty"` - Min float64 `json:"min_bytes_per_sec,omitempty"` - Max float64 `json:"max_bytes_per_sec,omitempty"` -} - -// GetOBDInfo about the drive -func GetOBDInfo(ctx context.Context, drive, fsPath string) (Latency, Throughput, error) { - - // Create a file with O_DIRECT flag, choose default umask and also make sure - // we are exclusively writing to a new file using O_EXCL. 
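	// What follows writes fileSize/blockSize (256 MiB / 4 MiB = 64) aligned
	// blocks through the O_DIRECT handle, recording the wall-clock latency of
	// each write and deriving per-block throughput as blockSize/latency
	// (bytes per second). Mean, 50th/90th/99th percentiles, min and max of
	// both series are then folded into the returned Latency and Throughput.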
- w, err := OpenFileDirectIO(fsPath, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0666) - if err != nil { - return Latency{}, Throughput{}, err - } - - defer func() { - w.Close() - os.Remove(fsPath) - }() - - // going to leave this here incase we decide to go back to caching again - // if gl, ok := globalLatency[drive]; ok { - // if gt, ok := globalThroughput[drive]; ok { - // return gl, gt, nil - // } - // } - - blockSize := 4 * humanize.MiByte - fileSize := 256 * humanize.MiByte - - latencies := make([]float64, fileSize/blockSize) - throughputs := make([]float64, fileSize/blockSize) - - data := AlignedBlock(blockSize) - - for i := 0; i < (fileSize / blockSize); i++ { - if ctx.Err() != nil { - return Latency{}, Throughput{}, ctx.Err() - } - startTime := time.Now() - if n, err := w.Write(data); err != nil { - return Latency{}, Throughput{}, err - } else if n != blockSize { - return Latency{}, Throughput{}, fmt.Errorf("Expected to write %d, but only wrote %d", blockSize, n) - } - latencyInSecs := time.Since(startTime).Seconds() - latencies[i] = float64(latencyInSecs) - } - - for i := range latencies { - throughput := float64(blockSize) / latencies[i] - throughputs[i] = throughput - } - - var avgLatency float64 - var percentile50Latency float64 - var percentile90Latency float64 - var percentile99Latency float64 - var minLatency float64 - var maxLatency float64 - - var avgThroughput float64 - var percentile50Throughput float64 - var percentile90Throughput float64 - var percentile99Throughput float64 - var minThroughput float64 - var maxThroughput float64 - - if avgLatency, err = stats.Mean(latencies); err != nil { - return Latency{}, Throughput{}, err - } - if percentile50Latency, err = stats.Percentile(latencies, 50); err != nil { - return Latency{}, Throughput{}, err - } - if percentile90Latency, err = stats.Percentile(latencies, 90); err != nil { - return Latency{}, Throughput{}, err - } - if percentile99Latency, err = stats.Percentile(latencies, 99); err != nil { - return Latency{}, Throughput{}, err - } - if maxLatency, err = stats.Max(latencies); err != nil { - return Latency{}, Throughput{}, err - } - if minLatency, err = stats.Min(latencies); err != nil { - return Latency{}, Throughput{}, err - } - l := Latency{ - Avg: avgLatency, - Percentile50: percentile50Latency, - Percentile90: percentile90Latency, - Percentile99: percentile99Latency, - Min: minLatency, - Max: maxLatency, - } - - if avgThroughput, err = stats.Mean(throughputs); err != nil { - return Latency{}, Throughput{}, err - } - if percentile50Throughput, err = stats.Percentile(throughputs, 50); err != nil { - return Latency{}, Throughput{}, err - } - if percentile90Throughput, err = stats.Percentile(throughputs, 90); err != nil { - return Latency{}, Throughput{}, err - } - if percentile99Throughput, err = stats.Percentile(throughputs, 99); err != nil { - return Latency{}, Throughput{}, err - } - if maxThroughput, err = stats.Max(throughputs); err != nil { - return Latency{}, Throughput{}, err - } - if minThroughput, err = stats.Min(throughputs); err != nil { - return Latency{}, Throughput{}, err - } - t := Throughput{ - Avg: avgThroughput, - Percentile50: percentile50Throughput, - Percentile90: percentile90Throughput, - Percentile99: percentile99Throughput, - Min: minThroughput, - Max: maxThroughput, - } - - globalLatency[drive] = l - globalThroughput[drive] = t - - return l, t, nil -} diff --git a/pkg/disk/root_disk_unix.go b/pkg/disk/root_disk_unix.go deleted file mode 100644 index 4a1025fe..00000000 --- 
a/pkg/disk/root_disk_unix.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build !windows - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package disk - -import ( - "os" - "syscall" -) - -// IsRootDisk returns if diskPath belongs to root-disk, i.e the disk mounted at "/" -func IsRootDisk(diskPath string) (bool, error) { - rootDisk := false - diskInfo, err := os.Stat(diskPath) - if err != nil { - return false, err - } - rootInfo, err := os.Stat("/") - if err != nil { - return false, err - } - diskStat, diskStatOK := diskInfo.Sys().(*syscall.Stat_t) - rootStat, rootStatOK := rootInfo.Sys().(*syscall.Stat_t) - if diskStatOK && rootStatOK { - if diskStat.Dev == rootStat.Dev { - // Indicate if the disk path is on root disk. This is used to indicate the healing - // process not to format the drive and end up healing it. - rootDisk = true - } - } - return rootDisk, nil -} diff --git a/pkg/disk/root_disk_windows.go b/pkg/disk/root_disk_windows.go deleted file mode 100644 index 3cf4dce2..00000000 --- a/pkg/disk/root_disk_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build windows - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package disk - -// IsRootDisk returns if diskPath belongs to root-disk, i.e the disk mounted at "/" -func IsRootDisk(diskPath string) (bool, error) { - // On windows a disk can never be mounted on a subpath. - return false, nil -} diff --git a/pkg/disk/stat_bsd.go b/pkg/disk/stat_bsd.go deleted file mode 100644 index d286a271..00000000 --- a/pkg/disk/stat_bsd.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build darwin freebsd dragonfly - -/* - * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import ( - "syscall" -) - -// GetInfo returns total and free bytes available in a directory, e.g. `/`. 
-func GetInfo(path string) (info Info, err error) { - s := syscall.Statfs_t{} - err = syscall.Statfs(path, &s) - if err != nil { - return Info{}, err - } - reservedBlocks := uint64(s.Bfree) - uint64(s.Bavail) - info = Info{ - Total: uint64(s.Bsize) * (uint64(s.Blocks) - reservedBlocks), - Free: uint64(s.Bsize) * uint64(s.Bavail), - Files: uint64(s.Files), - Ffree: uint64(s.Ffree), - FSType: getFSType(s.Fstypename[:]), - } - return info, nil -} diff --git a/pkg/disk/stat_fallback.go b/pkg/disk/stat_fallback.go deleted file mode 100644 index 66f7c6f6..00000000 --- a/pkg/disk/stat_fallback.go +++ /dev/null @@ -1,24 +0,0 @@ -// +build netbsd - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -// GetInfo returns total and free bytes available in a directory, e.g. `/`. -func GetInfo(path string) (info Info, err error) { - return Info{}, nil -} diff --git a/pkg/disk/stat_linux.go b/pkg/disk/stat_linux.go deleted file mode 100644 index 055c4242..00000000 --- a/pkg/disk/stat_linux.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import ( - "fmt" - "syscall" -) - -// GetInfo returns total and free bytes available in a directory, e.g. `/`. -func GetInfo(path string) (info Info, err error) { - s := syscall.Statfs_t{} - err = syscall.Statfs(path, &s) - if err != nil { - return Info{}, err - } - reservedBlocks := uint64(s.Bfree) - uint64(s.Bavail) - info = Info{ - Total: uint64(s.Frsize) * (uint64(s.Blocks) - reservedBlocks), - Free: uint64(s.Frsize) * uint64(s.Bavail), - Files: uint64(s.Files), - Ffree: uint64(s.Ffree), - FSType: getFSType(int64(s.Type)), - } - // Check for overflows. - // https://github.com/minio/minio/issues/8035 - // XFS can show wrong values at times error out - // in such scenarios. - if info.Free > info.Total { - return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path) - } - return info, nil -} diff --git a/pkg/disk/stat_openbsd.go b/pkg/disk/stat_openbsd.go deleted file mode 100644 index 7f43a10e..00000000 --- a/pkg/disk/stat_openbsd.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build openbsd - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. 
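These GetInfo variants all subtract the root-reserved blocks (Bfree - Bavail) from the raw block count, so Total reflects only space an unprivileged caller could ever use and Total - Free lines up with the Free that same caller sees. A worked example with made-up numbers:

package main

import "fmt"

func main() {
	const (
		bsize  = uint64(1024)      // filesystem block size in bytes
		blocks = uint64(1_000_000) // total blocks on the volume
		bfree  = uint64(100_000)   // free blocks, including the root-only reserve
		bavail = uint64(50_000)    // free blocks available to unprivileged users
	)
	reserved := bfree - bavail           // 50_000 blocks held back for root
	total := bsize * (blocks - reserved) // 972_800_000 bytes
	free := bsize * bavail               // 51_200_000 bytes
	fmt.Println(total, free, total-free) // 972800000 51200000 921600000
}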
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import ( - "syscall" -) - -// GetInfo returns total and free bytes available in a directory, e.g. `/`. -func GetInfo(path string) (info Info, err error) { - s := syscall.Statfs_t{} - err = syscall.Statfs(path, &s) - if err != nil { - return Info{}, err - } - reservedBlocks := uint64(s.F_bfree) - uint64(s.F_bavail) - info = Info{ - Total: uint64(s.F_bsize) * (uint64(s.F_blocks) - reservedBlocks), - Free: uint64(s.F_bsize) * uint64(s.F_bavail), - Files: uint64(s.F_files), - Ffree: uint64(s.F_ffree), - FSType: getFSType(s.F_fstypename[:]), - } - return info, nil -} diff --git a/pkg/disk/stat_solaris.go b/pkg/disk/stat_solaris.go deleted file mode 100644 index ea4802d1..00000000 --- a/pkg/disk/stat_solaris.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build solaris - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import ( - "golang.org/x/sys/unix" -) - -// GetInfo returns total and free bytes available in a directory, e.g. `/`. -func GetInfo(path string) (info Info, err error) { - s := unix.Statvfs_t{} - if err = unix.Statvfs(path, &s); err != nil { - return Info{}, err - } - reservedBlocks := uint64(s.Bfree) - uint64(s.Bavail) - info = Info{ - Total: uint64(s.Frsize) * (uint64(s.Blocks) - reservedBlocks), - Free: uint64(s.Frsize) * uint64(s.Bavail), - Files: uint64(s.Files), - Ffree: uint64(s.Ffree), - FSType: getFSType(s.Fstr[:]), - } - return info, nil -} diff --git a/pkg/disk/stat_windows.go b/pkg/disk/stat_windows.go deleted file mode 100644 index 62ed08c5..00000000 --- a/pkg/disk/stat_windows.go +++ /dev/null @@ -1,94 +0,0 @@ -// +build windows - -/* - * MinIO Cloud Storage, (C) 2015 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package disk - -import ( - "os" - "syscall" - "unsafe" -) - -var ( - kernel32 = syscall.NewLazyDLL("kernel32.dll") - - // GetDiskFreeSpaceEx - https://msdn.microsoft.com/en-us/library/windows/desktop/aa364937(v=vs.85).aspx - // Retrieves information about the amount of space that is available on a disk volume, - // which is the total amount of space, the total amount of free space, and the total - // amount of free space available to the user that is associated with the calling thread. - GetDiskFreeSpaceEx = kernel32.NewProc("GetDiskFreeSpaceExW") - // GetDiskFreeSpace - https://msdn.microsoft.com/en-us/library/windows/desktop/aa364935(v=vs.85).aspx - // Retrieves information about the specified disk, including the amount of free space on the disk. - GetDiskFreeSpace = kernel32.NewProc("GetDiskFreeSpaceW") -) - -// GetInfo returns total and free bytes available in a directory, e.g. `C:\`. -// It returns free space available to the user (including quota limitations) -// -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa364937(v=vs.85).aspx -func GetInfo(path string) (info Info, err error) { - // Stat to know if the path exists. - if _, err = os.Stat(path); err != nil { - return Info{}, err - } - - lpFreeBytesAvailable := int64(0) - lpTotalNumberOfBytes := int64(0) - lpTotalNumberOfFreeBytes := int64(0) - - // Extract values safely - // BOOL WINAPI GetDiskFreeSpaceEx( - // _In_opt_ LPCTSTR lpDirectoryName, - // _Out_opt_ PULARGE_INTEGER lpFreeBytesAvailable, - // _Out_opt_ PULARGE_INTEGER lpTotalNumberOfBytes, - // _Out_opt_ PULARGE_INTEGER lpTotalNumberOfFreeBytes - // ); - _, _, _ = GetDiskFreeSpaceEx.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), - uintptr(unsafe.Pointer(&lpFreeBytesAvailable)), - uintptr(unsafe.Pointer(&lpTotalNumberOfBytes)), - uintptr(unsafe.Pointer(&lpTotalNumberOfFreeBytes))) - info = Info{} - info.Total = uint64(lpTotalNumberOfBytes) - info.Free = uint64(lpFreeBytesAvailable) - info.FSType = getFSType(path) - - // Return values of GetDiskFreeSpace() - lpSectorsPerCluster := uint32(0) - lpBytesPerSector := uint32(0) - lpNumberOfFreeClusters := uint32(0) - lpTotalNumberOfClusters := uint32(0) - - // Extract values safely - // BOOL WINAPI GetDiskFreeSpace( - // _In_ LPCTSTR lpRootPathName, - // _Out_ LPDWORD lpSectorsPerCluster, - // _Out_ LPDWORD lpBytesPerSector, - // _Out_ LPDWORD lpNumberOfFreeClusters, - // _Out_ LPDWORD lpTotalNumberOfClusters - // ); - _, _, _ = GetDiskFreeSpace.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path))), - uintptr(unsafe.Pointer(&lpSectorsPerCluster)), - uintptr(unsafe.Pointer(&lpBytesPerSector)), - uintptr(unsafe.Pointer(&lpNumberOfFreeClusters)), - uintptr(unsafe.Pointer(&lpTotalNumberOfClusters))) - - info.Files = uint64(lpTotalNumberOfClusters) - info.Ffree = uint64(lpNumberOfFreeClusters) - - return info, nil -} diff --git a/pkg/disk/type_bsd.go b/pkg/disk/type_bsd.go deleted file mode 100644 index 2957ae6a..00000000 --- a/pkg/disk/type_bsd.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build darwin freebsd dragonfly openbsd solaris - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
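GetDiskFreeSpaceEx already reports byte totals, while GetDiskFreeSpace reports cluster geometry; the code above keeps the cluster counts only to fill Files and Ffree, since this Windows API exposes no inode counts. If byte figures ever had to come from the cluster call instead, the conversion is plain multiplication; the numbers below are made up:

package main

import "fmt"

func main() {
	const (
		sectorsPerCluster = uint64(8)
		bytesPerSector    = uint64(512) // 8 * 512 = 4 KiB clusters
		freeClusters      = uint64(1_000_000)
		totalClusters     = uint64(4_000_000)
	)
	clusterSize := sectorsPerCluster * bytesPerSector
	fmt.Println(totalClusters*clusterSize, freeClusters*clusterSize) // 16384000000 4096000000
}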
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -// getFSType returns the filesystem type of the underlying mounted filesystem -func getFSType(fstype []int8) string { - b := make([]byte, len(fstype)) - for i, v := range fstype { - b[i] = byte(v) - } - return string(b) -} diff --git a/pkg/disk/type_linux.go b/pkg/disk/type_linux.go deleted file mode 100644 index 2c4e0b48..00000000 --- a/pkg/disk/type_linux.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import "strconv" - -// fsType2StringMap - list of filesystems supported on linux -var fsType2StringMap = map[string]string{ - "1021994": "TMPFS", - "137d": "EXT", - "4244": "HFS", - "4d44": "MSDOS", - "52654973": "REISERFS", - "5346544e": "NTFS", - "58465342": "XFS", - "61756673": "AUFS", - "6969": "NFS", - "ef51": "EXT2OLD", - "ef53": "EXT4", - "f15f": "ecryptfs", - "794c7630": "overlayfs", - "2fc12fc1": "zfs", - "ff534d42": "cifs", - "53464846": "wslfs", -} - -// getFSType returns the filesystem type of the underlying mounted filesystem -func getFSType(ftype int64) string { - fsTypeHex := strconv.FormatInt(ftype, 16) - fsTypeString, ok := fsType2StringMap[fsTypeHex] - if !ok { - return "UNKNOWN" - } - return fsTypeString -} diff --git a/pkg/disk/type_windows.go b/pkg/disk/type_windows.go deleted file mode 100644 index 6ea3e150..00000000 --- a/pkg/disk/type_windows.go +++ /dev/null @@ -1,62 +0,0 @@ -// +build windows - -/* - * MinIO Cloud Storage, (C) 2015 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package disk - -import ( - "path/filepath" - "syscall" - "unsafe" -) - -var ( - // GetVolumeInformation provides windows drive volume information. 
- GetVolumeInformation = kernel32.NewProc("GetVolumeInformationW") -) - -// getFSType returns the filesystem type of the underlying mounted filesystem -func getFSType(path string) string { - volumeNameSize, nFileSystemNameSize := uint32(260), uint32(260) - var lpVolumeSerialNumber uint32 - var lpFileSystemFlags, lpMaximumComponentLength uint32 - var lpFileSystemNameBuffer, volumeName [260]uint16 - var ps = syscall.StringToUTF16Ptr(filepath.VolumeName(path)) - - // Extract values safely - // BOOL WINAPI GetVolumeInformation( - // _In_opt_ LPCTSTR lpRootPathName, - // _Out_opt_ LPTSTR lpVolumeNameBuffer, - // _In_ DWORD nVolumeNameSize, - // _Out_opt_ LPDWORD lpVolumeSerialNumber, - // _Out_opt_ LPDWORD lpMaximumComponentLength, - // _Out_opt_ LPDWORD lpFileSystemFlags, - // _Out_opt_ LPTSTR lpFileSystemNameBuffer, - // _In_ DWORD nFileSystemNameSize - // ); - - _, _, _ = GetVolumeInformation.Call(uintptr(unsafe.Pointer(ps)), - uintptr(unsafe.Pointer(&volumeName)), - uintptr(volumeNameSize), - uintptr(unsafe.Pointer(&lpVolumeSerialNumber)), - uintptr(unsafe.Pointer(&lpMaximumComponentLength)), - uintptr(unsafe.Pointer(&lpFileSystemFlags)), - uintptr(unsafe.Pointer(&lpFileSystemNameBuffer)), - uintptr(nFileSystemNameSize)) - - return syscall.UTF16ToString(lpFileSystemNameBuffer[:]) -} diff --git a/pkg/dsync/.gitignore b/pkg/dsync/.gitignore deleted file mode 100644 index c17ac1ee..00000000 --- a/pkg/dsync/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -dsync.test -coverage.txt -*.out \ No newline at end of file diff --git a/pkg/dsync/drwmutex.go b/pkg/dsync/drwmutex.go deleted file mode 100644 index 9399151b..00000000 --- a/pkg/dsync/drwmutex.go +++ /dev/null @@ -1,418 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dsync - -import ( - "context" - "errors" - golog "log" - "math/rand" - "os" - "sync" - "time" - - "github.com/minio/minio/pkg/retry" -) - -// Indicator if logging is enabled. -var dsyncLog bool - -func init() { - // Check for MINIO_DSYNC_TRACE env variable, if set logging will be enabled for failed REST operations. - dsyncLog = os.Getenv("MINIO_DSYNC_TRACE") == "1" - rand.Seed(time.Now().UnixNano()) -} - -func log(msg ...interface{}) { - if dsyncLog { - golog.Println(msg...) - } -} - -// DRWMutexAcquireTimeout - tolerance limit to wait for lock acquisition before. -const DRWMutexAcquireTimeout = 1 * time.Second // 1 second. -const drwMutexInfinite = time.Duration(1<<63 - 1) - -// A DRWMutex is a distributed mutual exclusion lock. -type DRWMutex struct { - Names []string - writeLocks []string // Array of nodes that granted a write lock - readersLocks [][]string // Array of array of nodes that granted reader locks - m sync.Mutex // Mutex to prevent multiple simultaneous locks from this node - clnt *Dsync -} - -// Granted - represents a structure of a granted lock. 
-type Granted struct { - index int - lockUID string // Locked if set with UID string, unlocked if empty -} - -func (g *Granted) isLocked() bool { - return isLocked(g.lockUID) -} - -func isLocked(uid string) bool { - return len(uid) > 0 -} - -// NewDRWMutex - initializes a new dsync RW mutex. -func NewDRWMutex(clnt *Dsync, names ...string) *DRWMutex { - return &DRWMutex{ - writeLocks: make([]string, len(clnt.GetLockersFn())), - Names: names, - clnt: clnt, - } -} - -// Lock holds a write lock on dm. -// -// If the lock is already in use, the calling go routine -// blocks until the mutex is available. -func (dm *DRWMutex) Lock(id, source string) { - - isReadLock := false - dm.lockBlocking(context.Background(), drwMutexInfinite, id, source, isReadLock) -} - -// GetLock tries to get a write lock on dm before the timeout elapses. -// -// If the lock is already in use, the calling go routine -// blocks until either the mutex becomes available and return success or -// more time has passed than the timeout value and return false. -func (dm *DRWMutex) GetLock(ctx context.Context, id, source string, timeout time.Duration) (locked bool) { - - isReadLock := false - return dm.lockBlocking(ctx, timeout, id, source, isReadLock) -} - -// RLock holds a read lock on dm. -// -// If one or more read locks are already in use, it will grant another lock. -// Otherwise the calling go routine blocks until the mutex is available. -func (dm *DRWMutex) RLock(id, source string) { - - isReadLock := true - dm.lockBlocking(context.Background(), drwMutexInfinite, id, source, isReadLock) -} - -// GetRLock tries to get a read lock on dm before the timeout elapses. -// -// If one or more read locks are already in use, it will grant another lock. -// Otherwise the calling go routine blocks until either the mutex becomes -// available and return success or more time has passed than the timeout -// value and return false. -func (dm *DRWMutex) GetRLock(ctx context.Context, id, source string, timeout time.Duration) (locked bool) { - - isReadLock := true - return dm.lockBlocking(ctx, timeout, id, source, isReadLock) -} - -// lockBlocking will try to acquire either a read or a write lock -// -// The function will loop using a built-in timing randomized back-off -// algorithm until either the lock is acquired successfully or more -// time has elapsed than the timeout value. -func (dm *DRWMutex) lockBlocking(ctx context.Context, timeout time.Duration, id, source string, isReadLock bool) (locked bool) { - restClnts := dm.clnt.GetLockersFn() - - retryCtx, cancel := context.WithTimeout(ctx, timeout) - - defer cancel() - - // Use incremental back-off algorithm for repeated attempts to acquire the lock - for range retry.NewTimer(retryCtx) { - // Create temp array on stack. - locks := make([]string, len(restClnts)) - - // Try to acquire the lock. - success := lock(dm.clnt, &locks, id, source, isReadLock, dm.Names...) - if !success { - continue - } - - dm.m.Lock() - - // If success, copy array to object - if isReadLock { - // Append new array of strings at the end - dm.readersLocks = append(dm.readersLocks, make([]string, len(restClnts))) - // and copy stack array into last spot - copy(dm.readersLocks[len(dm.readersLocks)-1], locks[:]) - } else { - copy(dm.writeLocks, locks[:]) - } - - dm.m.Unlock() - return true - } - - // Failed to acquire the lock on this attempt, incrementally wait - // for a longer back-off time and try again afterwards. - return false -} - -// lock tries to acquire the distributed lock, returning true or false. 
-func lock(ds *Dsync, locks *[]string, id, source string, isReadLock bool, lockNames ...string) bool { - - restClnts := ds.GetLockersFn() - - // Create buffered channel of size equal to total number of nodes. - ch := make(chan Granted, len(restClnts)) - defer close(ch) - - var wg sync.WaitGroup - for index, c := range restClnts { - - wg.Add(1) - // broadcast lock request to all nodes - go func(index int, isReadLock bool, c NetLocker) { - defer wg.Done() - - g := Granted{index: index} - if c == nil { - ch <- g - return - } - - args := LockArgs{ - UID: id, - Resources: lockNames, - Source: source, - } - - var locked bool - var err error - if isReadLock { - if locked, err = c.RLock(args); err != nil { - log("Unable to call RLock", err) - } - } else { - if locked, err = c.Lock(args); err != nil { - log("Unable to call Lock", err) - } - } - - if locked { - g.lockUID = args.UID - } - - ch <- g - - }(index, isReadLock, c) - } - - quorum := false - - wg.Add(1) - go func(isReadLock bool) { - - // Wait until we have either - // - // a) received all lock responses - // b) received too many 'non-'locks for quorum to be still possible - // c) timedout - // - i, locksFailed := 0, 0 - done := false - timeout := time.After(DRWMutexAcquireTimeout) - - dquorumReads := (len(restClnts) + 1) / 2 - dquorum := dquorumReads + 1 - - for ; i < len(restClnts); i++ { // Loop until we acquired all locks - - select { - case grant := <-ch: - if grant.isLocked() { - // Mark that this node has acquired the lock - (*locks)[grant.index] = grant.lockUID - } else { - locksFailed++ - if !isReadLock && locksFailed > len(restClnts)-dquorum || - isReadLock && locksFailed > len(restClnts)-dquorumReads { - // We know that we are not going to get the lock anymore, - // so exit out and release any locks that did get acquired - done = true - // Increment the number of grants received from the buffered channel. - i++ - releaseAll(ds, locks, isReadLock, restClnts, lockNames...) - } - } - case <-timeout: - done = true - // timeout happened, maybe one of the nodes is slow, count - // number of locks to check whether we have quorum or not - if !quorumMet(locks, isReadLock, dquorum, dquorumReads) { - releaseAll(ds, locks, isReadLock, restClnts, lockNames...) - } - } - - if done { - break - } - } - - // Count locks in order to determine whether we have quorum or not - quorum = quorumMet(locks, isReadLock, dquorum, dquorumReads) - - // Signal that we have the quorum - wg.Done() - - // Wait for the other responses and immediately release the locks - // (do not add them to the locks array because the DRWMutex could - // already has been unlocked again by the original calling thread) - for ; i < len(restClnts); i++ { - grantToBeReleased := <-ch - if grantToBeReleased.isLocked() { - // release lock - sendRelease(ds, restClnts[grantToBeReleased.index], - grantToBeReleased.lockUID, isReadLock, lockNames...) 
- } - } - }(isReadLock) - - wg.Wait() - - return quorum -} - -// quorumMet determines whether we have acquired the required quorum of underlying locks or not -func quorumMet(locks *[]string, isReadLock bool, quorum, quorumReads int) bool { - - count := 0 - for _, uid := range *locks { - if isLocked(uid) { - count++ - } - } - - var metQuorum bool - if isReadLock { - metQuorum = count >= quorumReads - } else { - metQuorum = count >= quorum - } - - return metQuorum -} - -// releaseAll releases all locks that are marked as locked -func releaseAll(ds *Dsync, locks *[]string, isReadLock bool, restClnts []NetLocker, lockNames ...string) { - for lock := range restClnts { - if isLocked((*locks)[lock]) { - sendRelease(ds, restClnts[lock], (*locks)[lock], isReadLock, lockNames...) - (*locks)[lock] = "" - } - } -} - -// Unlock unlocks the write lock. -// -// It is a run-time error if dm is not locked on entry to Unlock. -func (dm *DRWMutex) Unlock() { - - restClnts := dm.clnt.GetLockersFn() - // create temp array on stack - locks := make([]string, len(restClnts)) - - { - dm.m.Lock() - defer dm.m.Unlock() - - // Check if minimally a single bool is set in the writeLocks array - lockFound := false - for _, uid := range dm.writeLocks { - if isLocked(uid) { - lockFound = true - break - } - } - if !lockFound { - panic("Trying to Unlock() while no Lock() is active") - } - - // Copy write locks to stack array - copy(locks, dm.writeLocks[:]) - // Clear write locks array - dm.writeLocks = make([]string, len(restClnts)) - } - - isReadLock := false - unlock(dm.clnt, locks, isReadLock, restClnts, dm.Names...) -} - -// RUnlock releases a read lock held on dm. -// -// It is a run-time error if dm is not locked on entry to RUnlock. -func (dm *DRWMutex) RUnlock() { - - // create temp array on stack - restClnts := dm.clnt.GetLockersFn() - - locks := make([]string, len(restClnts)) - { - dm.m.Lock() - defer dm.m.Unlock() - if len(dm.readersLocks) == 0 { - panic("Trying to RUnlock() while no RLock() is active") - } - // Copy out first element to release it first (FIFO) - copy(locks, dm.readersLocks[0][:]) - // Drop first element from array - dm.readersLocks = dm.readersLocks[1:] - } - - isReadLock := true - unlock(dm.clnt, locks, isReadLock, restClnts, dm.Names...) -} - -func unlock(ds *Dsync, locks []string, isReadLock bool, restClnts []NetLocker, names ...string) { - - // We don't need to synchronously wait until we have released all the locks (or the quorum) - // (a subsequent lock will retry automatically in case it would fail to get quorum) - - for index, c := range restClnts { - - if isLocked(locks[index]) { - // broadcast lock release to all nodes that granted the lock - sendRelease(ds, c, locks[index], isReadLock, names...) - } - } -} - -// sendRelease sends a release message to a node that previously granted a lock -func sendRelease(ds *Dsync, c NetLocker, uid string, isReadLock bool, names ...string) { - if c == nil { - log("Unable to call RUnlock", errors.New("netLocker is offline")) - return - } - - args := LockArgs{ - UID: uid, - Resources: names, - } - if isReadLock { - if _, err := c.RUnlock(args); err != nil { - log("Unable to call RUnlock", err) - } - } else { - if _, err := c.Unlock(args); err != nil { - log("Unable to call Unlock", err) - } - } -} diff --git a/pkg/dsync/drwmutex_test.go b/pkg/dsync/drwmutex_test.go deleted file mode 100644 index 422b966a..00000000 --- a/pkg/dsync/drwmutex_test.go +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. 
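drwmutex.go above is the client side of the distributed lock: with n lockers a read lock needs (n+1)/2 grants and a write lock one more, so the 4-node setup used by the tests below needs 2 grants to read and 3 to write. A minimal write-lock sketch, assuming ds is a *Dsync already wired to its lockers via GetLockersFn; the helper name, resource name and IDs are illustrative:

package example

import (
	"context"
	"errors"
	"time"

	"github.com/minio/minio/pkg/dsync"
)

// protectedUpdate sketches the write-lock path of DRWMutex.
func protectedUpdate(ds *dsync.Dsync) error {
	dm := dsync.NewDRWMutex(ds, "bucket/object")

	// Block for up to 5s while a write quorum of lockers is assembled.
	if !dm.GetLock(context.Background(), "uid-1234", "caller.go:42", 5*time.Second) {
		return errors.New("timed out acquiring distributed write lock")
	}
	defer dm.Unlock()

	// Critical section: at most one writer across the cluster runs here.
	return nil
}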
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dsync_test - -import ( - "context" - "fmt" - "runtime" - "sync/atomic" - "testing" - "time" - - . "github.com/minio/minio/pkg/dsync" -) - -const ( - id = "1234-5678" - source = "main.go" -) - -func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) { - - drwm := NewDRWMutex(ds, "simplelock") - - if !drwm.GetRLock(context.Background(), id, source, time.Second) { - panic("Failed to acquire read lock") - } - // fmt.Println("1st read lock acquired, waiting...") - - if !drwm.GetRLock(context.Background(), id, source, time.Second) { - panic("Failed to acquire read lock") - } - // fmt.Println("2nd read lock acquired, waiting...") - - go func() { - time.Sleep(2 * time.Second) - drwm.RUnlock() - // fmt.Println("1st read lock released, waiting...") - }() - - go func() { - time.Sleep(3 * time.Second) - drwm.RUnlock() - // fmt.Println("2nd read lock released, waiting...") - }() - - // fmt.Println("Trying to acquire write lock, waiting...") - locked = drwm.GetLock(context.Background(), id, source, duration) - if locked { - // fmt.Println("Write lock acquired, waiting...") - time.Sleep(time.Second) - - drwm.Unlock() - } - // fmt.Println("Write lock failed due to timeout") - return -} - -func TestSimpleWriteLockAcquired(t *testing.T) { - locked := testSimpleWriteLock(t, 5*time.Second) - - expected := true - if locked != expected { - t.Errorf("TestSimpleWriteLockAcquired(): \nexpected %#v\ngot %#v", expected, locked) - } -} - -func TestSimpleWriteLockTimedOut(t *testing.T) { - locked := testSimpleWriteLock(t, time.Second) - - expected := false - if locked != expected { - t.Errorf("TestSimpleWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked) - } -} - -func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) { - - drwm := NewDRWMutex(ds, "duallock") - - // fmt.Println("Getting initial write lock") - if !drwm.GetLock(context.Background(), id, source, time.Second) { - panic("Failed to acquire initial write lock") - } - - go func() { - time.Sleep(2 * time.Second) - drwm.Unlock() - // fmt.Println("Initial write lock released, waiting...") - }() - - // fmt.Println("Trying to acquire 2nd write lock, waiting...") - locked = drwm.GetLock(context.Background(), id, source, duration) - if locked { - // fmt.Println("2nd write lock acquired, waiting...") - time.Sleep(time.Second) - - drwm.Unlock() - } - // fmt.Println("2nd write lock failed due to timeout") - return -} - -func TestDualWriteLockAcquired(t *testing.T) { - locked := testDualWriteLock(t, 5*time.Second) - - expected := true - if locked != expected { - t.Errorf("TestDualWriteLockAcquired(): \nexpected %#v\ngot %#v", expected, locked) - } - -} - -func TestDualWriteLockTimedOut(t *testing.T) { - locked := testDualWriteLock(t, time.Second) - - expected := false - if locked != expected { - t.Errorf("TestDualWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked) - } - -} - -// Test cases below are copied 1 to 1 from 
sync/rwmutex_test.go (adapted to use DRWMutex) - -// Borrowed from rwmutex_test.go -func parallelReader(ctx context.Context, m *DRWMutex, clocked, cunlock, cdone chan bool) { - if m.GetRLock(ctx, id, source, time.Second) { - clocked <- true - <-cunlock - m.RUnlock() - cdone <- true - } -} - -// Borrowed from rwmutex_test.go -func doTestParallelReaders(numReaders, gomaxprocs int) { - runtime.GOMAXPROCS(gomaxprocs) - m := NewDRWMutex(ds, "test-parallel") - - clocked := make(chan bool) - cunlock := make(chan bool) - cdone := make(chan bool) - for i := 0; i < numReaders; i++ { - go parallelReader(context.Background(), m, clocked, cunlock, cdone) - } - // Wait for all parallel RLock()s to succeed. - for i := 0; i < numReaders; i++ { - <-clocked - } - for i := 0; i < numReaders; i++ { - cunlock <- true - } - // Wait for the goroutines to finish. - for i := 0; i < numReaders; i++ { - <-cdone - } -} - -// Borrowed from rwmutex_test.go -func TestParallelReaders(t *testing.T) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) - doTestParallelReaders(1, 4) - doTestParallelReaders(3, 4) - doTestParallelReaders(4, 2) -} - -// Borrowed from rwmutex_test.go -func reader(rwm *DRWMutex, numIterations int, activity *int32, cdone chan bool) { - for i := 0; i < numIterations; i++ { - if rwm.GetRLock(context.Background(), id, source, time.Second) { - n := atomic.AddInt32(activity, 1) - if n < 1 || n >= 10000 { - panic(fmt.Sprintf("wlock(%d)\n", n)) - } - for i := 0; i < 100; i++ { - } - atomic.AddInt32(activity, -1) - rwm.RUnlock() - } - } - cdone <- true -} - -// Borrowed from rwmutex_test.go -func writer(rwm *DRWMutex, numIterations int, activity *int32, cdone chan bool) { - for i := 0; i < numIterations; i++ { - if rwm.GetLock(context.Background(), id, source, time.Second) { - n := atomic.AddInt32(activity, 10000) - if n != 10000 { - panic(fmt.Sprintf("wlock(%d)\n", n)) - } - for i := 0; i < 100; i++ { - } - atomic.AddInt32(activity, -10000) - rwm.Unlock() - } - } - cdone <- true -} - -// Borrowed from rwmutex_test.go -func HammerRWMutex(gomaxprocs, numReaders, numIterations int) { - runtime.GOMAXPROCS(gomaxprocs) - // Number of active readers + 10000 * number of active writers. - var activity int32 - rwm := NewDRWMutex(ds, "test") - cdone := make(chan bool) - go writer(rwm, numIterations, &activity, cdone) - var i int - for i = 0; i < numReaders/2; i++ { - go reader(rwm, numIterations, &activity, cdone) - } - go writer(rwm, numIterations, &activity, cdone) - for ; i < numReaders; i++ { - go reader(rwm, numIterations, &activity, cdone) - } - // Wait for the 2 writers and all readers to finish. 
- for i := 0; i < 2+numReaders; i++ { - <-cdone - } -} - -// Borrowed from rwmutex_test.go -func TestRWMutex(t *testing.T) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) - n := 100 - if testing.Short() { - n = 5 - } - HammerRWMutex(1, 1, n) - HammerRWMutex(1, 3, n) - HammerRWMutex(1, 10, n) - HammerRWMutex(4, 1, n) - HammerRWMutex(4, 3, n) - HammerRWMutex(4, 10, n) - HammerRWMutex(10, 1, n) - HammerRWMutex(10, 3, n) - HammerRWMutex(10, 10, n) - HammerRWMutex(10, 5, n) -} - -// Borrowed from rwmutex_test.go -func TestUnlockPanic(t *testing.T) { - defer func() { - if recover() == nil { - t.Fatalf("unlock of unlocked RWMutex did not panic") - } - }() - mu := NewDRWMutex(ds, "test") - mu.Unlock() -} - -// Borrowed from rwmutex_test.go -func TestUnlockPanic2(t *testing.T) { - defer func() { - if recover() == nil { - t.Fatalf("unlock of unlocked RWMutex did not panic") - } - }() - mu := NewDRWMutex(ds, "test-unlock-panic-2") - mu.RLock(id, source) - mu.Unlock() -} - -// Borrowed from rwmutex_test.go -func TestRUnlockPanic(t *testing.T) { - defer func() { - if recover() == nil { - t.Fatalf("read unlock of unlocked RWMutex did not panic") - } - }() - mu := NewDRWMutex(ds, "test") - mu.RUnlock() -} - -// Borrowed from rwmutex_test.go -func TestRUnlockPanic2(t *testing.T) { - defer func() { - if recover() == nil { - t.Fatalf("read unlock of unlocked RWMutex did not panic") - } - }() - mu := NewDRWMutex(ds, "test-runlock-panic-2") - mu.Lock(id, source) - mu.RUnlock() -} - -// Borrowed from rwmutex_test.go -func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) { - rwm := NewDRWMutex(ds, "test") - b.RunParallel(func(pb *testing.PB) { - foo := 0 - for pb.Next() { - foo++ - if foo%writeRatio == 0 { - rwm.Lock(id, source) - rwm.Unlock() - } else { - rwm.RLock(id, source) - for i := 0; i != localWork; i++ { - foo *= 2 - foo /= 2 - } - rwm.RUnlock() - } - } - _ = foo - }) -} - -// Borrowed from rwmutex_test.go -func BenchmarkRWMutexWrite100(b *testing.B) { - benchmarkRWMutex(b, 0, 100) -} - -// Borrowed from rwmutex_test.go -func BenchmarkRWMutexWrite10(b *testing.B) { - benchmarkRWMutex(b, 0, 10) -} - -// Borrowed from rwmutex_test.go -func BenchmarkRWMutexWorkWrite100(b *testing.B) { - benchmarkRWMutex(b, 100, 100) -} - -// Borrowed from rwmutex_test.go -func BenchmarkRWMutexWorkWrite10(b *testing.B) { - benchmarkRWMutex(b, 100, 10) -} diff --git a/pkg/dsync/dsync-server_test.go b/pkg/dsync/dsync-server_test.go deleted file mode 100644 index fad0fb7b..00000000 --- a/pkg/dsync/dsync-server_test.go +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dsync_test - -import ( - "fmt" - "sync" - - . 
"github.com/minio/minio/pkg/dsync" -) - -const WriteLock = -1 - -type lockServer struct { - mutex sync.Mutex - // Map of locks, with negative value indicating (exclusive) write lock - // and positive values indicating number of read locks - lockMap map[string]int64 -} - -func (l *lockServer) Lock(args *LockArgs, reply *bool) error { - l.mutex.Lock() - defer l.mutex.Unlock() - if _, *reply = l.lockMap[args.Resources[0]]; !*reply { - l.lockMap[args.Resources[0]] = WriteLock // No locks held on the given name, so claim write lock - } - *reply = !*reply // Negate *reply to return true when lock is granted or false otherwise - return nil -} - -func (l *lockServer) Unlock(args *LockArgs, reply *bool) error { - l.mutex.Lock() - defer l.mutex.Unlock() - var locksHeld int64 - if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply { // No lock is held on the given name - return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Resources[0]) - } - if *reply = locksHeld == WriteLock; !*reply { // Unless it is a write lock - return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Resources[0], locksHeld) - } - delete(l.lockMap, args.Resources[0]) // Remove the write lock - return nil -} - -const ReadLock = 1 - -func (l *lockServer) RLock(args *LockArgs, reply *bool) error { - l.mutex.Lock() - defer l.mutex.Unlock() - var locksHeld int64 - if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply { - l.lockMap[args.Resources[0]] = ReadLock // No locks held on the given name, so claim (first) read lock - *reply = true - } else { - if *reply = locksHeld != WriteLock; *reply { // Unless there is a write lock - l.lockMap[args.Resources[0]] = locksHeld + ReadLock // Grant another read lock - } - } - return nil -} - -func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error { - l.mutex.Lock() - defer l.mutex.Unlock() - var locksHeld int64 - if locksHeld, *reply = l.lockMap[args.Resources[0]]; !*reply { // No lock is held on the given name - return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Resources[0]) - } - if *reply = locksHeld != WriteLock; !*reply { // A write-lock is held, cannot release a read lock - return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Resources[0]) - } - if locksHeld > ReadLock { - l.lockMap[args.Resources[0]] = locksHeld - ReadLock // Remove one of the read locks held - } else { - delete(l.lockMap, args.Resources[0]) // Remove the (last) read lock - } - return nil -} - -func (l *lockServer) ForceUnlock(args *LockArgs, reply *bool) error { - l.mutex.Lock() - defer l.mutex.Unlock() - if len(args.UID) != 0 { - return fmt.Errorf("ForceUnlock called with non-empty UID: %s", args.UID) - } - delete(l.lockMap, args.Resources[0]) // Remove the lock (irrespective of write or read lock) - *reply = true - return nil -} diff --git a/pkg/dsync/dsync.go b/pkg/dsync/dsync.go deleted file mode 100644 index a4022c7f..00000000 --- a/pkg/dsync/dsync.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dsync - -// Dsync represents dsync client object which is initialized with -// authenticated clients, used to initiate lock REST calls. -type Dsync struct { - // List of rest client objects, one per lock server. - GetLockersFn func() []NetLocker -} diff --git a/pkg/dsync/dsync_test.go b/pkg/dsync/dsync_test.go deleted file mode 100644 index a5788d5b..00000000 --- a/pkg/dsync/dsync_test.go +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// GOMAXPROCS=10 go test - -package dsync_test - -import ( - "fmt" - "log" - "math/rand" - "net" - "net/http" - "net/rpc" - "os" - "strconv" - "sync" - "testing" - "time" - - . "github.com/minio/minio/pkg/dsync" -) - -var ds *Dsync -var rpcPaths []string // list of rpc paths where lock server is serving. - -func startRPCServers(nodes []string) { - for i := range nodes { - server := rpc.NewServer() - server.RegisterName("Dsync", &lockServer{ - mutex: sync.Mutex{}, - lockMap: make(map[string]int64), - }) - // For some reason the registration paths need to be different (even for different server objs) - server.HandleHTTP(rpcPaths[i], fmt.Sprintf("%s-debug", rpcPaths[i])) - l, e := net.Listen("tcp", ":"+strconv.Itoa(i+12345)) - if e != nil { - log.Fatal("listen error:", e) - } - go http.Serve(l, nil) - } - - // Let servers start - time.Sleep(10 * time.Millisecond) -} - -// TestMain initializes the testing framework -func TestMain(m *testing.M) { - const rpcPath = "/dsync" - - rand.Seed(time.Now().UTC().UnixNano()) - - nodes := make([]string, 4) // list of node IP addrs or hostname with ports. - for i := range nodes { - nodes[i] = fmt.Sprintf("127.0.0.1:%d", i+12345) - } - for i := range nodes { - rpcPaths = append(rpcPaths, rpcPath+"-"+strconv.Itoa(i)) - } - - // Initialize net/rpc clients for dsync. 
- var clnts []NetLocker - for i := 0; i < len(nodes); i++ { - clnts = append(clnts, newClient(nodes[i], rpcPaths[i])) - } - - ds = &Dsync{ - GetLockersFn: func() []NetLocker { return clnts }, - } - - startRPCServers(nodes) - - os.Exit(m.Run()) -} - -func TestSimpleLock(t *testing.T) { - - dm := NewDRWMutex(ds, "test") - - dm.Lock(id, source) - - // fmt.Println("Lock acquired, waiting...") - time.Sleep(2500 * time.Millisecond) - - dm.Unlock() -} - -func TestSimpleLockUnlockMultipleTimes(t *testing.T) { - - dm := NewDRWMutex(ds, "test") - - dm.Lock(id, source) - time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock() - - dm.Lock(id, source) - time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock() - - dm.Lock(id, source) - time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock() - - dm.Lock(id, source) - time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock() - - dm.Lock(id, source) - time.Sleep(time.Duration(10+(rand.Float32()*50)) * time.Millisecond) - dm.Unlock() -} - -// Test two locks for same resource, one succeeds, one fails (after timeout) -func TestTwoSimultaneousLocksForSameResource(t *testing.T) { - - dm1st := NewDRWMutex(ds, "aap") - dm2nd := NewDRWMutex(ds, "aap") - - dm1st.Lock(id, source) - - // Release lock after 10 seconds - go func() { - time.Sleep(10 * time.Second) - // fmt.Println("Unlocking dm1") - - dm1st.Unlock() - }() - - dm2nd.Lock(id, source) - - // fmt.Printf("2nd lock obtained after 1st lock is released\n") - time.Sleep(2500 * time.Millisecond) - - dm2nd.Unlock() -} - -// Test three locks for same resource, one succeeds, one fails (after timeout) -func TestThreeSimultaneousLocksForSameResource(t *testing.T) { - - dm1st := NewDRWMutex(ds, "aap") - dm2nd := NewDRWMutex(ds, "aap") - dm3rd := NewDRWMutex(ds, "aap") - - dm1st.Lock(id, source) - - // Release lock after 10 seconds - go func() { - time.Sleep(10 * time.Second) - // fmt.Println("Unlocking dm1") - - dm1st.Unlock() - }() - - var wg sync.WaitGroup - wg.Add(2) - - go func() { - defer wg.Done() - - dm2nd.Lock(id, source) - - // Release lock after 10 seconds - go func() { - time.Sleep(2500 * time.Millisecond) - // fmt.Println("Unlocking dm2") - - dm2nd.Unlock() - }() - - dm3rd.Lock(id, source) - - // fmt.Printf("3rd lock obtained after 1st & 2nd locks are released\n") - time.Sleep(2500 * time.Millisecond) - - dm3rd.Unlock() - }() - - go func() { - defer wg.Done() - - dm3rd.Lock(id, source) - - // Release lock after 10 seconds - go func() { - time.Sleep(2500 * time.Millisecond) - // fmt.Println("Unlocking dm3") - - dm3rd.Unlock() - }() - - dm2nd.Lock(id, source) - - // fmt.Printf("2nd lock obtained after 1st & 3rd locks are released\n") - time.Sleep(2500 * time.Millisecond) - - dm2nd.Unlock() - }() - - wg.Wait() -} - -// Test two locks for different resources, both succeed -func TestTwoSimultaneousLocksForDifferentResources(t *testing.T) { - - dm1 := NewDRWMutex(ds, "aap") - dm2 := NewDRWMutex(ds, "noot") - - dm1.Lock(id, source) - dm2.Lock(id, source) - - // fmt.Println("Both locks acquired, waiting...") - time.Sleep(2500 * time.Millisecond) - - dm1.Unlock() - dm2.Unlock() - - time.Sleep(10 * time.Millisecond) -} - -// Borrowed from mutex_test.go -func HammerMutex(m *DRWMutex, loops int, cdone chan bool) { - for i := 0; i < loops; i++ { - m.Lock(id, source) - m.Unlock() - } - cdone <- true -} - -// Borrowed from mutex_test.go -func TestMutex(t *testing.T) { - loops := 200 - if testing.Short() { - loops = 5 - 
} - c := make(chan bool) - m := NewDRWMutex(ds, "test") - for i := 0; i < 10; i++ { - go HammerMutex(m, loops, c) - } - for i := 0; i < 10; i++ { - <-c - } -} - -func BenchmarkMutexUncontended(b *testing.B) { - type PaddedMutex struct { - *DRWMutex - } - b.RunParallel(func(pb *testing.PB) { - var mu = PaddedMutex{NewDRWMutex(ds, "")} - for pb.Next() { - mu.Lock(id, source) - mu.Unlock() - } - }) -} - -func benchmarkMutex(b *testing.B, slack, work bool) { - mu := NewDRWMutex(ds, "") - if slack { - b.SetParallelism(10) - } - b.RunParallel(func(pb *testing.PB) { - foo := 0 - for pb.Next() { - mu.Lock(id, source) - mu.Unlock() - if work { - for i := 0; i < 100; i++ { - foo *= 2 - foo /= 2 - } - } - } - _ = foo - }) -} - -func BenchmarkMutex(b *testing.B) { - benchmarkMutex(b, false, false) -} - -func BenchmarkMutexSlack(b *testing.B) { - benchmarkMutex(b, true, false) -} - -func BenchmarkMutexWork(b *testing.B) { - benchmarkMutex(b, false, true) -} - -func BenchmarkMutexWorkSlack(b *testing.B) { - benchmarkMutex(b, true, true) -} - -func BenchmarkMutexNoSpin(b *testing.B) { - // This benchmark models a situation where spinning in the mutex should be - // non-profitable and allows to confirm that spinning does not do harm. - // To achieve this we create excess of goroutines most of which do local work. - // These goroutines yield during local work, so that switching from - // a blocked goroutine to other goroutines is profitable. - // As a matter of fact, this benchmark still triggers some spinning in the mutex. - m := NewDRWMutex(ds, "") - var acc0, acc1 uint64 - b.SetParallelism(4) - b.RunParallel(func(pb *testing.PB) { - c := make(chan bool) - var data [4 << 10]uint64 - for i := 0; pb.Next(); i++ { - if i%4 == 0 { - m.Lock(id, source) - acc0 -= 100 - acc1 += 100 - m.Unlock() - } else { - for i := 0; i < len(data); i += 4 { - data[i]++ - } - // Elaborate way to say runtime.Gosched - // that does not put the goroutine onto global runq. - go func() { - c <- true - }() - <-c - } - } - }) -} - -func BenchmarkMutexSpin(b *testing.B) { - // This benchmark models a situation where spinning in the mutex should be - // profitable. To achieve this we create a goroutine per-proc. - // These goroutines access considerable amount of local data so that - // unnecessary rescheduling is penalized by cache misses. - m := NewDRWMutex(ds, "") - var acc0, acc1 uint64 - b.RunParallel(func(pb *testing.PB) { - var data [16 << 10]uint64 - for i := 0; pb.Next(); i++ { - m.Lock(id, source) - acc0 -= 100 - acc1 += 100 - m.Unlock() - for i := 0; i < len(data); i += 4 { - data[i]++ - } - } - }) -} diff --git a/pkg/dsync/rpc-client-impl_test.go b/pkg/dsync/rpc-client-impl_test.go deleted file mode 100644 index 042ec563..00000000 --- a/pkg/dsync/rpc-client-impl_test.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dsync_test - -import ( - "net/rpc" - "sync" - - . 
"github.com/minio/minio/pkg/dsync" -) - -// ReconnectRPCClient is a wrapper type for rpc.Client which provides reconnect on first failure. -type ReconnectRPCClient struct { - mutex sync.Mutex - rpc *rpc.Client - addr string - endpoint string -} - -// newClient constructs a ReconnectRPCClient object with addr and endpoint initialized. -// It _doesn't_ connect to the remote endpoint. See Call method to see when the -// connect happens. -func newClient(addr, endpoint string) NetLocker { - return &ReconnectRPCClient{ - addr: addr, - endpoint: endpoint, - } -} - -// Close closes the underlying socket file descriptor. -func (rpcClient *ReconnectRPCClient) IsOnline() bool { - rpcClient.mutex.Lock() - defer rpcClient.mutex.Unlock() - // If rpc client has not connected yet there is nothing to close. - return rpcClient.rpc != nil -} - -// Close closes the underlying socket file descriptor. -func (rpcClient *ReconnectRPCClient) Close() error { - rpcClient.mutex.Lock() - defer rpcClient.mutex.Unlock() - // If rpc client has not connected yet there is nothing to close. - if rpcClient.rpc == nil { - return nil - } - // Reset rpcClient.rpc to allow for subsequent calls to use a new - // (socket) connection. - clnt := rpcClient.rpc - rpcClient.rpc = nil - return clnt.Close() -} - -// Call makes a RPC call to the remote endpoint using the default codec, namely encoding/gob. -func (rpcClient *ReconnectRPCClient) Call(serviceMethod string, args interface{}, reply interface{}) (err error) { - rpcClient.mutex.Lock() - defer rpcClient.mutex.Unlock() - dialCall := func() error { - // If the rpc.Client is nil, we attempt to (re)connect with the remote endpoint. - if rpcClient.rpc == nil { - clnt, derr := rpc.DialHTTPPath("tcp", rpcClient.addr, rpcClient.endpoint) - if derr != nil { - return derr - } - rpcClient.rpc = clnt - } - // If the RPC fails due to a network-related error, then we reset - // rpc.Client for a subsequent reconnect. - return rpcClient.rpc.Call(serviceMethod, args, reply) - } - if err = dialCall(); err == rpc.ErrShutdown { - rpcClient.rpc.Close() - rpcClient.rpc = nil - err = dialCall() - } - return err -} - -func (rpcClient *ReconnectRPCClient) RLock(args LockArgs) (status bool, err error) { - err = rpcClient.Call("Dsync.RLock", &args, &status) - return status, err -} - -func (rpcClient *ReconnectRPCClient) Lock(args LockArgs) (status bool, err error) { - err = rpcClient.Call("Dsync.Lock", &args, &status) - return status, err -} - -func (rpcClient *ReconnectRPCClient) RUnlock(args LockArgs) (status bool, err error) { - err = rpcClient.Call("Dsync.RUnlock", &args, &status) - return status, err -} - -func (rpcClient *ReconnectRPCClient) Unlock(args LockArgs) (status bool, err error) { - err = rpcClient.Call("Dsync.Unlock", &args, &status) - return status, err -} - -func (rpcClient *ReconnectRPCClient) Expired(args LockArgs) (expired bool, err error) { - err = rpcClient.Call("Dsync.Expired", &args, &expired) - return expired, err -} - -func (rpcClient *ReconnectRPCClient) String() string { - return "http://" + rpcClient.addr + "/" + rpcClient.endpoint -} diff --git a/pkg/dsync/rpc-client-interface.go b/pkg/dsync/rpc-client-interface.go deleted file mode 100644 index aec5187e..00000000 --- a/pkg/dsync/rpc-client-interface.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package dsync - -// LockArgs is minimal required values for any dsync compatible lock operation. -type LockArgs struct { - // Unique ID of lock/unlock request. - UID string - - // Resources contains single or multiple entries to be locked/unlocked. - Resources []string - - // Source contains the line number, function and file name of the code - // on the client node that requested the lock. - Source string -} - -// NetLocker is dsync compatible locker interface. -type NetLocker interface { - // Do read lock for given LockArgs. It should return - // * a boolean to indicate success/failure of the operation - // * an error on failure of lock request operation. - RLock(args LockArgs) (bool, error) - - // Do write lock for given LockArgs. It should return - // * a boolean to indicate success/failure of the operation - // * an error on failure of lock request operation. - Lock(args LockArgs) (bool, error) - - // Do read unlock for given LockArgs. It should return - // * a boolean to indicate success/failure of the operation - // * an error on failure of unlock request operation. - RUnlock(args LockArgs) (bool, error) - - // Do write unlock for given LockArgs. It should return - // * a boolean to indicate success/failure of the operation - // * an error on failure of unlock request operation. - Unlock(args LockArgs) (bool, error) - - // Expired returns if current lock args has expired. - Expired(args LockArgs) (bool, error) - - // Returns underlying endpoint of this lock client instance. - String() string - - // Close closes any underlying connection to the service endpoint - Close() error - - // Is the underlying connection online? (is always true for any local lockers) - IsOnline() bool -} diff --git a/pkg/ellipses/ellipses.go b/pkg/ellipses/ellipses.go deleted file mode 100644 index 8ec8d132..00000000 --- a/pkg/ellipses/ellipses.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ellipses - -import ( - "errors" - "fmt" - "regexp" - "strconv" - "strings" -) - -var ( - // Regex to extract ellipses syntax inputs. - regexpEllipses = regexp.MustCompile(`(.*)({[0-9a-z]*\.\.\.[0-9a-z]*})(.*)`) - - // Ellipses constants - openBraces = "{" - closeBraces = "}" - ellipses = "..." 
-) - -// Parses an ellipses range pattern of following style -// `{1...64}` -// `{33...64}` -func parseEllipsesRange(pattern string) (seq []string, err error) { - if !strings.Contains(pattern, openBraces) { - return nil, errors.New("Invalid argument") - } - if !strings.Contains(pattern, closeBraces) { - return nil, errors.New("Invalid argument") - } - - pattern = strings.TrimPrefix(pattern, openBraces) - pattern = strings.TrimSuffix(pattern, closeBraces) - - ellipsesRange := strings.Split(pattern, ellipses) - if len(ellipsesRange) != 2 { - return nil, errors.New("Invalid argument") - } - - var hexadecimal bool - var start, end uint64 - if start, err = strconv.ParseUint(ellipsesRange[0], 10, 64); err != nil { - // Look for hexadecimal conversions if any. - start, err = strconv.ParseUint(ellipsesRange[0], 16, 64) - if err != nil { - return nil, err - } - hexadecimal = true - } - - if end, err = strconv.ParseUint(ellipsesRange[1], 10, 64); err != nil { - // Look for hexadecimal conversions if any. - end, err = strconv.ParseUint(ellipsesRange[1], 16, 64) - if err != nil { - return nil, err - } - hexadecimal = true - } - - if start > end { - return nil, fmt.Errorf("Incorrect range start %d cannot be bigger than end %d", start, end) - } - - for i := start; i <= end; i++ { - if strings.HasPrefix(ellipsesRange[0], "0") && len(ellipsesRange[0]) > 1 || strings.HasPrefix(ellipsesRange[1], "0") { - if hexadecimal { - seq = append(seq, fmt.Sprintf(fmt.Sprintf("%%0%dx", len(ellipsesRange[1])), i)) - } else { - seq = append(seq, fmt.Sprintf(fmt.Sprintf("%%0%dd", len(ellipsesRange[1])), i)) - } - } else { - if hexadecimal { - seq = append(seq, fmt.Sprintf("%x", i)) - } else { - seq = append(seq, fmt.Sprintf("%d", i)) - } - } - } - - return seq, nil -} - -// Pattern - ellipses pattern, describes the range and also the -// associated prefix and suffixes. -type Pattern struct { - Prefix string - Suffix string - Seq []string -} - -// argExpander - recursively expands labels into its respective forms. -func argExpander(labels [][]string) (out [][]string) { - if len(labels) == 1 { - for _, v := range labels[0] { - out = append(out, []string{v}) - } - return out - } - for _, lbl := range labels[0] { - rs := argExpander(labels[1:]) - for _, rlbls := range rs { - r := append(rlbls, []string{lbl}...) - out = append(out, r) - } - } - return out -} - -// ArgPattern contains a list of patterns provided in the input. -type ArgPattern []Pattern - -// Expand - expands all the ellipses patterns in -// the given argument. -func (a ArgPattern) Expand() [][]string { - labels := make([][]string, len(a)) - for i := range labels { - labels[i] = a[i].Expand() - } - return argExpander(labels) -} - -// Expand - expands a ellipses pattern. -func (p Pattern) Expand() []string { - var labels []string - for i := range p.Seq { - switch { - case p.Prefix != "" && p.Suffix == "": - labels = append(labels, fmt.Sprintf("%s%s", p.Prefix, p.Seq[i])) - case p.Suffix != "" && p.Prefix == "": - labels = append(labels, fmt.Sprintf("%s%s", p.Seq[i], p.Suffix)) - case p.Suffix == "" && p.Prefix == "": - labels = append(labels, p.Seq[i]) - default: - labels = append(labels, fmt.Sprintf("%s%s%s", p.Prefix, p.Seq[i], p.Suffix)) - } - } - return labels -} - -// HasEllipses - returns true if input arg has ellipses type pattern. 
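A minimal sketch of how a caller importing this package might combine the exported helpers above and below (HasEllipses, FindEllipsesPatterns, ArgPattern.Expand) to expand an argument; the wrapper function, its argument, and the strings.Join step are illustrative assumptions, not part of this file:

	// expandArg turns an ellipses argument such as "mydisk-{1...4}" into the
	// concrete names mydisk-1 ... mydisk-4. Arguments without a pattern are
	// returned unchanged. Assumes `import "strings"` alongside this package.
	func expandArg(arg string) ([]string, error) {
		if !ellipses.HasEllipses(arg) {
			return []string{arg}, nil
		}
		argP, err := ellipses.FindEllipsesPatterns(arg)
		if err != nil {
			return nil, err
		}
		var out []string
		for _, labels := range argP.Expand() {
			// Each entry of Expand() is one combination of expanded labels,
			// ordered left to right, so joining them yields the full name.
			out = append(out, strings.Join(labels, ""))
		}
		return out, nil
	}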
-func HasEllipses(args ...string) bool { - var ok = true - for _, arg := range args { - ok = ok && (strings.Count(arg, ellipses) > 0 || (strings.Count(arg, openBraces) > 0 && strings.Count(arg, closeBraces) > 0)) - } - return ok -} - -// ErrInvalidEllipsesFormatFn error returned when invalid ellipses format is detected. -var ErrInvalidEllipsesFormatFn = func(arg string) error { - return fmt.Errorf("Invalid ellipsis format in (%s), Ellipsis range must be provided in format {N...M} where N and M are positive integers, M must be greater than N, with an allowed minimum range of 4", arg) -} - -// FindEllipsesPatterns - finds all ellipses patterns, recursively and parses the ranges numerically. -func FindEllipsesPatterns(arg string) (ArgPattern, error) { - var patterns []Pattern - parts := regexpEllipses.FindStringSubmatch(arg) - if len(parts) == 0 { - // We throw an error if arg doesn't have any recognizable ellipses pattern. - return nil, ErrInvalidEllipsesFormatFn(arg) - } - - parts = parts[1:] - patternFound := regexpEllipses.MatchString(parts[0]) - for patternFound { - seq, err := parseEllipsesRange(parts[1]) - if err != nil { - return patterns, err - } - patterns = append(patterns, Pattern{ - Prefix: "", - Suffix: parts[2], - Seq: seq, - }) - parts = regexpEllipses.FindStringSubmatch(parts[0]) - if len(parts) > 0 { - parts = parts[1:] - patternFound = HasEllipses(parts[0]) - continue - } - break - } - - if len(parts) > 0 { - seq, err := parseEllipsesRange(parts[1]) - if err != nil { - return patterns, err - } - - patterns = append(patterns, Pattern{ - Prefix: parts[0], - Suffix: parts[2], - Seq: seq, - }) - } - - // Check if any of the prefix or suffixes now have flower braces - // left over, in such a case we generally think that there is - // perhaps a typo in users input and error out accordingly. - for _, pattern := range patterns { - if strings.Count(pattern.Prefix, openBraces) > 0 || strings.Count(pattern.Prefix, closeBraces) > 0 { - return nil, ErrInvalidEllipsesFormatFn(arg) - } - if strings.Count(pattern.Suffix, openBraces) > 0 || strings.Count(pattern.Suffix, closeBraces) > 0 { - return nil, ErrInvalidEllipsesFormatFn(arg) - } - } - - return patterns, nil -} diff --git a/pkg/ellipses/ellipses_test.go b/pkg/ellipses/ellipses_test.go deleted file mode 100644 index 03aa3da7..00000000 --- a/pkg/ellipses/ellipses_test.go +++ /dev/null @@ -1,255 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ellipses - -import ( - "fmt" - "testing" -) - -// Test tests args with ellipses. -func TestHasEllipses(t *testing.T) { - testCases := []struct { - args []string - expectedOk bool - }{ - // Tests for all args without ellipses. - { - []string{"64"}, - false, - }, - // Found flower braces, still attempt to parse and throw an error. - { - []string{"{1..64}"}, - true, - }, - { - []string{"{1..2..}"}, - true, - }, - // Test for valid input. 
- { - []string{"1...64"}, - true, - }, - { - []string{"{1...2O}"}, - true, - }, - { - []string{"..."}, - true, - }, - { - []string{"{-1...1}"}, - true, - }, - { - []string{"{0...-1}"}, - true, - }, - { - []string{"{1....4}"}, - true, - }, - { - []string{"{1...64}"}, - true, - }, - { - []string{"{...}"}, - true, - }, - { - []string{"{1...64}", "{65...128}"}, - true, - }, - { - []string{"http://minio{2...3}/export/set{1...64}"}, - true, - }, - { - []string{ - "http://minio{2...3}/export/set{1...64}", - "http://minio{2...3}/export/set{65...128}", - }, - true, - }, - { - []string{ - "mydisk-{a...z}{1...20}", - }, - true, - }, - { - - []string{ - "mydisk-{1...4}{1..2.}", - }, - true, - }, - } - - for i, testCase := range testCases { - t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) { - gotOk := HasEllipses(testCase.args...) - if gotOk != testCase.expectedOk { - t.Errorf("Expected %t, got %t", testCase.expectedOk, gotOk) - } - }) - } -} - -// Test tests find ellipses patterns. -func TestFindEllipsesPatterns(t *testing.T) { - testCases := []struct { - pattern string - success bool - expectedCount int - }{ - // Tests for all invalid inputs - { - "{1..64}", - false, - 0, - }, - { - "1...64", - false, - 0, - }, - { - "...", - false, - 0, - }, - { - "{1...", - false, - 0, - }, - { - "...64}", - false, - 0, - }, - { - "{...}", - false, - 0, - }, - { - "{-1...1}", - false, - 0, - }, - { - "{0...-1}", - false, - 0, - }, - { - "{1...2O}", - false, - 0, - }, - { - "{64...1}", - false, - 0, - }, - { - "{1....4}", - false, - 0, - }, - { - "mydisk-{a...z}{1...20}", - false, - 0, - }, - { - "mydisk-{1...4}{1..2.}", - false, - 0, - }, - { - "{1..2.}-mydisk-{1...4}", - false, - 0, - }, - { - "{{1...4}}", - false, - 0, - }, - { - "{4...02}", - false, - 0, - }, - { - "{f...z}", - false, - 0, - }, - // Test for valid input. - { - "{1...64}", - true, - 64, - }, - { - "{1...64} {65...128}", - true, - 4096, - }, - { - "{01...036}", - true, - 36, - }, - { - "{001...036}", - true, - 36, - }, - { - "{1...a}", - true, - 10, - }, - } - - for i, testCase := range testCases { - t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) { - argP, err := FindEllipsesPatterns(testCase.pattern) - if err != nil && testCase.success { - t.Errorf("Expected success but failed instead %s", err) - } - if err == nil && !testCase.success { - t.Errorf("Expected failure but passed instead") - } - if err == nil { - gotCount := len(argP.Expand()) - if gotCount != testCase.expectedCount { - t.Errorf("Expected %d, got %d", testCase.expectedCount, gotCount) - } - } - }) - } -} diff --git a/pkg/env/env.go b/pkg/env/env.go deleted file mode 100644 index bdc406eb..00000000 --- a/pkg/env/env.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package env - -import ( - "os" - "strings" - "sync" -) - -var ( - privateMutex sync.RWMutex - envOff bool -) - -// SetEnvOff - turns off env lookup -func SetEnvOff() { - privateMutex.Lock() - defer privateMutex.Unlock() - - envOff = true -} - -// SetEnvOn - turns on env lookup -func SetEnvOn() { - privateMutex.Lock() - defer privateMutex.Unlock() - - envOff = false -} - -// IsSet returns if the given env key is set. -func IsSet(key string) bool { - _, ok := os.LookupEnv(key) - return ok -} - -// Get retrieves the value of the environment variable named -// by the key. If the variable is present in the environment the -// value (which may be empty) is returned. Otherwise it returns -// the specified default value. -func Get(key, defaultValue string) string { - privateMutex.RLock() - ok := envOff - privateMutex.RUnlock() - if ok { - return defaultValue - } - if v, ok := os.LookupEnv(key); ok { - return v - } - return defaultValue -} - -// List all envs with a given prefix. -func List(prefix string) (envs []string) { - for _, env := range os.Environ() { - if strings.HasPrefix(env, prefix) { - values := strings.SplitN(env, "=", 2) - if len(values) == 2 { - envs = append(envs, values[0]) - } - } - } - return envs -} diff --git a/pkg/event/arn.go b/pkg/event/arn.go deleted file mode 100644 index 1111701c..00000000 --- a/pkg/event/arn.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "encoding/xml" - "strings" -) - -// ARN - SQS resource name representation. -type ARN struct { - TargetID - region string -} - -// String - returns string representation. -func (arn ARN) String() string { - if arn.TargetID.ID == "" && arn.TargetID.Name == "" && arn.region == "" { - return "" - } - - return "arn:minio:sqs:" + arn.region + ":" + arn.TargetID.String() -} - -// MarshalXML - encodes to XML data. -func (arn ARN) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return e.EncodeElement(arn.String(), start) -} - -// UnmarshalXML - decodes XML data. -func (arn *ARN) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var s string - if err := d.DecodeElement(&s, &start); err != nil { - return err - } - - parsedARN, err := parseARN(s) - if err != nil { - return err - } - - *arn = *parsedARN - return nil -} - -// parseARN - parses string to ARN. 
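The accepted wire form is arn:minio:sqs:&lt;region&gt;:&lt;target-id&gt;:&lt;target-name&gt;, where the region may be empty but the target ID and name may not. A minimal in-package sketch of the round trip through String and parseARN (mirroring the tests, which live in package event); the variable names are illustrative only:

	arn := ARN{TargetID{"1", "webhook"}, "us-east-1"}
	s := arn.String() // "arn:minio:sqs:us-east-1:1:webhook"
	if parsed, err := parseARN(s); err == nil && parsed.String() == s {
		// The round trip is lossless; a wrong prefix, a token count other
		// than six, or an empty ID or name yields ErrInvalidARN instead.
	}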
-func parseARN(s string) (*ARN, error) { - // ARN must be in the format of arn:minio:sqs::: - if !strings.HasPrefix(s, "arn:minio:sqs:") { - return nil, &ErrInvalidARN{s} - } - - tokens := strings.Split(s, ":") - if len(tokens) != 6 { - return nil, &ErrInvalidARN{s} - } - - if tokens[4] == "" || tokens[5] == "" { - return nil, &ErrInvalidARN{s} - } - - return &ARN{ - region: tokens[3], - TargetID: TargetID{ - ID: tokens[4], - Name: tokens[5], - }, - }, nil -} diff --git a/pkg/event/arn_test.go b/pkg/event/arn_test.go deleted file mode 100644 index fd6e1868..00000000 --- a/pkg/event/arn_test.go +++ /dev/null @@ -1,129 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "encoding/xml" - "reflect" - "testing" -) - -func TestARNString(t *testing.T) { - testCases := []struct { - arn ARN - expectedResult string - }{ - {ARN{}, ""}, - {ARN{TargetID{"1", "webhook"}, ""}, "arn:minio:sqs::1:webhook"}, - {ARN{TargetID{"1", "webhook"}, "us-east-1"}, "arn:minio:sqs:us-east-1:1:webhook"}, - } - - for i, testCase := range testCases { - result := testCase.arn.String() - - if result != testCase.expectedResult { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestARNMarshalXML(t *testing.T) { - testCases := []struct { - arn ARN - expectedData []byte - expectErr bool - }{ - {ARN{}, []byte(""), false}, - {ARN{TargetID{"1", "webhook"}, ""}, []byte("arn:minio:sqs::1:webhook"), false}, - {ARN{TargetID{"1", "webhook"}, "us-east-1"}, []byte("arn:minio:sqs:us-east-1:1:webhook"), false}, - } - - for i, testCase := range testCases { - data, err := xml.Marshal(testCase.arn) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(data, testCase.expectedData) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) - } - } - } -} - -func TestARNUnmarshalXML(t *testing.T) { - testCases := []struct { - data []byte - expectedARN *ARN - expectErr bool - }{ - {[]byte(""), nil, true}, - {[]byte("arn:minio:sqs:::"), nil, true}, - {[]byte("arn:minio:sqs::1:webhook"), &ARN{TargetID{"1", "webhook"}, ""}, false}, - {[]byte("arn:minio:sqs:us-east-1:1:webhook"), &ARN{TargetID{"1", "webhook"}, "us-east-1"}, false}, - } - - for i, testCase := range testCases { - arn := &ARN{} - err := xml.Unmarshal(testCase.data, &arn) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if *arn != *testCase.expectedARN { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedARN, arn) - } - } - } -} - -func TestParseARN(t *testing.T) { - testCases := []struct { - s string - expectedARN *ARN - expectErr bool - }{ - {"", nil, true}, - 
{"arn:minio:sqs:::", nil, true}, - {"arn:minio:sqs::1:webhook:remote", nil, true}, - {"arn:aws:sqs::1:webhook", nil, true}, - {"arn:minio:sns::1:webhook", nil, true}, - {"arn:minio:sqs::1:webhook", &ARN{TargetID{"1", "webhook"}, ""}, false}, - {"arn:minio:sqs:us-east-1:1:webhook", &ARN{TargetID{"1", "webhook"}, "us-east-1"}, false}, - } - - for i, testCase := range testCases { - arn, err := parseARN(testCase.s) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if *arn != *testCase.expectedARN { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedARN, arn) - } - } - } -} diff --git a/pkg/event/config.go b/pkg/event/config.go deleted file mode 100644 index bd9a1cb6..00000000 --- a/pkg/event/config.go +++ /dev/null @@ -1,299 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "encoding/xml" - "errors" - "io" - "reflect" - "strings" - "unicode/utf8" - - "github.com/minio/minio-go/v6/pkg/set" -) - -// ValidateFilterRuleValue - checks if given value is filter rule value or not. -func ValidateFilterRuleValue(value string) error { - for _, segment := range strings.Split(value, "/") { - if segment == "." || segment == ".." { - return &ErrInvalidFilterValue{value} - } - } - - if len(value) <= 1024 && utf8.ValidString(value) && !strings.Contains(value, `\`) { - return nil - } - - return &ErrInvalidFilterValue{value} -} - -// FilterRule - represents elements inside ... -type FilterRule struct { - Name string `xml:"Name"` - Value string `xml:"Value"` -} - -// UnmarshalXML - decodes XML data. -func (filter *FilterRule) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). - type filterRule FilterRule - rule := filterRule{} - if err := d.DecodeElement(&rule, &start); err != nil { - return err - } - - if rule.Name != "prefix" && rule.Name != "suffix" { - return &ErrInvalidFilterName{rule.Name} - } - - if err := ValidateFilterRuleValue(filter.Value); err != nil { - return err - } - - *filter = FilterRule(rule) - - return nil -} - -// FilterRuleList - represents multiple ... -type FilterRuleList struct { - Rules []FilterRule `xml:"FilterRule,omitempty"` -} - -// UnmarshalXML - decodes XML data. -func (ruleList *FilterRuleList) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). - type filterRuleList FilterRuleList - rules := filterRuleList{} - if err := d.DecodeElement(&rules, &start); err != nil { - return err - } - - // FilterRuleList must have only one prefix and/or suffix. 
- nameSet := set.NewStringSet() - for _, rule := range rules.Rules { - if nameSet.Contains(rule.Name) { - if rule.Name == "prefix" { - return &ErrFilterNamePrefix{} - } - - return &ErrFilterNameSuffix{} - } - - nameSet.Add(rule.Name) - } - - *ruleList = FilterRuleList(rules) - return nil -} - -// Pattern - returns pattern using prefix and suffix values. -func (ruleList FilterRuleList) Pattern() string { - var prefix string - var suffix string - - for _, rule := range ruleList.Rules { - switch rule.Name { - case "prefix": - prefix = rule.Value - case "suffix": - suffix = rule.Value - } - } - - return NewPattern(prefix, suffix) -} - -// S3Key - represents elements inside ... -type S3Key struct { - RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key,omitempty"` -} - -// common - represents common elements inside , -// and -type common struct { - ID string `xml:"Id" json:"Id"` - Filter S3Key `xml:"Filter" json:"Filter"` - Events []Name `xml:"Event" json:"Event"` -} - -// Queue - represents elements inside -type Queue struct { - common - ARN ARN `xml:"Queue"` -} - -// UnmarshalXML - decodes XML data. -func (q *Queue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). - type queue Queue - parsedQueue := queue{} - if err := d.DecodeElement(&parsedQueue, &start); err != nil { - return err - } - - if len(parsedQueue.Events) == 0 { - return errors.New("missing event name(s)") - } - - eventStringSet := set.NewStringSet() - for _, eventName := range parsedQueue.Events { - if eventStringSet.Contains(eventName.String()) { - return &ErrDuplicateEventName{eventName} - } - - eventStringSet.Add(eventName.String()) - } - - *q = Queue(parsedQueue) - - return nil -} - -// Validate - checks whether queue has valid values or not. -func (q Queue) Validate(region string, targetList *TargetList) error { - if q.ARN.region == "" { - if !targetList.Exists(q.ARN.TargetID) { - return &ErrARNNotFound{q.ARN} - } - return nil - } - - if region != "" && q.ARN.region != region { - return &ErrUnknownRegion{q.ARN.region} - } - - if !targetList.Exists(q.ARN.TargetID) { - return &ErrARNNotFound{q.ARN} - } - - return nil -} - -// SetRegion - sets region value to queue's ARN. -func (q *Queue) SetRegion(region string) { - q.ARN.region = region -} - -// ToRulesMap - converts Queue to RulesMap -func (q Queue) ToRulesMap() RulesMap { - pattern := q.Filter.RuleList.Pattern() - return NewRulesMap(q.Events, pattern, q.ARN.TargetID) -} - -// Unused. Available for completion. -type lambda struct { - ARN string `xml:"CloudFunction"` -} - -// Unused. Available for completion. -type topic struct { - ARN string `xml:"Topic" json:"Topic"` -} - -// Config - notification configuration described in -// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -type Config struct { - XMLNS string `xml:"xmlns,attr,omitempty"` - XMLName xml.Name `xml:"NotificationConfiguration"` - QueueList []Queue `xml:"QueueConfiguration,omitempty"` - LambdaList []lambda `xml:"CloudFunctionConfiguration,omitempty"` - TopicList []topic `xml:"TopicConfiguration,omitempty"` -} - -// UnmarshalXML - decodes XML data. -func (conf *Config) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). - type config Config - parsedConfig := config{} - if err := d.DecodeElement(&parsedConfig, &start); err != nil { - return err - } - - // Empty queue list means user wants to delete the notification configuration. 
- if len(parsedConfig.QueueList) > 0 { - for i, q1 := range parsedConfig.QueueList[:len(parsedConfig.QueueList)-1] { - for _, q2 := range parsedConfig.QueueList[i+1:] { - // Removes the region from ARN if server region is not set - if q2.ARN.region != "" && q1.ARN.region == "" { - q2.ARN.region = "" - } - if reflect.DeepEqual(q1, q2) { - return &ErrDuplicateQueueConfiguration{q1} - } - } - } - } - - if len(parsedConfig.LambdaList) > 0 || len(parsedConfig.TopicList) > 0 { - return &ErrUnsupportedConfiguration{} - } - - *conf = Config(parsedConfig) - - return nil -} - -// Validate - checks whether config has valid values or not. -func (conf Config) Validate(region string, targetList *TargetList) error { - for _, queue := range conf.QueueList { - if err := queue.Validate(region, targetList); err != nil { - return err - } - } - - return nil -} - -// SetRegion - sets region to all queue configuration. -func (conf *Config) SetRegion(region string) { - for i := range conf.QueueList { - conf.QueueList[i].SetRegion(region) - } -} - -// ToRulesMap - converts all queue configuration to RulesMap. -func (conf *Config) ToRulesMap() RulesMap { - rulesMap := make(RulesMap) - - for _, queue := range conf.QueueList { - rulesMap.Add(queue.ToRulesMap()) - } - - return rulesMap -} - -// ParseConfig - parses data in reader to notification configuration. -func ParseConfig(reader io.Reader, region string, targetList *TargetList) (*Config, error) { - var config Config - - if err := xml.NewDecoder(reader).Decode(&config); err != nil { - return nil, err - } - - if err := config.Validate(region, targetList); err != nil { - return nil, err - } - - config.SetRegion(region) - //If xml namespace is empty, set a default value before returning. - if config.XMLNS == "" { - config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/" - } - return &config, nil -} diff --git a/pkg/event/config_test.go b/pkg/event/config_test.go deleted file mode 100644 index a7a92e75..00000000 --- a/pkg/event/config_test.go +++ /dev/null @@ -1,960 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package event - -import ( - "encoding/xml" - "reflect" - "strings" - "testing" -) - -func TestValidateFilterRuleValue(t *testing.T) { - testCases := []struct { - value string - expectErr bool - }{ - {"foo/.", true}, - {"../foo", true}, - {`foo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/baz`, true}, - {string([]byte{0xff, 0xfe, 0xfd}), true}, - {`foo\bar`, true}, - {"Hello/世界", false}, - } - - for i, testCase := range testCases { - err := ValidateFilterRuleValue(testCase.value) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestFilterRuleUnmarshalXML(t *testing.T) { - testCases := []struct { - data []byte - expectedResult *FilterRule - expectErr bool - }{ - {[]byte(``), nil, true}, - {[]byte(``), nil, true}, - {[]byte(``), nil, true}, - {[]byte(``), nil, true}, - {[]byte(`PrefixHello/世界`), nil, true}, - {[]byte(`endsfoo/bar`), nil, true}, - {[]byte(`prefixHello/世界`), &FilterRule{"prefix", "Hello/世界"}, false}, - {[]byte(`suffixfoo/bar`), &FilterRule{"suffix", "foo/bar"}, false}, - } - - for i, testCase := range testCases { - result := &FilterRule{} - err := xml.Unmarshal(testCase.data, result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestFilterRuleListUnmarshalXML(t *testing.T) { - testCases := []struct { - data []byte - expectedResult *FilterRuleList - expectErr bool - }{ - {[]byte(`suffixHello/世界suffixfoo/bar`), nil, true}, - {[]byte(`prefixHello/世界prefixfoo/bar`), nil, true}, - {[]byte(`prefixHello/世界`), &FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}}}, false}, - {[]byte(`suffixfoo/bar`), &FilterRuleList{[]FilterRule{{"suffix", "foo/bar"}}}, false}, - {[]byte(`prefixHello/世界suffixfoo/bar`), &FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}, {"suffix", "foo/bar"}}}, false}, - } - - for i, testCase := range testCases { - result := &FilterRuleList{} - err := xml.Unmarshal(testCase.data, result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if 
!reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestFilterRuleListPattern(t *testing.T) { - testCases := []struct { - filterRuleList FilterRuleList - expectedResult string - }{ - {FilterRuleList{}, ""}, - {FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}}}, "Hello/世界*"}, - {FilterRuleList{[]FilterRule{{"suffix", "foo/bar"}}}, "*foo/bar"}, - {FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}, {"suffix", "foo/bar"}}}, "Hello/世界*foo/bar"}, - } - - for i, testCase := range testCases { - result := testCase.filterRuleList.Pattern() - - if result != testCase.expectedResult { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestQueueUnmarshalXML(t *testing.T) { - dataCase1 := []byte(` - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* -`) - - dataCase2 := []byte(` - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put -`) - - dataCase3 := []byte(` - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put - s3:ObjectCreated:Put -`) - - testCases := []struct { - data []byte - expectErr bool - }{ - {dataCase1, false}, - {dataCase2, false}, - {dataCase3, true}, - } - - for i, testCase := range testCases { - err := xml.Unmarshal(testCase.data, &Queue{}) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestQueueValidate(t *testing.T) { - data := []byte(` - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* -`) - queue1 := &Queue{} - if err := xml.Unmarshal(data, queue1); err != nil { - panic(err) - } - - data = []byte(` - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put -`) - queue2 := &Queue{} - if err := xml.Unmarshal(data, queue2); err != nil { - panic(err) - } - - data = []byte(` - - 1 - - arn:minio:sqs:eu-west-2:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* -`) - queue3 := &Queue{} - if err := xml.Unmarshal(data, queue3); err != nil { - panic(err) - } - - targetList1 := NewTargetList() - - targetList2 := NewTargetList() - if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { - panic(err) - } - - testCases := []struct { - queue *Queue - region string - targetList *TargetList - expectErr bool - }{ - {queue1, "eu-west-1", nil, true}, - {queue2, "us-east-1", targetList1, true}, - {queue3, "", targetList2, false}, - {queue2, "us-east-1", targetList2, false}, - } - - for i, testCase := range testCases { - err := testCase.queue.Validate(testCase.region, testCase.targetList) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestQueueSetRegion(t *testing.T) { - data := []byte(` - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* -`) - queue1 := &Queue{} - if err := xml.Unmarshal(data, queue1); err != nil { - panic(err) - } - - data = []byte(` - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs::1:webhook - s3:ObjectCreated:Put -`) - queue2 := &Queue{} 
- if err := xml.Unmarshal(data, queue2); err != nil { - panic(err) - } - - testCases := []struct { - queue *Queue - region string - expectedResult ARN - }{ - {queue1, "eu-west-1", ARN{TargetID{"1", "webhook"}, "eu-west-1"}}, - {queue1, "", ARN{TargetID{"1", "webhook"}, ""}}, - {queue2, "us-east-1", ARN{TargetID{"1", "webhook"}, "us-east-1"}}, - {queue2, "", ARN{TargetID{"1", "webhook"}, ""}}, - } - - for i, testCase := range testCases { - testCase.queue.SetRegion(testCase.region) - result := testCase.queue.ARN - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestQueueToRulesMap(t *testing.T) { - data := []byte(` - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* -`) - queueCase1 := &Queue{} - if err := xml.Unmarshal(data, queueCase1); err != nil { - panic(err) - } - - data = []byte(` - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put -`) - queueCase2 := &Queue{} - if err := xml.Unmarshal(data, queueCase2); err != nil { - panic(err) - } - - rulesMapCase1 := NewRulesMap([]Name{ObjectAccessedAll, ObjectCreatedAll, ObjectRemovedAll}, "*", TargetID{"1", "webhook"}) - rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedPut}, "images/*jpg", TargetID{"1", "webhook"}) - - testCases := []struct { - queue *Queue - expectedResult RulesMap - }{ - {queueCase1, rulesMapCase1}, - {queueCase2, rulesMapCase2}, - } - - for i, testCase := range testCases { - result := testCase.queue.ToRulesMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestConfigUnmarshalXML(t *testing.T) { - dataCase1 := []byte(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - -`) - - dataCase2 := []byte(` - - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put - - - `) - - dataCase3 := []byte(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - - 2 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put - - - `) - - dataCase4 := []byte(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - - 1 - - - - suffix - .jpg - - - - arn:aws:lambda:us-west-2:444455556666:cloud-function-A - s3:ObjectCreated:Put - - - arn:aws:sns:us-west-2:444455556666:sns-notification-one - s3:ObjectCreated:* - - - `) - - dataCase5 := []byte(``) - - testCases := []struct { - data []byte - expectErr bool - }{ - {dataCase1, false}, - {dataCase2, false}, - {dataCase3, false}, - {dataCase4, true}, - // make sure we don't fail when queue is empty. 
- {dataCase5, false}, - } - - for i, testCase := range testCases { - err := xml.Unmarshal(testCase.data, &Config{}) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestConfigValidate(t *testing.T) { - data := []byte(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - -`) - config1 := &Config{} - if err := xml.Unmarshal(data, config1); err != nil { - panic(err) - } - - data = []byte(` - - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put - - -`) - config2 := &Config{} - if err := xml.Unmarshal(data, config2); err != nil { - panic(err) - } - - data = []byte(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - - 2 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put - - -`) - config3 := &Config{} - if err := xml.Unmarshal(data, config3); err != nil { - panic(err) - } - - targetList1 := NewTargetList() - - targetList2 := NewTargetList() - if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { - panic(err) - } - - testCases := []struct { - config *Config - region string - targetList *TargetList - expectErr bool - }{ - {config1, "eu-west-1", nil, true}, - {config2, "us-east-1", targetList1, true}, - {config3, "", targetList2, false}, - {config2, "us-east-1", targetList2, false}, - } - - for i, testCase := range testCases { - err := testCase.config.Validate(testCase.region, testCase.targetList) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestConfigSetRegion(t *testing.T) { - data := []byte(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - -`) - config1 := &Config{} - if err := xml.Unmarshal(data, config1); err != nil { - panic(err) - } - - data = []byte(` - - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs::1:webhook - s3:ObjectCreated:Put - - -`) - config2 := &Config{} - if err := xml.Unmarshal(data, config2); err != nil { - panic(err) - } - - data = []byte(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - - 2 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:2:amqp - s3:ObjectCreated:Put - - -`) - config3 := &Config{} - if err := xml.Unmarshal(data, config3); err != nil { - panic(err) - } - - testCases := []struct { - config *Config - region string - expectedResult []ARN - }{ - {config1, "eu-west-1", []ARN{{TargetID{"1", "webhook"}, "eu-west-1"}}}, - {config1, "", []ARN{{TargetID{"1", "webhook"}, ""}}}, - {config2, "us-east-1", []ARN{{TargetID{"1", "webhook"}, "us-east-1"}}}, - {config2, "", []ARN{{TargetID{"1", "webhook"}, ""}}}, - {config3, "us-east-1", []ARN{{TargetID{"1", "webhook"}, "us-east-1"}, {TargetID{"2", "amqp"}, "us-east-1"}}}, - {config3, "", []ARN{{TargetID{"1", "webhook"}, ""}, {TargetID{"2", "amqp"}, ""}}}, - } - - for i, testCase := range testCases { - testCase.config.SetRegion(testCase.region) - result := []ARN{} - for _, queue := range testCase.config.QueueList { - result = append(result, queue.ARN) - } - - if !reflect.DeepEqual(result, testCase.expectedResult) 
{ - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestConfigToRulesMap(t *testing.T) { - data := []byte(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - -`) - config1 := &Config{} - if err := xml.Unmarshal(data, config1); err != nil { - panic(err) - } - - data = []byte(` - - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs::1:webhook - s3:ObjectCreated:Put - - -`) - config2 := &Config{} - if err := xml.Unmarshal(data, config2); err != nil { - panic(err) - } - - data = []byte(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - - 2 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:2:amqp - s3:ObjectCreated:Put - - -`) - config3 := &Config{} - if err := xml.Unmarshal(data, config3); err != nil { - panic(err) - } - - rulesMapCase1 := NewRulesMap([]Name{ObjectAccessedAll, ObjectCreatedAll, ObjectRemovedAll}, "*", TargetID{"1", "webhook"}) - - rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedPut}, "images/*jpg", TargetID{"1", "webhook"}) - - rulesMapCase3 := NewRulesMap([]Name{ObjectAccessedAll, ObjectCreatedAll, ObjectRemovedAll}, "*", TargetID{"1", "webhook"}) - rulesMapCase3.add([]Name{ObjectCreatedPut}, "images/*jpg", TargetID{"2", "amqp"}) - - testCases := []struct { - config *Config - expectedResult RulesMap - }{ - {config1, rulesMapCase1}, - {config2, rulesMapCase2}, - {config3, rulesMapCase3}, - } - - for i, testCase := range testCases { - result := testCase.config.ToRulesMap() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestParseConfig(t *testing.T) { - reader1 := strings.NewReader(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - -`) - - reader2 := strings.NewReader(` - - - 1 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put - - -`) - - reader3 := strings.NewReader(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - - 2 - - - - prefix - images/ - - - suffix - jpg - - - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectCreated:Put - - -`) - - reader4 := strings.NewReader(` - - - 1 - - arn:minio:sqs:us-east-1:1:webhook - s3:ObjectAccessed:* - s3:ObjectCreated:* - s3:ObjectRemoved:* - - - 1 - - - - suffix - .jpg - - - - arn:aws:lambda:us-west-2:444455556666:cloud-function-A - s3:ObjectCreated:Put - - - arn:aws:sns:us-west-2:444455556666:sns-notification-one - s3:ObjectCreated:* - - -`) - - targetList1 := NewTargetList() - - targetList2 := NewTargetList() - if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { - panic(err) - } - - testCases := []struct { - reader *strings.Reader - region string - targetList *TargetList - expectErr bool - }{ - {reader1, "eu-west-1", nil, true}, - {reader2, "us-east-1", targetList1, true}, - {reader4, "us-east-1", targetList1, true}, - {reader3, "", targetList2, false}, - {reader2, "us-east-1", targetList2, false}, - } - - for i, testCase := range testCases { - if _, err := testCase.reader.Seek(0, 0); err != nil { - panic(err) - } - _, err := ParseConfig(testCase.reader, testCase.region, testCase.targetList) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - 
t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} diff --git a/pkg/event/errors.go b/pkg/event/errors.go deleted file mode 100644 index d06acd48..00000000 --- a/pkg/event/errors.go +++ /dev/null @@ -1,152 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "encoding/xml" - "fmt" -) - -// IsEventError - checks whether given error is event error or not. -func IsEventError(err error) bool { - switch err.(type) { - case ErrInvalidFilterName, *ErrInvalidFilterName: - return true - case ErrFilterNamePrefix, *ErrFilterNamePrefix: - return true - case ErrFilterNameSuffix, *ErrFilterNameSuffix: - return true - case ErrInvalidFilterValue, *ErrInvalidFilterValue: - return true - case ErrDuplicateEventName, *ErrDuplicateEventName: - return true - case ErrUnsupportedConfiguration, *ErrUnsupportedConfiguration: - return true - case ErrDuplicateQueueConfiguration, *ErrDuplicateQueueConfiguration: - return true - case ErrUnknownRegion, *ErrUnknownRegion: - return true - case ErrARNNotFound, *ErrARNNotFound: - return true - case ErrInvalidARN, *ErrInvalidARN: - return true - case ErrInvalidEventName, *ErrInvalidEventName: - return true - } - - return false -} - -// ErrInvalidFilterName - invalid filter name error. -type ErrInvalidFilterName struct { - FilterName string -} - -func (err ErrInvalidFilterName) Error() string { - return fmt.Sprintf("invalid filter name '%v'", err.FilterName) -} - -// ErrFilterNamePrefix - more than one prefix usage error. -type ErrFilterNamePrefix struct{} - -func (err ErrFilterNamePrefix) Error() string { - return "more than one prefix in filter rule" -} - -// ErrFilterNameSuffix - more than one suffix usage error. -type ErrFilterNameSuffix struct{} - -func (err ErrFilterNameSuffix) Error() string { - return "more than one suffix in filter rule" -} - -// ErrInvalidFilterValue - invalid filter value error. -type ErrInvalidFilterValue struct { - FilterValue string -} - -func (err ErrInvalidFilterValue) Error() string { - return fmt.Sprintf("invalid filter value '%v'", err.FilterValue) -} - -// ErrDuplicateEventName - duplicate event name error. -type ErrDuplicateEventName struct { - EventName Name -} - -func (err ErrDuplicateEventName) Error() string { - return fmt.Sprintf("duplicate event name '%v' found", err.EventName) -} - -// ErrUnsupportedConfiguration - unsupported configuration error. -type ErrUnsupportedConfiguration struct{} - -func (err ErrUnsupportedConfiguration) Error() string { - return "topic or cloud function configuration is not supported" -} - -// ErrDuplicateQueueConfiguration - duplicate queue configuration error. 
-type ErrDuplicateQueueConfiguration struct { - Queue Queue -} - -func (err ErrDuplicateQueueConfiguration) Error() string { - var message string - if data, xerr := xml.Marshal(err.Queue); xerr != nil { - message = fmt.Sprintf("%+v", err.Queue) - } else { - message = string(data) - } - - return fmt.Sprintf("duplicate queue configuration %v", message) -} - -// ErrUnknownRegion - unknown region error. -type ErrUnknownRegion struct { - Region string -} - -func (err ErrUnknownRegion) Error() string { - return fmt.Sprintf("unknown region '%v'", err.Region) -} - -// ErrARNNotFound - ARN not found error. -type ErrARNNotFound struct { - ARN ARN -} - -func (err ErrARNNotFound) Error() string { - return fmt.Sprintf("ARN '%v' not found", err.ARN) -} - -// ErrInvalidARN - invalid ARN error. -type ErrInvalidARN struct { - ARN string -} - -func (err ErrInvalidARN) Error() string { - return fmt.Sprintf("invalid ARN '%v'", err.ARN) -} - -// ErrInvalidEventName - invalid event name error. -type ErrInvalidEventName struct { - Name string -} - -func (err ErrInvalidEventName) Error() string { - return fmt.Sprintf("invalid event name '%v'", err.Name) -} diff --git a/pkg/event/event.go b/pkg/event/event.go deleted file mode 100644 index 85fb8e9a..00000000 --- a/pkg/event/event.go +++ /dev/null @@ -1,88 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -const ( - // NamespaceFormat - namespace log format used in some event targets. - NamespaceFormat = "namespace" - - // AccessFormat - access log format used in some event targets. - AccessFormat = "access" - - // AMZTimeFormat - event time format. - AMZTimeFormat = "2006-01-02T15:04:05.000Z" -) - -// Identity represents access key who caused the event. -type Identity struct { - PrincipalID string `json:"principalId"` -} - -// Bucket represents bucket metadata of the event. -type Bucket struct { - Name string `json:"name"` - OwnerIdentity Identity `json:"ownerIdentity"` - ARN string `json:"arn"` -} - -// Object represents object metadata of the event. -type Object struct { - Key string `json:"key"` - Size int64 `json:"size,omitempty"` - ETag string `json:"eTag,omitempty"` - ContentType string `json:"contentType,omitempty"` - UserMetadata map[string]string `json:"userMetadata,omitempty"` - VersionID string `json:"versionId,omitempty"` - Sequencer string `json:"sequencer"` -} - -// Metadata represents event metadata. -type Metadata struct { - SchemaVersion string `json:"s3SchemaVersion"` - ConfigurationID string `json:"configurationId"` - Bucket Bucket `json:"bucket"` - Object Object `json:"object"` -} - -// Source represents client information who triggered the event. -type Source struct { - Host string `json:"host"` - Port string `json:"port"` - UserAgent string `json:"userAgent"` -} - -// Event represents event notification information defined in -// http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html. 
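// Illustrative sketch, not part of the deleted sources above: how a caller can use
// IsEventError from errors.go to tell notification-configuration problems apart from
// other failures. ErrInvalidARN and IsEventError are the real identifiers from the
// diff; describeErr and the sample ARN are invented, and the snippet assumes the
// github.com/minio/minio/pkg/event import.
func describeErr(err error) string {
	if event.IsEventError(err) {
		// Safe to report back to the client as a bucket-notification configuration error.
		return "invalid bucket notification configuration: " + err.Error()
	}
	return "internal error: " + err.Error()
}

// describeErr(&event.ErrInvalidARN{ARN: "arn:minio:sqs"}) returns
// "invalid bucket notification configuration: invalid ARN 'arn:minio:sqs'".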
-type Event struct { - EventVersion string `json:"eventVersion"` - EventSource string `json:"eventSource"` - AwsRegion string `json:"awsRegion"` - EventTime string `json:"eventTime"` - EventName Name `json:"eventName"` - UserIdentity Identity `json:"userIdentity"` - RequestParameters map[string]string `json:"requestParameters"` - ResponseElements map[string]string `json:"responseElements"` - S3 Metadata `json:"s3"` - Source Source `json:"source"` -} - -// Log represents event information for some event targets. -type Log struct { - EventName Name - Key string - Records []Event -} diff --git a/pkg/event/name.go b/pkg/event/name.go deleted file mode 100644 index 03d00094..00000000 --- a/pkg/event/name.go +++ /dev/null @@ -1,172 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "encoding/json" - "encoding/xml" -) - -// Name - event type enum. -// Refer http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations -type Name int - -// Values of Name -const ( - ObjectAccessedAll Name = 1 + iota - ObjectAccessedGet - ObjectAccessedGetRetention - ObjectAccessedGetLegalHold - ObjectAccessedHead - ObjectCreatedAll - ObjectCreatedCompleteMultipartUpload - ObjectCreatedCopy - ObjectCreatedPost - ObjectCreatedPut - ObjectCreatedPutRetention - ObjectCreatedPutLegalHold - ObjectRemovedAll - ObjectRemovedDelete -) - -// Expand - returns expanded values of abbreviated event type. -func (name Name) Expand() []Name { - switch name { - case ObjectAccessedAll: - return []Name{ObjectAccessedGet, ObjectAccessedHead, ObjectAccessedGetRetention, ObjectAccessedGetLegalHold} - case ObjectCreatedAll: - return []Name{ObjectCreatedCompleteMultipartUpload, ObjectCreatedCopy, ObjectCreatedPost, ObjectCreatedPut, ObjectCreatedPutRetention, ObjectCreatedPutLegalHold} - case ObjectRemovedAll: - return []Name{ObjectRemovedDelete} - default: - return []Name{name} - } -} - -// String - returns string representation of event type. 
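// Illustrative sketch, not from the original files: a minimal event.Event and the
// event.Log wrapper that notification targets serialize to JSON. The struct layout
// comes from the deleted event.go and name.go above; every value below (bucket, key,
// host, and so on) is an invented example, and the snippet assumes encoding/json,
// fmt and time are imported alongside pkg/event.
ev := event.Event{
	EventVersion: "2.0",
	EventSource:  "minio:s3",
	AwsRegion:    "us-east-1",
	EventTime:    time.Now().UTC().Format(event.AMZTimeFormat),
	EventName:    event.ObjectCreatedPut,
	UserIdentity: event.Identity{PrincipalID: "minioadmin"},
	S3: event.Metadata{
		SchemaVersion:   "1.0",
		ConfigurationID: "Config",
		Bucket:          event.Bucket{Name: "mybucket", ARN: "arn:aws:s3:::mybucket"},
		Object:          event.Object{Key: "photos/photo.jpg", Size: 1024, Sequencer: "1"},
	},
	Source: event.Source{Host: "127.0.0.1", Port: "9000", UserAgent: "mc"},
}
data, err := json.Marshal(event.Log{EventName: ev.EventName, Key: "mybucket/photos/photo.jpg", Records: []event.Event{ev}})
if err != nil {
	panic(err)
}
fmt.Println(string(data)) // single JSON document carrying EventName, Key and Records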
-func (name Name) String() string { - switch name { - case ObjectAccessedAll: - return "s3:ObjectAccessed:*" - case ObjectAccessedGet: - return "s3:ObjectAccessed:Get" - case ObjectAccessedGetRetention: - return "s3:ObjectAccessed:GetRetention" - case ObjectAccessedGetLegalHold: - return "s3:ObjectAccessed:GetLegalHold" - case ObjectAccessedHead: - return "s3:ObjectAccessed:Head" - case ObjectCreatedAll: - return "s3:ObjectCreated:*" - case ObjectCreatedCompleteMultipartUpload: - return "s3:ObjectCreated:CompleteMultipartUpload" - case ObjectCreatedCopy: - return "s3:ObjectCreated:Copy" - case ObjectCreatedPost: - return "s3:ObjectCreated:Post" - case ObjectCreatedPut: - return "s3:ObjectCreated:Put" - case ObjectCreatedPutRetention: - return "s3:ObjectCreated:PutRetention" - case ObjectCreatedPutLegalHold: - return "s3:ObjectCreated:PutLegalHold" - case ObjectRemovedAll: - return "s3:ObjectRemoved:*" - case ObjectRemovedDelete: - return "s3:ObjectRemoved:Delete" - } - - return "" -} - -// MarshalXML - encodes to XML data. -func (name Name) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return e.EncodeElement(name.String(), start) -} - -// UnmarshalXML - decodes XML data. -func (name *Name) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var s string - if err := d.DecodeElement(&s, &start); err != nil { - return err - } - - eventName, err := ParseName(s) - if err != nil { - return err - } - - *name = eventName - return nil -} - -// MarshalJSON - encodes to JSON data. -func (name Name) MarshalJSON() ([]byte, error) { - return json.Marshal(name.String()) -} - -// UnmarshalJSON - decodes JSON data. -func (name *Name) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - eventName, err := ParseName(s) - if err != nil { - return err - } - - *name = eventName - return nil -} - -// ParseName - parses string to Name. -func ParseName(s string) (Name, error) { - switch s { - case "s3:ObjectAccessed:*": - return ObjectAccessedAll, nil - case "s3:ObjectAccessed:Get": - return ObjectAccessedGet, nil - case "s3:ObjectAccessed:GetRetention": - return ObjectAccessedGetRetention, nil - case "s3:ObjectAccessed:GetLegalHold": - return ObjectAccessedGetLegalHold, nil - case "s3:ObjectAccessed:Head": - return ObjectAccessedHead, nil - case "s3:ObjectCreated:*": - return ObjectCreatedAll, nil - case "s3:ObjectCreated:CompleteMultipartUpload": - return ObjectCreatedCompleteMultipartUpload, nil - case "s3:ObjectCreated:Copy": - return ObjectCreatedCopy, nil - case "s3:ObjectCreated:Post": - return ObjectCreatedPost, nil - case "s3:ObjectCreated:Put": - return ObjectCreatedPut, nil - case "s3:ObjectCreated:PutRetention": - return ObjectCreatedPutRetention, nil - case "s3:ObjectCreated:PutLegalHold": - return ObjectCreatedPutLegalHold, nil - case "s3:ObjectRemoved:*": - return ObjectRemovedAll, nil - case "s3:ObjectRemoved:Delete": - return ObjectRemovedDelete, nil - default: - return 0, &ErrInvalidEventName{s} - } -} diff --git a/pkg/event/name_test.go b/pkg/event/name_test.go deleted file mode 100644 index 2e3fa407..00000000 --- a/pkg/event/name_test.go +++ /dev/null @@ -1,225 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
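// Illustrative sketch, not part of the deleted name.go above: parsing an event name
// string and expanding the "*" abbreviation. Only ParseName, Expand and String are
// real identifiers; the surrounding snippet is an invented usage example that
// assumes fmt and pkg/event are imported.
name, err := event.ParseName("s3:ObjectCreated:*")
if err != nil {
	panic(err)
}
for _, n := range name.Expand() {
	// Prints the six concrete names, e.g. "s3:ObjectCreated:Put",
	// "s3:ObjectCreated:Copy", "s3:ObjectCreated:CompleteMultipartUpload", ...
	fmt.Println(n.String())
}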
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "encoding/json" - "encoding/xml" - "reflect" - "testing" -) - -func TestNameExpand(t *testing.T) { - testCases := []struct { - name Name - expectedResult []Name - }{ - {ObjectAccessedAll, []Name{ObjectAccessedGet, ObjectAccessedHead, ObjectAccessedGetRetention, ObjectAccessedGetLegalHold}}, - {ObjectCreatedAll, []Name{ObjectCreatedCompleteMultipartUpload, ObjectCreatedCopy, ObjectCreatedPost, ObjectCreatedPut, ObjectCreatedPutRetention, ObjectCreatedPutLegalHold}}, - {ObjectRemovedAll, []Name{ObjectRemovedDelete}}, - {ObjectAccessedHead, []Name{ObjectAccessedHead}}, - } - - for i, testCase := range testCases { - result := testCase.name.Expand() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Errorf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestNameString(t *testing.T) { - var blankName Name - - testCases := []struct { - name Name - expectedResult string - }{ - {ObjectAccessedAll, "s3:ObjectAccessed:*"}, - {ObjectAccessedGet, "s3:ObjectAccessed:Get"}, - {ObjectAccessedHead, "s3:ObjectAccessed:Head"}, - {ObjectCreatedAll, "s3:ObjectCreated:*"}, - {ObjectCreatedCompleteMultipartUpload, "s3:ObjectCreated:CompleteMultipartUpload"}, - {ObjectCreatedCopy, "s3:ObjectCreated:Copy"}, - {ObjectCreatedPost, "s3:ObjectCreated:Post"}, - {ObjectCreatedPut, "s3:ObjectCreated:Put"}, - {ObjectRemovedAll, "s3:ObjectRemoved:*"}, - {ObjectRemovedDelete, "s3:ObjectRemoved:Delete"}, - {ObjectCreatedPutRetention, "s3:ObjectCreated:PutRetention"}, - {ObjectCreatedPutLegalHold, "s3:ObjectCreated:PutLegalHold"}, - {ObjectAccessedGetRetention, "s3:ObjectAccessed:GetRetention"}, - {ObjectAccessedGetLegalHold, "s3:ObjectAccessed:GetLegalHold"}, - - {blankName, ""}, - } - - for i, testCase := range testCases { - result := testCase.name.String() - - if result != testCase.expectedResult { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestNameMarshalXML(t *testing.T) { - var blankName Name - - testCases := []struct { - name Name - expectedData []byte - expectErr bool - }{ - {ObjectAccessedAll, []byte("s3:ObjectAccessed:*"), false}, - {ObjectRemovedDelete, []byte("s3:ObjectRemoved:Delete"), false}, - {blankName, []byte(""), false}, - } - - for i, testCase := range testCases { - data, err := xml.Marshal(testCase.name) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(data, testCase.expectedData) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) - } - } - } -} - -func TestNameUnmarshalXML(t *testing.T) { - var blankName Name - - testCases := []struct { - data []byte - expectedName Name - expectErr bool - }{ - {[]byte("s3:ObjectAccessed:*"), ObjectAccessedAll, false}, - {[]byte("s3:ObjectRemoved:Delete"), ObjectRemovedDelete, false}, - {[]byte(""), blankName, true}, - } - - for i, testCase := range testCases { - var name Name 
- err := xml.Unmarshal(testCase.data, &name) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(name, testCase.expectedName) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedName, name) - } - } - } -} - -func TestNameMarshalJSON(t *testing.T) { - var blankName Name - - testCases := []struct { - name Name - expectedData []byte - expectErr bool - }{ - {ObjectAccessedAll, []byte(`"s3:ObjectAccessed:*"`), false}, - {ObjectRemovedDelete, []byte(`"s3:ObjectRemoved:Delete"`), false}, - {blankName, []byte(`""`), false}, - } - - for i, testCase := range testCases { - data, err := json.Marshal(testCase.name) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(data, testCase.expectedData) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) - } - } - } -} - -func TestNameUnmarshalJSON(t *testing.T) { - var blankName Name - - testCases := []struct { - data []byte - expectedName Name - expectErr bool - }{ - {[]byte(`"s3:ObjectAccessed:*"`), ObjectAccessedAll, false}, - {[]byte(`"s3:ObjectRemoved:Delete"`), ObjectRemovedDelete, false}, - {[]byte(`""`), blankName, true}, - } - - for i, testCase := range testCases { - var name Name - err := json.Unmarshal(testCase.data, &name) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(name, testCase.expectedName) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedName, name) - } - } - } -} - -func TestParseName(t *testing.T) { - var blankName Name - - testCases := []struct { - s string - expectedName Name - expectErr bool - }{ - {"s3:ObjectAccessed:*", ObjectAccessedAll, false}, - {"s3:ObjectRemoved:Delete", ObjectRemovedDelete, false}, - {"", blankName, true}, - } - - for i, testCase := range testCases { - name, err := ParseName(testCase.s) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(name, testCase.expectedName) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedName, name) - } - } - } -} diff --git a/pkg/event/rules.go b/pkg/event/rules.go deleted file mode 100644 index c68d392b..00000000 --- a/pkg/event/rules.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "strings" - - "github.com/minio/minio/pkg/wildcard" -) - -// NewPattern - create new pattern for prefix/suffix. 
-func NewPattern(prefix, suffix string) (pattern string) { - if prefix != "" { - if !strings.HasSuffix(prefix, "*") { - prefix += "*" - } - - pattern = prefix - } - - if suffix != "" { - if !strings.HasPrefix(suffix, "*") { - suffix = "*" + suffix - } - - pattern += suffix - } - - pattern = strings.Replace(pattern, "**", "*", -1) - - return pattern -} - -// Rules - event rules -type Rules map[string]TargetIDSet - -// Add - adds pattern and target ID. -func (rules Rules) Add(pattern string, targetID TargetID) { - rules[pattern] = NewTargetIDSet(targetID).Union(rules[pattern]) -} - -// MatchSimple - returns true one of the matching object name in rules. -func (rules Rules) MatchSimple(objectName string) bool { - for pattern := range rules { - if wildcard.MatchSimple(pattern, objectName) { - return true - } - } - return false -} - -// Match - returns TargetIDSet matching object name in rules. -func (rules Rules) Match(objectName string) TargetIDSet { - targetIDs := NewTargetIDSet() - - for pattern, targetIDSet := range rules { - if wildcard.MatchSimple(pattern, objectName) { - targetIDs = targetIDs.Union(targetIDSet) - } - } - - return targetIDs -} - -// Clone - returns copy of this rules. -func (rules Rules) Clone() Rules { - rulesCopy := make(Rules) - - for pattern, targetIDSet := range rules { - rulesCopy[pattern] = targetIDSet.Clone() - } - - return rulesCopy -} - -// Union - returns union with given rules as new rules. -func (rules Rules) Union(rules2 Rules) Rules { - nrules := rules.Clone() - - for pattern, targetIDSet := range rules2 { - nrules[pattern] = nrules[pattern].Union(targetIDSet) - } - - return nrules -} - -// Difference - returns diffrence with given rules as new rules. -func (rules Rules) Difference(rules2 Rules) Rules { - nrules := make(Rules) - - for pattern, targetIDSet := range rules { - if nv := targetIDSet.Difference(rules2[pattern]); len(nv) > 0 { - nrules[pattern] = nv - } - } - - return nrules -} diff --git a/pkg/event/rules_test.go b/pkg/event/rules_test.go deleted file mode 100644 index 4e2e950a..00000000 --- a/pkg/event/rules_test.go +++ /dev/null @@ -1,275 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
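// Illustrative sketch, not in the original rules.go or rules_test.go: combining
// NewPattern with Rules.Add, Rules.MatchSimple and Rules.Match. The identifiers come
// from the deleted rules.go above; the webhook target ID and object names are
// invented, and the snippet assumes fmt and pkg/event are imported.
rules := make(event.Rules)
rules.Add(event.NewPattern("images/", "jpg"), event.TargetID{ID: "1", Name: "webhook"})

fmt.Println(rules.MatchSimple("images/photo.jpg")) // true: pattern "images/*jpg" matches
fmt.Println(rules.MatchSimple("docs/report.pdf"))  // false: neither prefix nor suffix matches

targets := rules.Match("images/photo.jpg") // TargetIDSet containing {"1", "webhook"}
_ = targets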
- */ - -package event - -import ( - "reflect" - "testing" -) - -func TestNewPattern(t *testing.T) { - testCases := []struct { - prefix string - suffix string - expectedResult string - }{ - {"", "", ""}, - {"*", "", "*"}, - {"", "*", "*"}, - {"images/", "", "images/*"}, - {"images/*", "", "images/*"}, - {"", "jpg", "*jpg"}, - {"", "*jpg", "*jpg"}, - {"images/", "jpg", "images/*jpg"}, - {"images/*", "jpg", "images/*jpg"}, - {"images/", "*jpg", "images/*jpg"}, - {"images/*", "*jpg", "images/*jpg"}, - {"201*/images/", "jpg", "201*/images/*jpg"}, - } - - for i, testCase := range testCases { - result := NewPattern(testCase.prefix, testCase.suffix) - - if result != testCase.expectedResult { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestRulesAdd(t *testing.T) { - rulesCase1 := make(Rules) - - rulesCase2 := make(Rules) - rulesCase2.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - - rulesCase3 := make(Rules) - rulesCase3.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - - rulesCase4 := make(Rules) - rulesCase4.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"}) - - rulesCase5 := make(Rules) - - rulesCase6 := make(Rules) - rulesCase6.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"}) - - rulesCase7 := make(Rules) - rulesCase7.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"}) - - rulesCase8 := make(Rules) - rulesCase8.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - - testCases := []struct { - rules Rules - pattern string - targetID TargetID - expectedResult int - }{ - {rulesCase1, NewPattern("*", ""), TargetID{"1", "webhook"}, 1}, - {rulesCase2, NewPattern("*", ""), TargetID{"2", "amqp"}, 2}, - {rulesCase3, NewPattern("2010*", ""), TargetID{"1", "webhook"}, 1}, - {rulesCase4, NewPattern("*", ""), TargetID{"1", "webhook"}, 2}, - {rulesCase5, NewPattern("", "*.jpg"), TargetID{"1", "webhook"}, 1}, - {rulesCase6, NewPattern("", "*"), TargetID{"2", "amqp"}, 2}, - {rulesCase7, NewPattern("", "*.jpg"), TargetID{"1", "webhook"}, 1}, - {rulesCase8, NewPattern("", "*.jpg"), TargetID{"1", "webhook"}, 2}, - } - - for i, testCase := range testCases { - testCase.rules.Add(testCase.pattern, testCase.targetID) - result := len(testCase.rules) - - if result != testCase.expectedResult { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestRulesMatch(t *testing.T) { - rulesCase1 := make(Rules) - - rulesCase2 := make(Rules) - rulesCase2.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) - - rulesCase3 := make(Rules) - rulesCase3.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - rulesCase3.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"}) - - rulesCase4 := make(Rules) - rulesCase4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - - testCases := []struct { - rules Rules - objectName string - expectedResult TargetIDSet - }{ - {rulesCase1, "photos.jpg", NewTargetIDSet()}, - {rulesCase2, "photos.jpg", NewTargetIDSet(TargetID{"1", "webhook"})}, - {rulesCase3, "2010/photos.jpg", NewTargetIDSet(TargetID{"1", "webhook"})}, - {rulesCase4, "2000/photos.jpg", NewTargetIDSet()}, - } - - for i, testCase := range testCases { - result := testCase.rules.Match(testCase.objectName) - - if !reflect.DeepEqual(testCase.expectedResult, result) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestRulesClone(t *testing.T) { - rulesCase1 := make(Rules) - - rulesCase2 := make(Rules) - 
rulesCase2.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - - rulesCase3 := make(Rules) - rulesCase3.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"}) - - testCases := []struct { - rules Rules - prefix string - targetID TargetID - }{ - {rulesCase1, "2010*", TargetID{"1", "webhook"}}, - {rulesCase2, "2000*", TargetID{"2", "amqp"}}, - {rulesCase3, "2010*", TargetID{"1", "webhook"}}, - } - - for i, testCase := range testCases { - result := testCase.rules.Clone() - - if !reflect.DeepEqual(result, testCase.rules) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.rules, result) - } - - result.Add(NewPattern(testCase.prefix, ""), testCase.targetID) - if reflect.DeepEqual(result, testCase.rules) { - t.Fatalf("test %v: result: expected: not equal, got: equal", i+1) - } - } -} - -func TestRulesUnion(t *testing.T) { - rulesCase1 := make(Rules) - rules2Case1 := make(Rules) - expectedResultCase1 := make(Rules) - - rulesCase2 := make(Rules) - rules2Case2 := make(Rules) - rules2Case2.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) - expectedResultCase2 := make(Rules) - expectedResultCase2.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) - - rulesCase3 := make(Rules) - rulesCase3.Add(NewPattern("", "*"), TargetID{"1", "webhook"}) - rules2Case3 := make(Rules) - expectedResultCase3 := make(Rules) - expectedResultCase3.Add(NewPattern("", "*"), TargetID{"1", "webhook"}) - - rulesCase4 := make(Rules) - rulesCase4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - rules2Case4 := make(Rules) - rules2Case4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - expectedResultCase4 := make(Rules) - expectedResultCase4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - - rulesCase5 := make(Rules) - rulesCase5.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - rulesCase5.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"}) - rules2Case5 := make(Rules) - rules2Case5.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) - expectedResultCase5 := make(Rules) - expectedResultCase5.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - expectedResultCase5.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"}) - expectedResultCase5.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) - - testCases := []struct { - rules Rules - rules2 Rules - expectedResult Rules - }{ - {rulesCase1, rules2Case1, expectedResultCase1}, - {rulesCase2, rules2Case2, expectedResultCase2}, - {rulesCase3, rules2Case3, expectedResultCase3}, - {rulesCase4, rules2Case4, expectedResultCase4}, - {rulesCase5, rules2Case5, expectedResultCase5}, - } - - for i, testCase := range testCases { - result := testCase.rules.Union(testCase.rules2) - - if !reflect.DeepEqual(testCase.expectedResult, result) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestRulesDifference(t *testing.T) { - rulesCase1 := make(Rules) - rules2Case1 := make(Rules) - expectedResultCase1 := make(Rules) - - rulesCase2 := make(Rules) - rules2Case2 := make(Rules) - rules2Case2.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) - expectedResultCase2 := make(Rules) - - rulesCase3 := make(Rules) - rulesCase3.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) - rules2Case3 := make(Rules) - expectedResultCase3 := make(Rules) - expectedResultCase3.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) - - rulesCase4 := make(Rules) - rulesCase4.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) - rules2Case4 := make(Rules) - rules2Case4.Add(NewPattern("2010*", ""), TargetID{"1", 
"webhook"}) - rules2Case4.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"}) - expectedResultCase4 := make(Rules) - expectedResultCase4.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) - - rulesCase5 := make(Rules) - rulesCase5.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) - rulesCase5.Add(NewPattern("", "*"), TargetID{"2", "amqp"}) - rules2Case5 := make(Rules) - rules2Case5.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) - rules2Case5.Add(NewPattern("", "*"), TargetID{"2", "amqp"}) - expectedResultCase5 := make(Rules) - expectedResultCase5.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) - - testCases := []struct { - rules Rules - rules2 Rules - expectedResult Rules - }{ - {rulesCase1, rules2Case1, expectedResultCase1}, - {rulesCase2, rules2Case2, expectedResultCase2}, - {rulesCase3, rules2Case3, expectedResultCase3}, - {rulesCase4, rules2Case4, expectedResultCase4}, - {rulesCase5, rules2Case5, expectedResultCase5}, - } - - for i, testCase := range testCases { - result := testCase.rules.Difference(testCase.rules2) - - if !reflect.DeepEqual(testCase.expectedResult, result) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} diff --git a/pkg/event/rulesmap.go b/pkg/event/rulesmap.go deleted file mode 100644 index 8f6e32fa..00000000 --- a/pkg/event/rulesmap.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -// RulesMap - map of rules for every event name. -type RulesMap map[Name]Rules - -// add - adds event names, prefixes, suffixes and target ID to rules map. -func (rulesMap RulesMap) add(eventNames []Name, pattern string, targetID TargetID) { - rules := make(Rules) - rules.Add(pattern, targetID) - - for _, eventName := range eventNames { - for _, name := range eventName.Expand() { - rulesMap[name] = rulesMap[name].Union(rules) - } - } -} - -// Clone - returns copy of this rules map. -func (rulesMap RulesMap) Clone() RulesMap { - rulesMapCopy := make(RulesMap) - - for eventName, rules := range rulesMap { - rulesMapCopy[eventName] = rules.Clone() - } - - return rulesMapCopy -} - -// Add - adds given rules map. -func (rulesMap RulesMap) Add(rulesMap2 RulesMap) { - for eventName, rules := range rulesMap2 { - rulesMap[eventName] = rules.Union(rulesMap[eventName]) - } -} - -// Remove - removes given rules map. -func (rulesMap RulesMap) Remove(rulesMap2 RulesMap) { - for eventName, rules := range rulesMap { - if nr := rules.Difference(rulesMap2[eventName]); len(nr) != 0 { - rulesMap[eventName] = nr - } else { - delete(rulesMap, eventName) - } - } -} - -// MatchSimple - returns true if matching object name and event name in rules map. -func (rulesMap RulesMap) MatchSimple(eventName Name, objectName string) bool { - return rulesMap[eventName].MatchSimple(objectName) -} - -// Match - returns TargetIDSet matching object name and event name in rules map. 
-func (rulesMap RulesMap) Match(eventName Name, objectName string) TargetIDSet { - return rulesMap[eventName].Match(objectName) -} - -// NewRulesMap - creates new rules map with given values. -func NewRulesMap(eventNames []Name, pattern string, targetID TargetID) RulesMap { - // If pattern is empty, add '*' wildcard to match all. - if pattern == "" { - pattern = "*" - } - - rulesMap := make(RulesMap) - rulesMap.add(eventNames, pattern, targetID) - return rulesMap -} diff --git a/pkg/event/rulesmap_test.go b/pkg/event/rulesmap_test.go deleted file mode 100644 index 853a2996..00000000 --- a/pkg/event/rulesmap_test.go +++ /dev/null @@ -1,184 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "reflect" - "testing" -) - -func TestRulesMapClone(t *testing.T) { - rulesMapCase1 := make(RulesMap) - rulesMapToAddCase1 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - - rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - rulesMapToAddCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) - - rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) - rulesMapToAddCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - - testCases := []struct { - rulesMap RulesMap - rulesMapToAdd RulesMap - }{ - {rulesMapCase1, rulesMapToAddCase1}, - {rulesMapCase2, rulesMapToAddCase2}, - {rulesMapCase3, rulesMapToAddCase3}, - } - - for i, testCase := range testCases { - result := testCase.rulesMap.Clone() - - if !reflect.DeepEqual(result, testCase.rulesMap) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.rulesMap, result) - } - - result.Add(testCase.rulesMapToAdd) - if reflect.DeepEqual(result, testCase.rulesMap) { - t.Fatalf("test %v: result: expected: not equal, got: equal", i+1) - } - } -} - -func TestRulesMapAdd(t *testing.T) { - rulesMapCase1 := make(RulesMap) - rulesMapToAddCase1 := make(RulesMap) - expectedResultCase1 := make(RulesMap) - - rulesMapCase2 := make(RulesMap) - rulesMapToAddCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - expectedResultCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - - rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - rulesMapToAddCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) - expectedResultCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) - expectedResultCase3.add([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - - testCases := []struct { - rulesMap RulesMap - rulesMapToAdd RulesMap - expectedResult RulesMap - }{ - {rulesMapCase1, rulesMapToAddCase1, expectedResultCase1}, - {rulesMapCase2, rulesMapToAddCase2, expectedResultCase2}, - {rulesMapCase3, rulesMapToAddCase3, expectedResultCase3}, - } - - for i, testCase := range testCases 
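// Illustrative sketch, not part of the deleted rulesmap.go: building a rules map for
// an abbreviated event name and querying it. NewRulesMap, RulesMap.Match and the
// event names are from the diff above; the pattern and target ID are invented
// example values, and the snippet assumes fmt and pkg/event are imported.
rm := event.NewRulesMap([]event.Name{event.ObjectCreatedAll}, "uploads/*", event.TargetID{ID: "1", Name: "webhook"})

// ObjectCreatedAll is expanded at construction time, so the concrete
// ObjectCreatedPut name matches against the same pattern.
fmt.Println(len(rm.Match(event.ObjectCreatedPut, "uploads/report.pdf"))) // 1

// A non-matching object name yields an empty target set.
fmt.Println(len(rm.Match(event.ObjectCreatedPut, "tmp/report.pdf"))) // 0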
{ - testCase.rulesMap.Add(testCase.rulesMapToAdd) - - if !reflect.DeepEqual(testCase.rulesMap, testCase.expectedResult) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.rulesMap) - } - } -} - -func TestRulesMapRemove(t *testing.T) { - rulesMapCase1 := make(RulesMap) - rulesMapToAddCase1 := make(RulesMap) - expectedResultCase1 := make(RulesMap) - - rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - rulesMapToAddCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - expectedResultCase2 := make(RulesMap) - - rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) - rulesMapCase3.add([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - rulesMapToAddCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) - expectedResultCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - - testCases := []struct { - rulesMap RulesMap - rulesMapToAdd RulesMap - expectedResult RulesMap - }{ - {rulesMapCase1, rulesMapToAddCase1, expectedResultCase1}, - {rulesMapCase2, rulesMapToAddCase2, expectedResultCase2}, - {rulesMapCase3, rulesMapToAddCase3, expectedResultCase3}, - } - - for i, testCase := range testCases { - testCase.rulesMap.Remove(testCase.rulesMapToAdd) - - if !reflect.DeepEqual(testCase.rulesMap, testCase.expectedResult) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.rulesMap) - } - } -} - -func TestRulesMapMatch(t *testing.T) { - rulesMapCase1 := make(RulesMap) - - rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) - - rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) - - rulesMapCase4 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) - rulesMapCase4.add([]Name{ObjectCreatedAll}, "*", TargetID{"2", "amqp"}) - - testCases := []struct { - rulesMap RulesMap - eventName Name - objectName string - expectedResult TargetIDSet - }{ - {rulesMapCase1, ObjectCreatedPut, "2010/photo.jpg", NewTargetIDSet()}, - {rulesMapCase2, ObjectCreatedPut, "2010/photo.jpg", NewTargetIDSet(TargetID{"1", "webhook"})}, - {rulesMapCase3, ObjectCreatedPut, "2000/photo.png", NewTargetIDSet()}, - {rulesMapCase4, ObjectCreatedPut, "2000/photo.png", NewTargetIDSet(TargetID{"2", "amqp"})}, - } - - for i, testCase := range testCases { - result := testCase.rulesMap.Match(testCase.eventName, testCase.objectName) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestNewRulesMap(t *testing.T) { - rulesMapCase1 := make(RulesMap) - rulesMapCase1.add([]Name{ObjectAccessedGet, ObjectAccessedHead, ObjectAccessedGetRetention, ObjectAccessedGetLegalHold}, - "*", TargetID{"1", "webhook"}) - - rulesMapCase2 := make(RulesMap) - rulesMapCase2.add([]Name{ObjectAccessedGet, ObjectAccessedHead, - ObjectCreatedPut, ObjectAccessedGetRetention, ObjectAccessedGetLegalHold}, "*", TargetID{"1", "webhook"}) - - rulesMapCase3 := make(RulesMap) - rulesMapCase3.add([]Name{ObjectRemovedDelete}, "2010*.jpg", TargetID{"1", "webhook"}) - - testCases := []struct { - eventNames []Name - pattern string - targetID TargetID - expectedResult RulesMap - }{ - {[]Name{ObjectAccessedAll}, "", TargetID{"1", "webhook"}, rulesMapCase1}, - {[]Name{ObjectAccessedAll, ObjectCreatedPut}, "", TargetID{"1", 
"webhook"}, rulesMapCase2}, - {[]Name{ObjectRemovedDelete}, "2010*.jpg", TargetID{"1", "webhook"}, rulesMapCase3}, - } - - for i, testCase := range testCases { - result := NewRulesMap(testCase.eventNames, testCase.pattern, testCase.targetID) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Errorf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} diff --git a/pkg/event/target/amqp.go b/pkg/event/target/amqp.go deleted file mode 100644 index 3a9b0a9e..00000000 --- a/pkg/event/target/amqp.go +++ /dev/null @@ -1,314 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "context" - "encoding/json" - "errors" - "net" - "net/url" - "os" - "path/filepath" - "sync" - - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" - "github.com/streadway/amqp" -) - -// AMQPArgs - AMQP target arguments. -type AMQPArgs struct { - Enable bool `json:"enable"` - URL xnet.URL `json:"url"` - Exchange string `json:"exchange"` - RoutingKey string `json:"routingKey"` - ExchangeType string `json:"exchangeType"` - DeliveryMode uint8 `json:"deliveryMode"` - Mandatory bool `json:"mandatory"` - Immediate bool `json:"immediate"` - Durable bool `json:"durable"` - Internal bool `json:"internal"` - NoWait bool `json:"noWait"` - AutoDeleted bool `json:"autoDeleted"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` -} - -//lint:file-ignore ST1003 We cannot change these exported names. - -// AMQP input constants. 
-const ( - AmqpQueueDir = "queue_dir" - AmqpQueueLimit = "queue_limit" - - AmqpURL = "url" - AmqpExchange = "exchange" - AmqpRoutingKey = "routing_key" - AmqpExchangeType = "exchange_type" - AmqpDeliveryMode = "delivery_mode" - AmqpMandatory = "mandatory" - AmqpImmediate = "immediate" - AmqpDurable = "durable" - AmqpInternal = "internal" - AmqpNoWait = "no_wait" - AmqpAutoDeleted = "auto_deleted" - AmqpArguments = "arguments" - AmqpPublishingHeaders = "publishing_headers" - - EnvAMQPEnable = "MINIO_NOTIFY_AMQP_ENABLE" - EnvAMQPURL = "MINIO_NOTIFY_AMQP_URL" - EnvAMQPExchange = "MINIO_NOTIFY_AMQP_EXCHANGE" - EnvAMQPRoutingKey = "MINIO_NOTIFY_AMQP_ROUTING_KEY" - EnvAMQPExchangeType = "MINIO_NOTIFY_AMQP_EXCHANGE_TYPE" - EnvAMQPDeliveryMode = "MINIO_NOTIFY_AMQP_DELIVERY_MODE" - EnvAMQPMandatory = "MINIO_NOTIFY_AMQP_MANDATORY" - EnvAMQPImmediate = "MINIO_NOTIFY_AMQP_IMMEDIATE" - EnvAMQPDurable = "MINIO_NOTIFY_AMQP_DURABLE" - EnvAMQPInternal = "MINIO_NOTIFY_AMQP_INTERNAL" - EnvAMQPNoWait = "MINIO_NOTIFY_AMQP_NO_WAIT" - EnvAMQPAutoDeleted = "MINIO_NOTIFY_AMQP_AUTO_DELETED" - EnvAMQPArguments = "MINIO_NOTIFY_AMQP_ARGUMENTS" - EnvAMQPPublishingHeaders = "MINIO_NOTIFY_AMQP_PUBLISHING_HEADERS" - EnvAMQPQueueDir = "MINIO_NOTIFY_AMQP_QUEUE_DIR" - EnvAMQPQueueLimit = "MINIO_NOTIFY_AMQP_QUEUE_LIMIT" -) - -// Validate AMQP arguments -func (a *AMQPArgs) Validate() error { - if !a.Enable { - return nil - } - if _, err := amqp.ParseURI(a.URL.String()); err != nil { - return err - } - if a.QueueDir != "" { - if !filepath.IsAbs(a.QueueDir) { - return errors.New("queueDir path should be absolute") - } - } - - return nil -} - -// AMQPTarget - AMQP target -type AMQPTarget struct { - id event.TargetID - args AMQPArgs - conn *amqp.Connection - connMutex sync.Mutex - store Store - loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}) -} - -// ID - returns TargetID. -func (target *AMQPTarget) ID() event.TargetID { - return target.id -} - -// IsActive - Return true if target is up and active -func (target *AMQPTarget) IsActive() (bool, error) { - ch, err := target.channel() - if err != nil { - return false, err - } - defer func() { - ch.Close() - }() - return true, nil -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *AMQPTarget) HasQueueStore() bool { - return target.store != nil -} - -func (target *AMQPTarget) channel() (*amqp.Channel, error) { - var err error - var conn *amqp.Connection - var ch *amqp.Channel - - isAMQPClosedErr := func(err error) bool { - if err == amqp.ErrClosed { - return true - } - - if nerr, ok := err.(*net.OpError); ok { - return (nerr.Err.Error() == "use of closed network connection") - } - - return false - } - - target.connMutex.Lock() - defer target.connMutex.Unlock() - - if target.conn != nil { - ch, err = target.conn.Channel() - if err == nil { - return ch, nil - } - - if !isAMQPClosedErr(err) { - return nil, err - } - } - - conn, err = amqp.Dial(target.args.URL.String()) - if err != nil { - if IsConnRefusedErr(err) { - return nil, errNotConnected - } - return nil, err - } - - ch, err = conn.Channel() - if err != nil { - return nil, err - } - - target.conn = conn - - return ch, nil -} - -// send - sends an event to the AMQP. 
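// Illustrative sketch, not part of the deleted amqp.go: filling AMQPArgs and running
// its Validate method. The struct fields and Validate come from the diff above;
// xnet.ParseURL is assumed to be the pkg/net URL parser, and the broker URL,
// exchange name and delivery mode are invented example values.
u, err := xnet.ParseURL("amqp://guest:guest@localhost:5672")
if err != nil {
	panic(err)
}
args := target.AMQPArgs{
	Enable:       true,
	URL:          *u,
	Exchange:     "minio-events",
	ExchangeType: "fanout",
	DeliveryMode: 2, // persistent delivery in AMQP terms
}
if err := args.Validate(); err != nil {
	panic(err) // e.g. an unparsable AMQP URI or a relative queueDir
}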
-func (target *AMQPTarget) send(eventData event.Event, ch *amqp.Channel) error { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - key := eventData.S3.Bucket.Name + "/" + objectName - - data, err := json.Marshal(event.Log{EventName: eventData.EventName, Key: key, Records: []event.Event{eventData}}) - if err != nil { - return err - } - - if err = ch.ExchangeDeclare(target.args.Exchange, target.args.ExchangeType, target.args.Durable, - target.args.AutoDeleted, target.args.Internal, target.args.NoWait, nil); err != nil { - return err - } - - if err := ch.Publish(target.args.Exchange, target.args.RoutingKey, target.args.Mandatory, - target.args.Immediate, amqp.Publishing{ - ContentType: "application/json", - DeliveryMode: target.args.DeliveryMode, - Body: data, - }); err != nil { - return err - } - - return nil -} - -// Save - saves the events to the store which will be replayed when the amqp connection is active. -func (target *AMQPTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - ch, err := target.channel() - if err != nil { - return err - } - defer func() { - cErr := ch.Close() - target.loggerOnce(context.Background(), cErr, target.ID()) - }() - - return target.send(eventData, ch) -} - -// Send - sends event to AMQP. -func (target *AMQPTarget) Send(eventKey string) error { - ch, err := target.channel() - if err != nil { - return err - } - defer func() { - cErr := ch.Close() - target.loggerOnce(context.Background(), cErr, target.ID()) - }() - - eventData, eErr := target.store.Get(eventKey) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and wouldve been already been sent successfully. - if os.IsNotExist(eErr) { - return nil - } - return eErr - } - - if err := target.send(eventData, ch); err != nil { - return err - } - - // Delete the event from store. - return target.store.Del(eventKey) -} - -// Close - does nothing and available for interface compatibility. -func (target *AMQPTarget) Close() error { - if target.conn != nil { - return target.conn.Close() - } - return nil -} - -// NewAMQPTarget - creates new AMQP target. -func NewAMQPTarget(id string, args AMQPArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}), test bool) (*AMQPTarget, error) { - var conn *amqp.Connection - var err error - - var store Store - - target := &AMQPTarget{ - id: event.TargetID{ID: id, Name: "amqp"}, - args: args, - loggerOnce: loggerOnce, - } - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-amqp-"+id) - store = NewQueueStore(queueDir, args.QueueLimit) - if oErr := store.Open(); oErr != nil { - target.loggerOnce(context.Background(), oErr, target.ID()) - return target, oErr - } - target.store = store - } - - conn, err = amqp.Dial(args.URL.String()) - if err != nil { - if store == nil || !(IsConnRefusedErr(err) || IsConnResetErr(err)) { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - } - target.conn = conn - - if target.store != nil && !test { - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - - // Start replaying events from the store. 
- go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - - return target, nil -} diff --git a/pkg/event/target/common.go b/pkg/event/target/common.go deleted file mode 100644 index 967db850..00000000 --- a/pkg/event/target/common.go +++ /dev/null @@ -1,28 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import "github.com/google/uuid" - -func getNewUUID() (string, error) { - u, err := uuid.NewRandom() - if err != nil { - return "", err - } - - return u.String(), nil -} diff --git a/pkg/event/target/elasticsearch.go b/pkg/event/target/elasticsearch.go deleted file mode 100644 index c8ea8967..00000000 --- a/pkg/event/target/elasticsearch.go +++ /dev/null @@ -1,293 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "context" - "fmt" - "net/http" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" - "github.com/pkg/errors" - - "gopkg.in/olivere/elastic.v5" -) - -// Elastic constants -const ( - ElasticFormat = "format" - ElasticURL = "url" - ElasticIndex = "index" - ElasticQueueDir = "queue_dir" - ElasticQueueLimit = "queue_limit" - - EnvElasticEnable = "MINIO_NOTIFY_ELASTICSEARCH_ENABLE" - EnvElasticFormat = "MINIO_NOTIFY_ELASTICSEARCH_FORMAT" - EnvElasticURL = "MINIO_NOTIFY_ELASTICSEARCH_URL" - EnvElasticIndex = "MINIO_NOTIFY_ELASTICSEARCH_INDEX" - EnvElasticQueueDir = "MINIO_NOTIFY_ELASTICSEARCH_QUEUE_DIR" - EnvElasticQueueLimit = "MINIO_NOTIFY_ELASTICSEARCH_QUEUE_LIMIT" -) - -// ElasticsearchArgs - Elasticsearch target arguments. -type ElasticsearchArgs struct { - Enable bool `json:"enable"` - Format string `json:"format"` - URL xnet.URL `json:"url"` - Index string `json:"index"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` -} - -// Validate ElasticsearchArgs fields -func (a ElasticsearchArgs) Validate() error { - if !a.Enable { - return nil - } - if a.URL.IsEmpty() { - return errors.New("empty URL") - } - if a.Format != "" { - f := strings.ToLower(a.Format) - if f != event.NamespaceFormat && f != event.AccessFormat { - return errors.New("format value unrecognized") - } - } - if a.Index == "" { - return errors.New("empty index value") - } - return nil -} - -// ElasticsearchTarget - Elasticsearch target. 
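// Illustrative sketch, not in the deleted elasticsearch.go: the minimal
// ElasticsearchArgs accepted by Validate above, using the namespace format constant
// from event.go. xnet.ParseURL is assumed to be the pkg/net URL parser; the endpoint
// and index name are invented example values.
esURL, err := xnet.ParseURL("http://localhost:9200")
if err != nil {
	panic(err)
}
esArgs := target.ElasticsearchArgs{
	Enable: true,
	Format: event.NamespaceFormat, // index keeps one document per bucket/object key
	URL:    *esURL,
	Index:  "minio-events",
}
if err := esArgs.Validate(); err != nil {
	panic(err) // empty URL, empty index or an unrecognized format value
}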
-type ElasticsearchTarget struct { - id event.TargetID - args ElasticsearchArgs - client *elastic.Client - store Store - loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}) -} - -// ID - returns target ID. -func (target *ElasticsearchTarget) ID() event.TargetID { - return target.id -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *ElasticsearchTarget) HasQueueStore() bool { - return target.store != nil -} - -// IsActive - Return true if target is up and active -func (target *ElasticsearchTarget) IsActive() (bool, error) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - if target.client == nil { - client, err := newClient(target.args) - if err != nil { - return false, err - } - target.client = client - } - _, code, err := target.client.Ping(target.args.URL.String()).HttpHeadOnly(true).Do(ctx) - if err != nil { - if elastic.IsConnErr(err) || elastic.IsContextErr(err) || xnet.IsNetworkOrHostDown(err) { - return false, errNotConnected - } - return false, err - } - return !(code >= http.StatusBadRequest), nil -} - -// Save - saves the events to the store if queuestore is configured, which will be replayed when the elasticsearch connection is active. -func (target *ElasticsearchTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - err := target.send(eventData) - if elastic.IsConnErr(err) || elastic.IsContextErr(err) || xnet.IsNetworkOrHostDown(err) { - return errNotConnected - } - return err -} - -// send - sends the event to the target. -func (target *ElasticsearchTarget) send(eventData event.Event) error { - - var key string - - exists := func() (bool, error) { - return target.client.Exists().Index(target.args.Index).Type("event").Id(key).Do(context.Background()) - } - - remove := func() error { - exists, err := exists() - if err == nil && exists { - _, err = target.client.Delete().Index(target.args.Index).Type("event").Id(key).Do(context.Background()) - } - return err - } - - update := func() error { - _, err := target.client.Index().Index(target.args.Index).Type("event").BodyJson(map[string]interface{}{"Records": []event.Event{eventData}}).Id(key).Do(context.Background()) - return err - } - - add := func() error { - _, err := target.client.Index().Index(target.args.Index).Type("event").BodyJson(map[string]interface{}{"Records": []event.Event{eventData}}).Do(context.Background()) - return err - } - - if target.args.Format == event.NamespaceFormat { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - - key = eventData.S3.Bucket.Name + "/" + objectName - if eventData.EventName == event.ObjectRemovedDelete { - err = remove() - } else { - err = update() - } - return err - } - - if target.args.Format == event.AccessFormat { - return add() - } - - return nil -} - -// Send - reads an event from store and sends it to Elasticsearch. -func (target *ElasticsearchTarget) Send(eventKey string) error { - var err error - if target.client == nil { - target.client, err = newClient(target.args) - if err != nil { - return err - } - } - - eventData, eErr := target.store.Get(eventKey) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and wouldve been already been sent successfully. 
- if os.IsNotExist(eErr) { - return nil - } - return eErr - } - - if err := target.send(eventData); err != nil { - if elastic.IsConnErr(err) || elastic.IsContextErr(err) || xnet.IsNetworkOrHostDown(err) { - return errNotConnected - } - return err - } - - // Delete the event from store. - return target.store.Del(eventKey) -} - -// Close - does nothing and available for interface compatibility. -func (target *ElasticsearchTarget) Close() error { - if target.client != nil { - // Stops the background processes that the client is running. - target.client.Stop() - } - return nil -} - -// createIndex - creates the index if it does not exist. -func createIndex(client *elastic.Client, args ElasticsearchArgs) error { - exists, err := client.IndexExists(args.Index).Do(context.Background()) - if err != nil { - return err - } - if !exists { - var createIndex *elastic.IndicesCreateResult - if createIndex, err = client.CreateIndex(args.Index).Do(context.Background()); err != nil { - return err - } - - if !createIndex.Acknowledged { - return fmt.Errorf("index %v not created", args.Index) - } - } - return nil -} - -// newClient - creates a new elastic client with args provided. -func newClient(args ElasticsearchArgs) (*elastic.Client, error) { - client, err := elastic.NewClient(elastic.SetURL(args.URL.String()), elastic.SetMaxRetries(10)) - if err != nil { - // https://github.com/olivere/elastic/wiki/Connection-Errors - if elastic.IsConnErr(err) || elastic.IsContextErr(err) || xnet.IsNetworkOrHostDown(err) { - return nil, errNotConnected - } - return nil, err - } - if err = createIndex(client, args); err != nil { - return nil, err - } - return client, nil -} - -// NewElasticsearchTarget - creates new Elasticsearch target. -func NewElasticsearchTarget(id string, args ElasticsearchArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), test bool) (*ElasticsearchTarget, error) { - target := &ElasticsearchTarget{ - id: event.TargetID{ID: id, Name: "elasticsearch"}, - args: args, - loggerOnce: loggerOnce, - } - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-elasticsearch-"+id) - target.store = NewQueueStore(queueDir, args.QueueLimit) - if err := target.store.Open(); err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - } - - var err error - target.client, err = newClient(args) - if err != nil { - if target.store == nil || err != errNotConnected { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - } - - if target.store != nil && !test { - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - // Start replaying events from the store. - go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - - return target, nil -} diff --git a/pkg/event/target/kafka.go b/pkg/event/target/kafka.go deleted file mode 100644 index a900a26d..00000000 --- a/pkg/event/target/kafka.go +++ /dev/null @@ -1,336 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "net" - "net/url" - "os" - "path/filepath" - - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" - - sarama "github.com/Shopify/sarama" - saramatls "github.com/Shopify/sarama/tools/tls" -) - -// Kafka input constants -const ( - KafkaBrokers = "brokers" - KafkaTopic = "topic" - KafkaQueueDir = "queue_dir" - KafkaQueueLimit = "queue_limit" - KafkaTLS = "tls" - KafkaTLSSkipVerify = "tls_skip_verify" - KafkaTLSClientAuth = "tls_client_auth" - KafkaSASL = "sasl" - KafkaSASLUsername = "sasl_username" - KafkaSASLPassword = "sasl_password" - KafkaSASLMechanism = "sasl_mechanism" - KafkaClientTLSCert = "client_tls_cert" - KafkaClientTLSKey = "client_tls_key" - KafkaVersion = "version" - - EnvKafkaEnable = "MINIO_NOTIFY_KAFKA_ENABLE" - EnvKafkaBrokers = "MINIO_NOTIFY_KAFKA_BROKERS" - EnvKafkaTopic = "MINIO_NOTIFY_KAFKA_TOPIC" - EnvKafkaQueueDir = "MINIO_NOTIFY_KAFKA_QUEUE_DIR" - EnvKafkaQueueLimit = "MINIO_NOTIFY_KAFKA_QUEUE_LIMIT" - EnvKafkaTLS = "MINIO_NOTIFY_KAFKA_TLS" - EnvKafkaTLSSkipVerify = "MINIO_NOTIFY_KAFKA_TLS_SKIP_VERIFY" - EnvKafkaTLSClientAuth = "MINIO_NOTIFY_KAFKA_TLS_CLIENT_AUTH" - EnvKafkaSASLEnable = "MINIO_NOTIFY_KAFKA_SASL" - EnvKafkaSASLUsername = "MINIO_NOTIFY_KAFKA_SASL_USERNAME" - EnvKafkaSASLPassword = "MINIO_NOTIFY_KAFKA_SASL_PASSWORD" - EnvKafkaSASLMechanism = "MINIO_NOTIFY_KAFKA_SASL_MECHANISM" - EnvKafkaClientTLSCert = "MINIO_NOTIFY_KAFKA_CLIENT_TLS_CERT" - EnvKafkaClientTLSKey = "MINIO_NOTIFY_KAFKA_CLIENT_TLS_KEY" - EnvKafkaVersion = "MINIO_NOTIFY_KAFKA_VERSION" -) - -// KafkaArgs - Kafka target arguments. -type KafkaArgs struct { - Enable bool `json:"enable"` - Brokers []xnet.Host `json:"brokers"` - Topic string `json:"topic"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` - Version string `json:"version"` - TLS struct { - Enable bool `json:"enable"` - RootCAs *x509.CertPool `json:"-"` - SkipVerify bool `json:"skipVerify"` - ClientAuth tls.ClientAuthType `json:"clientAuth"` - ClientTLSCert string `json:"clientTLSCert"` - ClientTLSKey string `json:"clientTLSKey"` - } `json:"tls"` - SASL struct { - Enable bool `json:"enable"` - User string `json:"username"` - Password string `json:"password"` - Mechanism string `json:"mechanism"` - } `json:"sasl"` -} - -// Validate KafkaArgs fields -func (k KafkaArgs) Validate() error { - if !k.Enable { - return nil - } - if len(k.Brokers) == 0 { - return errors.New("no broker address found") - } - for _, b := range k.Brokers { - if _, err := xnet.ParseHost(b.String()); err != nil { - return err - } - } - if k.QueueDir != "" { - if !filepath.IsAbs(k.QueueDir) { - return errors.New("queueDir path should be absolute") - } - } - if k.Version != "" { - if _, err := sarama.ParseKafkaVersion(k.Version); err != nil { - return err - } - } - return nil -} - -// KafkaTarget - Kafka target. 
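As an aside on the KafkaArgs.Validate checks above, here is a minimal, self-contained sketch of the broker-address validation, assuming plain host:port strings in place of the xnet.Host type; validateBrokers is an illustrative helper, not part of the deleted code.

package main

import (
	"errors"
	"fmt"
	"net"
	"strings"
)

// validateBrokers mirrors the broker checks in KafkaArgs.Validate: at least
// one address must be configured, and each must parse as host:port.
func validateBrokers(brokers []string) error {
	if len(brokers) == 0 {
		return errors.New("no broker address found")
	}
	for _, b := range brokers {
		if _, _, err := net.SplitHostPort(b); err != nil {
			return fmt.Errorf("invalid broker %q: %w", b, err)
		}
	}
	return nil
}

func main() {
	// MINIO_NOTIFY_KAFKA_BROKERS is conventionally a comma-separated list.
	brokers := strings.Split("kafka-1:9092,kafka-2:9092", ",")
	fmt.Println(validateBrokers(brokers)) // <nil>
	fmt.Println(validateBrokers(nil))     // no broker address found
}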
-type KafkaTarget struct { - id event.TargetID - args KafkaArgs - producer sarama.SyncProducer - config *sarama.Config - store Store - loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}) -} - -// ID - returns target ID. -func (target *KafkaTarget) ID() event.TargetID { - return target.id -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *KafkaTarget) HasQueueStore() bool { - return target.store != nil -} - -// IsActive - Return true if target is up and active -func (target *KafkaTarget) IsActive() (bool, error) { - if !target.args.pingBrokers() { - return false, errNotConnected - } - return true, nil -} - -// Save - saves the events to the store which will be replayed when the Kafka connection is active. -func (target *KafkaTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - _, err := target.IsActive() - if err != nil { - return err - } - return target.send(eventData) -} - -// send - sends an event to the kafka. -func (target *KafkaTarget) send(eventData event.Event) error { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - key := eventData.S3.Bucket.Name + "/" + objectName - - data, err := json.Marshal(event.Log{EventName: eventData.EventName, Key: key, Records: []event.Event{eventData}}) - if err != nil { - return err - } - - msg := sarama.ProducerMessage{ - Topic: target.args.Topic, - Key: sarama.StringEncoder(key), - Value: sarama.ByteEncoder(data), - } - - _, _, err = target.producer.SendMessage(&msg) - - return err -} - -// Send - reads an event from store and sends it to Kafka. -func (target *KafkaTarget) Send(eventKey string) error { - var err error - _, err = target.IsActive() - if err != nil { - return err - } - - if target.producer == nil { - brokers := []string{} - for _, broker := range target.args.Brokers { - brokers = append(brokers, broker.String()) - } - target.producer, err = sarama.NewSyncProducer(brokers, target.config) - if err != nil { - if err != sarama.ErrOutOfBrokers { - return err - } - return errNotConnected - } - } - - eventData, eErr := target.store.Get(eventKey) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and wouldve been already been sent successfully. - if os.IsNotExist(eErr) { - return nil - } - return eErr - } - - err = target.send(eventData) - if err != nil { - // Sarama opens the ciruit breaker after 3 consecutive connection failures. - if err == sarama.ErrLeaderNotAvailable || err.Error() == "circuit breaker is open" { - return errNotConnected - } - return err - } - - // Delete the event from store. - return target.store.Del(eventKey) -} - -// Close - closes underneath kafka connection. -func (target *KafkaTarget) Close() error { - if target.producer != nil { - return target.producer.Close() - } - return nil -} - -// Check if atleast one broker in cluster is active -func (k KafkaArgs) pingBrokers() bool { - - for _, broker := range k.Brokers { - _, dErr := net.Dial("tcp", broker.String()) - if dErr == nil { - return true - } - } - return false -} - -// NewKafkaTarget - creates new Kafka target with auth credentials. 
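KafkaTarget.IsActive above reduces liveness to a TCP reachability probe via KafkaArgs.pingBrokers. A self-contained sketch of the same idea follows; a dial timeout is added for illustration (the deleted code uses net.Dial without one) and the probe connection is closed explicitly.

package main

import (
	"fmt"
	"net"
	"time"
)

// pingAnyBroker reports whether a TCP connection to at least one broker
// address succeeds, mirroring KafkaArgs.pingBrokers.
func pingAnyBroker(brokers []string, timeout time.Duration) bool {
	for _, addr := range brokers {
		conn, err := net.DialTimeout("tcp", addr, timeout)
		if err == nil {
			conn.Close() // the probe only needs to know the dial succeeded
			return true
		}
	}
	return false
}

func main() {
	up := pingAnyBroker([]string{"localhost:9092"}, 2*time.Second)
	fmt.Println("kafka reachable:", up)
}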
-func NewKafkaTarget(id string, args KafkaArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), test bool) (*KafkaTarget, error) { - config := sarama.NewConfig() - - target := &KafkaTarget{ - id: event.TargetID{ID: id, Name: "kafka"}, - args: args, - loggerOnce: loggerOnce, - } - - if args.Version != "" { - kafkaVersion, err := sarama.ParseKafkaVersion(args.Version) - if err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - config.Version = kafkaVersion - } - - config.Net.SASL.User = args.SASL.User - config.Net.SASL.Password = args.SASL.Password - if args.SASL.Mechanism == "sha512" { - config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA512} } - config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA512) - } else if args.SASL.Mechanism == "sha256" { - config.Net.SASL.SCRAMClientGeneratorFunc = func() sarama.SCRAMClient { return &XDGSCRAMClient{HashGeneratorFcn: KafkaSHA256} } - config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypeSCRAMSHA256) - } else { - // default to PLAIN - config.Net.SASL.Mechanism = sarama.SASLMechanism(sarama.SASLTypePlaintext) - } - config.Net.SASL.Enable = args.SASL.Enable - - tlsConfig, err := saramatls.NewConfig(args.TLS.ClientTLSCert, args.TLS.ClientTLSKey) - - if err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - - config.Net.TLS.Enable = args.TLS.Enable - config.Net.TLS.Config = tlsConfig - config.Net.TLS.Config.InsecureSkipVerify = args.TLS.SkipVerify - config.Net.TLS.Config.ClientAuth = args.TLS.ClientAuth - config.Net.TLS.Config.RootCAs = args.TLS.RootCAs - - config.Producer.RequiredAcks = sarama.WaitForAll - config.Producer.Retry.Max = 10 - config.Producer.Return.Successes = true - - target.config = config - - brokers := []string{} - for _, broker := range args.Brokers { - brokers = append(brokers, broker.String()) - } - - var store Store - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-kafka-"+id) - store = NewQueueStore(queueDir, args.QueueLimit) - if oErr := store.Open(); oErr != nil { - target.loggerOnce(context.Background(), oErr, target.ID()) - return target, oErr - } - target.store = store - } - - producer, err := sarama.NewSyncProducer(brokers, config) - if err != nil { - if store == nil || err != sarama.ErrOutOfBrokers { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - } - target.producer = producer - - if target.store != nil && !test { - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - // Start replaying events from the store. - go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - - return target, nil -} diff --git a/pkg/event/target/kafka_scram_client.go b/pkg/event/target/kafka_scram_client.go deleted file mode 100644 index efecec89..00000000 --- a/pkg/event/target/kafka_scram_client.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "crypto/sha512" - "hash" - - "github.com/minio/sha256-simd" - "github.com/xdg/scram" -) - -// KafkaSHA256 is a function that returns a crypto/sha256 hasher and should be used -// to create Client objects configured for SHA-256 hashing. -var KafkaSHA256 scram.HashGeneratorFcn = func() hash.Hash { return sha256.New() } - -// KafkaSHA512 is a function that returns a crypto/sha512 hasher and should be used -// to create Client objects configured for SHA-512 hashing. -var KafkaSHA512 scram.HashGeneratorFcn = func() hash.Hash { return sha512.New() } - -// XDGSCRAMClient implements the client-side of an authentication -// conversation with a server. A new conversation must be created for -// each authentication attempt. -type XDGSCRAMClient struct { - *scram.Client - *scram.ClientConversation - scram.HashGeneratorFcn -} - -// Begin constructs a SCRAM client component based on a given hash.Hash -// factory receiver. This constructor will normalize the username, password -// and authzID via the SASLprep algorithm, as recommended by RFC-5802. If -// SASLprep fails, the method returns an error. -func (x *XDGSCRAMClient) Begin(userName, password, authzID string) (err error) { - x.Client, err = x.HashGeneratorFcn.NewClient(userName, password, authzID) - if err != nil { - return err - } - x.ClientConversation = x.Client.NewConversation() - return nil -} - -// Step takes a string provided from a server (or just an empty string for the -// very first conversation step) and attempts to move the authentication -// conversation forward. It returns a string to be sent to the server or an -// error if the server message is invalid. Calling Step after a conversation -// completes is also an error. -func (x *XDGSCRAMClient) Step(challenge string) (response string, err error) { - response, err = x.ClientConversation.Step(challenge) - return -} - -// Done returns true if the conversation is completed or has errored. -func (x *XDGSCRAMClient) Done() bool { - return x.ClientConversation.Done() -} diff --git a/pkg/event/target/mqtt.go b/pkg/event/target/mqtt.go deleted file mode 100644 index 866fed15..00000000 --- a/pkg/event/target/mqtt.go +++ /dev/null @@ -1,280 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package target - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "fmt" - "net/url" - "os" - "path/filepath" - "time" - - mqtt "github.com/eclipse/paho.mqtt.golang" - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" -) - -const ( - reconnectInterval = 5 // In Seconds - storePrefix = "minio" -) - -// MQTT input constants -const ( - MqttBroker = "broker" - MqttTopic = "topic" - MqttQoS = "qos" - MqttUsername = "username" - MqttPassword = "password" - MqttReconnectInterval = "reconnect_interval" - MqttKeepAliveInterval = "keep_alive_interval" - MqttQueueDir = "queue_dir" - MqttQueueLimit = "queue_limit" - - EnvMQTTEnable = "MINIO_NOTIFY_MQTT_ENABLE" - EnvMQTTBroker = "MINIO_NOTIFY_MQTT_BROKER" - EnvMQTTTopic = "MINIO_NOTIFY_MQTT_TOPIC" - EnvMQTTQoS = "MINIO_NOTIFY_MQTT_QOS" - EnvMQTTUsername = "MINIO_NOTIFY_MQTT_USERNAME" - EnvMQTTPassword = "MINIO_NOTIFY_MQTT_PASSWORD" - EnvMQTTReconnectInterval = "MINIO_NOTIFY_MQTT_RECONNECT_INTERVAL" - EnvMQTTKeepAliveInterval = "MINIO_NOTIFY_MQTT_KEEP_ALIVE_INTERVAL" - EnvMQTTQueueDir = "MINIO_NOTIFY_MQTT_QUEUE_DIR" - EnvMQTTQueueLimit = "MINIO_NOTIFY_MQTT_QUEUE_LIMIT" -) - -// MQTTArgs - MQTT target arguments. -type MQTTArgs struct { - Enable bool `json:"enable"` - Broker xnet.URL `json:"broker"` - Topic string `json:"topic"` - QoS byte `json:"qos"` - User string `json:"username"` - Password string `json:"password"` - MaxReconnectInterval time.Duration `json:"reconnectInterval"` - KeepAlive time.Duration `json:"keepAliveInterval"` - RootCAs *x509.CertPool `json:"-"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` -} - -// Validate MQTTArgs fields -func (m MQTTArgs) Validate() error { - if !m.Enable { - return nil - } - u, err := xnet.ParseURL(m.Broker.String()) - if err != nil { - return err - } - switch u.Scheme { - case "ws", "wss", "tcp", "ssl", "tls", "tcps": - default: - return errors.New("unknown protocol in broker address") - } - if m.QueueDir != "" { - if !filepath.IsAbs(m.QueueDir) { - return errors.New("queueDir path should be absolute") - } - if m.QoS == 0 { - return errors.New("qos should be set to 1 or 2 if queueDir is set") - } - } - - return nil -} - -// MQTTTarget - MQTT target. -type MQTTTarget struct { - id event.TargetID - args MQTTArgs - client mqtt.Client - store Store - quitCh chan struct{} - loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}) -} - -// ID - returns target ID. -func (target *MQTTTarget) ID() event.TargetID { - return target.id -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *MQTTTarget) HasQueueStore() bool { - return target.store != nil -} - -// IsActive - Return true if target is up and active -func (target *MQTTTarget) IsActive() (bool, error) { - if !target.client.IsConnectionOpen() { - return false, errNotConnected - } - return true, nil -} - -// send - sends an event to the mqtt. 
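A minimal sketch of the MQTTArgs.Validate rules above, using plain strings instead of xnet.URL (validateMQTT is an illustrative helper): the broker scheme must be a supported MQTT transport, the queue directory must be absolute, and persistent queueing requires QoS 1 or 2 so that replayed events get broker acknowledgements.

package main

import (
	"errors"
	"fmt"
	"net/url"
	"path/filepath"
)

func validateMQTT(broker, queueDir string, qos byte) error {
	u, err := url.Parse(broker)
	if err != nil {
		return err
	}
	switch u.Scheme {
	case "ws", "wss", "tcp", "ssl", "tls", "tcps":
		// supported transports
	default:
		return errors.New("unknown protocol in broker address")
	}
	if queueDir != "" {
		if !filepath.IsAbs(queueDir) {
			return errors.New("queueDir path should be absolute")
		}
		if qos == 0 {
			return errors.New("qos should be set to 1 or 2 if queueDir is set")
		}
	}
	return nil
}

func main() {
	fmt.Println(validateMQTT("tcp://localhost:1883", "/var/minio/events", 1)) // <nil>
	fmt.Println(validateMQTT("tcp://localhost:1883", "/var/minio/events", 0)) // QoS error
}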
-func (target *MQTTTarget) send(eventData event.Event) error { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - key := eventData.S3.Bucket.Name + "/" + objectName - - data, err := json.Marshal(event.Log{EventName: eventData.EventName, Key: key, Records: []event.Event{eventData}}) - if err != nil { - return err - } - - token := target.client.Publish(target.args.Topic, target.args.QoS, false, string(data)) - if !token.WaitTimeout(reconnectInterval * time.Second) { - return errNotConnected - } - return token.Error() -} - -// Send - reads an event from store and sends it to MQTT. -func (target *MQTTTarget) Send(eventKey string) error { - // Do not send if the connection is not active. - _, err := target.IsActive() - if err != nil { - return err - } - - eventData, err := target.store.Get(eventKey) - if err != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and wouldve been already been sent successfully. - if os.IsNotExist(err) { - return nil - } - return err - } - - if err = target.send(eventData); err != nil { - return err - } - - // Delete the event from store. - return target.store.Del(eventKey) -} - -// Save - saves the events to the store if queuestore is configured, which will -// be replayed when the mqtt connection is active. -func (target *MQTTTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - - // Do not send if the connection is not active. - _, err := target.IsActive() - if err != nil { - return err - } - - return target.send(eventData) -} - -// Close - does nothing and available for interface compatibility. -func (target *MQTTTarget) Close() error { - target.client.Disconnect(100) - close(target.quitCh) - return nil -} - -// NewMQTTTarget - creates new MQTT target. -func NewMQTTTarget(id string, args MQTTArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), test bool) (*MQTTTarget, error) { - if args.MaxReconnectInterval == 0 { - // Default interval - // https://github.com/eclipse/paho.mqtt.golang/blob/master/options.go#L115 - args.MaxReconnectInterval = 10 * time.Minute - } - - options := mqtt.NewClientOptions(). - SetClientID(""). - SetCleanSession(true). - SetUsername(args.User). - SetPassword(args.Password). - SetMaxReconnectInterval(args.MaxReconnectInterval). - SetKeepAlive(args.KeepAlive). - SetTLSConfig(&tls.Config{RootCAs: args.RootCAs}). - AddBroker(args.Broker.String()) - - client := mqtt.NewClient(options) - - target := &MQTTTarget{ - id: event.TargetID{ID: id, Name: "mqtt"}, - args: args, - client: client, - quitCh: make(chan struct{}), - loggerOnce: loggerOnce, - } - - token := client.Connect() - retryRegister := func() { - for { - retry: - select { - case <-doneCh: - return - case <-target.quitCh: - return - default: - ok := token.WaitTimeout(reconnectInterval * time.Second) - if ok && token.Error() != nil { - target.loggerOnce(context.Background(), - fmt.Errorf("Previous connect failed with %w attempting a reconnect", - token.Error()), - target.ID()) - time.Sleep(reconnectInterval * time.Second) - token = client.Connect() - goto retry - } - if ok { - // Successfully connected. 
- return - } - } - } - } - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-mqtt-"+id) - target.store = NewQueueStore(queueDir, args.QueueLimit) - if err := target.store.Open(); err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - - if !test { - go retryRegister() - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - // Start replaying events from the store. - go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - } else { - if token.Wait() && token.Error() != nil { - return target, token.Error() - } - } - return target, nil -} diff --git a/pkg/event/target/mysql.go b/pkg/event/target/mysql.go deleted file mode 100644 index 969697ea..00000000 --- a/pkg/event/target/mysql.go +++ /dev/null @@ -1,421 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// MySQL Notifier implementation. Two formats, "namespace" and -// "access" are supported. -// -// * Namespace format -// -// On each create or update object event in MinIO Object storage -// server, a row is created or updated in the table in MySQL. On each -// object removal, the corresponding row is deleted from the table. -// -// A table with a specific structure (column names, column types, and -// primary key/uniqueness constraint) is used. The user may set the -// table name in the configuration. A sample SQL command that creates -// a command with the required structure is: -// -// CREATE TABLE myminio ( -// key_name VARCHAR(2048), -// value JSONB, -// PRIMARY KEY (key_name), -// ); -// -// MySQL's "INSERT ... ON DUPLICATE ..." feature (UPSERT) is used -// here. The implementation has been tested with MySQL Ver 14.14 -// Distrib 5.7.17. -// -// * Access format -// -// On each event, a row is appended to the configured table. There is -// no deletion or modification of existing rows. -// -// A different table schema is used for this format. A sample SQL -// commant that creates a table with the required structure is: -// -// CREATE TABLE myminio ( -// event_time TIMESTAMP WITH TIME ZONE NOT NULL, -// event_data JSONB -// ); - -package target - -import ( - "context" - "database/sql" - "encoding/json" - "errors" - "fmt" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/go-sql-driver/mysql" - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" -) - -const ( - mysqlTableExists = `SELECT 1 FROM %s;` - mysqlCreateNamespaceTable = `CREATE TABLE %s (key_name VARCHAR(2048), value JSON, PRIMARY KEY (key_name));` - mysqlCreateAccessTable = `CREATE TABLE %s (event_time DATETIME NOT NULL, event_data JSON);` - - mysqlUpdateRow = `INSERT INTO %s (key_name, value) VALUES (?, ?) 
ON DUPLICATE KEY UPDATE value=VALUES(value);` - mysqlDeleteRow = `DELETE FROM %s WHERE key_name = ?;` - mysqlInsertRow = `INSERT INTO %s (event_time, event_data) VALUES (?, ?);` -) - -// MySQL related constants -const ( - MySQLFormat = "format" - MySQLDSNString = "dsn_string" - MySQLTable = "table" - MySQLHost = "host" - MySQLPort = "port" - MySQLUsername = "username" - MySQLPassword = "password" - MySQLDatabase = "database" - MySQLQueueLimit = "queue_limit" - MySQLQueueDir = "queue_dir" - - EnvMySQLEnable = "MINIO_NOTIFY_MYSQL_ENABLE" - EnvMySQLFormat = "MINIO_NOTIFY_MYSQL_FORMAT" - EnvMySQLDSNString = "MINIO_NOTIFY_MYSQL_DSN_STRING" - EnvMySQLTable = "MINIO_NOTIFY_MYSQL_TABLE" - EnvMySQLHost = "MINIO_NOTIFY_MYSQL_HOST" - EnvMySQLPort = "MINIO_NOTIFY_MYSQL_PORT" - EnvMySQLUsername = "MINIO_NOTIFY_MYSQL_USERNAME" - EnvMySQLPassword = "MINIO_NOTIFY_MYSQL_PASSWORD" - EnvMySQLDatabase = "MINIO_NOTIFY_MYSQL_DATABASE" - EnvMySQLQueueLimit = "MINIO_NOTIFY_MYSQL_QUEUE_LIMIT" - EnvMySQLQueueDir = "MINIO_NOTIFY_MYSQL_QUEUE_DIR" -) - -// MySQLArgs - MySQL target arguments. -type MySQLArgs struct { - Enable bool `json:"enable"` - Format string `json:"format"` - DSN string `json:"dsnString"` - Table string `json:"table"` - Host xnet.URL `json:"host"` - Port string `json:"port"` - User string `json:"user"` - Password string `json:"password"` - Database string `json:"database"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` -} - -// Validate MySQLArgs fields -func (m MySQLArgs) Validate() error { - if !m.Enable { - return nil - } - - if m.Format != "" { - f := strings.ToLower(m.Format) - if f != event.NamespaceFormat && f != event.AccessFormat { - return fmt.Errorf("unrecognized format") - } - } - - if m.Table == "" { - return fmt.Errorf("table unspecified") - } - - if m.DSN != "" { - if _, err := mysql.ParseDSN(m.DSN); err != nil { - return err - } - } else { - // Some fields need to be specified when DSN is unspecified - if m.Port == "" { - return fmt.Errorf("unspecified port") - } - if _, err := strconv.Atoi(m.Port); err != nil { - return fmt.Errorf("invalid port") - } - if m.Database == "" { - return fmt.Errorf("database unspecified") - } - } - - if m.QueueDir != "" { - if !filepath.IsAbs(m.QueueDir) { - return errors.New("queueDir path should be absolute") - } - } - - return nil -} - -// MySQLTarget - MySQL target. -type MySQLTarget struct { - id event.TargetID - args MySQLArgs - updateStmt *sql.Stmt - deleteStmt *sql.Stmt - insertStmt *sql.Stmt - db *sql.DB - store Store - firstPing bool - loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}) -} - -// ID - returns target ID. -func (target *MySQLTarget) ID() event.TargetID { - return target.id -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *MySQLTarget) HasQueueStore() bool { - return target.store != nil -} - -// IsActive - Return true if target is up and active -func (target *MySQLTarget) IsActive() (bool, error) { - if target.db == nil { - db, sErr := sql.Open("mysql", target.args.DSN) - if sErr != nil { - return false, sErr - } - target.db = db - } - if err := target.db.Ping(); err != nil { - if IsConnErr(err) { - return false, errNotConnected - } - return false, err - } - return true, nil -} - -// Save - saves the events to the store which will be replayed when the SQL connection is active. 
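The namespace format above keeps exactly one row per object key by relying on MySQL's INSERT ... ON DUPLICATE KEY UPDATE. A minimal sketch of how that prepared statement is used follows, assuming a reachable MySQL server; the DSN, table name, and sample values are placeholders, not taken from the diff.

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

func main() {
	// Placeholder DSN; the deleted code builds this from MySQLArgs or takes
	// it verbatim from the dsn_string setting.
	db, err := sql.Open("mysql", "user:password@tcp(localhost:3306)/minio_events")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	table := "myminio"
	upsert, err := db.Prepare(fmt.Sprintf(
		"INSERT INTO %s (key_name, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value);", table))
	if err != nil {
		log.Fatal(err)
	}
	defer upsert.Close()

	// One row per object key; the value column holds the JSON-encoded event.
	if _, err := upsert.Exec("images/beach.jpg", `{"Records":[{"eventName":"s3:ObjectCreated:Put"}]}`); err != nil {
		log.Fatal(err)
	}
}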
-func (target *MySQLTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - _, err := target.IsActive() - if err != nil { - return err - } - return target.send(eventData) -} - -// send - sends an event to the mysql. -func (target *MySQLTarget) send(eventData event.Event) error { - if target.args.Format == event.NamespaceFormat { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - key := eventData.S3.Bucket.Name + "/" + objectName - - if eventData.EventName == event.ObjectRemovedDelete { - _, err = target.deleteStmt.Exec(key) - } else { - var data []byte - if data, err = json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}); err != nil { - return err - } - - _, err = target.updateStmt.Exec(key, data) - } - - return err - } - - if target.args.Format == event.AccessFormat { - eventTime, err := time.Parse(event.AMZTimeFormat, eventData.EventTime) - if err != nil { - return err - } - - data, err := json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}) - if err != nil { - return err - } - - _, err = target.insertStmt.Exec(eventTime, data) - - return err - } - - return nil -} - -// Send - reads an event from store and sends it to MySQL. -func (target *MySQLTarget) Send(eventKey string) error { - - _, err := target.IsActive() - if err != nil { - return err - } - - if !target.firstPing { - if err := target.executeStmts(); err != nil { - if IsConnErr(err) { - return errNotConnected - } - return err - } - } - - eventData, eErr := target.store.Get(eventKey) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and wouldve been already been sent successfully. - if os.IsNotExist(eErr) { - return nil - } - return eErr - } - - if err := target.send(eventData); err != nil { - if IsConnErr(err) { - return errNotConnected - } - return err - } - - // Delete the event from store. - return target.store.Del(eventKey) -} - -// Close - closes underneath connections to MySQL database. -func (target *MySQLTarget) Close() error { - if target.updateStmt != nil { - // FIXME: log returned error. ignore time being. - _ = target.updateStmt.Close() - } - - if target.deleteStmt != nil { - // FIXME: log returned error. ignore time being. - _ = target.deleteStmt.Close() - } - - if target.insertStmt != nil { - // FIXME: log returned error. ignore time being. - _ = target.insertStmt.Close() - } - - return target.db.Close() -} - -// Executes the table creation statements. 
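The MySQL send method above stores each event wrapped in a single-element Records array, via json.Marshal(struct{ Records []event.Event }{...}). A small sketch of that payload shape, using a stand-in event type since the real event.Event carries many more fields:

package main

import (
	"encoding/json"
	"fmt"
)

// stubEvent stands in for event.Event; only illustrative fields are shown.
type stubEvent struct {
	EventName string `json:"eventName"`
	Key       string `json:"key"`
}

func main() {
	ev := stubEvent{EventName: "s3:ObjectCreated:Put", Key: "images/beach.jpg"}

	// The notifier wraps every event in a one-element Records array, so the
	// stored JSON has the same top-level shape as an S3 notification.
	payload, err := json.Marshal(struct{ Records []stubEvent }{[]stubEvent{ev}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
	// {"Records":[{"eventName":"s3:ObjectCreated:Put","key":"images/beach.jpg"}]}
}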
-func (target *MySQLTarget) executeStmts() error { - - _, err := target.db.Exec(fmt.Sprintf(mysqlTableExists, target.args.Table)) - if err != nil { - createStmt := mysqlCreateNamespaceTable - if target.args.Format == event.AccessFormat { - createStmt = mysqlCreateAccessTable - } - - if _, dbErr := target.db.Exec(fmt.Sprintf(createStmt, target.args.Table)); dbErr != nil { - return dbErr - } - } - - switch target.args.Format { - case event.NamespaceFormat: - // insert or update statement - if target.updateStmt, err = target.db.Prepare(fmt.Sprintf(mysqlUpdateRow, target.args.Table)); err != nil { - return err - } - // delete statement - if target.deleteStmt, err = target.db.Prepare(fmt.Sprintf(mysqlDeleteRow, target.args.Table)); err != nil { - return err - } - case event.AccessFormat: - // insert statement - if target.insertStmt, err = target.db.Prepare(fmt.Sprintf(mysqlInsertRow, target.args.Table)); err != nil { - return err - } - } - - return nil - -} - -// NewMySQLTarget - creates new MySQL target. -func NewMySQLTarget(id string, args MySQLArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), test bool) (*MySQLTarget, error) { - if args.DSN == "" { - config := mysql.Config{ - User: args.User, - Passwd: args.Password, - Net: "tcp", - Addr: args.Host.String() + ":" + args.Port, - DBName: args.Database, - AllowNativePasswords: true, - CheckConnLiveness: true, - } - - args.DSN = config.FormatDSN() - } - - target := &MySQLTarget{ - id: event.TargetID{ID: id, Name: "mysql"}, - args: args, - firstPing: false, - loggerOnce: loggerOnce, - } - - db, err := sql.Open("mysql", args.DSN) - if err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - target.db = db - - var store Store - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-mysql-"+id) - store = NewQueueStore(queueDir, args.QueueLimit) - if oErr := store.Open(); oErr != nil { - target.loggerOnce(context.Background(), oErr, target.ID()) - return target, oErr - } - target.store = store - } - - err = target.db.Ping() - if err != nil { - if target.store == nil || !(IsConnRefusedErr(err) || IsConnResetErr(err)) { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - } else { - if err = target.executeStmts(); err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - target.firstPing = true - } - - if target.store != nil && !test { - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - // Start replaying events from the store. - go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - - return target, nil -} diff --git a/pkg/event/target/mysql_test.go b/pkg/event/target/mysql_test.go deleted file mode 100644 index b2f79ffb..00000000 --- a/pkg/event/target/mysql_test.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "database/sql" - "testing" -) - -// TestPostgreSQLRegistration checks if sql driver -// is registered and fails otherwise. -func TestMySQLRegistration(t *testing.T) { - var found bool - for _, drv := range sql.Drivers() { - if drv == "mysql" { - found = true - break - } - } - if !found { - t.Fatal("mysql driver not registered") - } -} diff --git a/pkg/event/target/nats.go b/pkg/event/target/nats.go deleted file mode 100644 index 93e8a933..00000000 --- a/pkg/event/target/nats.go +++ /dev/null @@ -1,372 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "context" - "crypto/tls" - "crypto/x509" - "encoding/json" - "errors" - "net/url" - "os" - "path/filepath" - - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" - "github.com/nats-io/nats.go" - "github.com/nats-io/stan.go" -) - -// NATS related constants -const ( - NATSAddress = "address" - NATSSubject = "subject" - NATSUsername = "username" - NATSPassword = "password" - NATSToken = "token" - NATSTLS = "tls" - NATSTLSSkipVerify = "tls_skip_verify" - NATSPingInterval = "ping_interval" - NATSQueueDir = "queue_dir" - NATSQueueLimit = "queue_limit" - NATSCertAuthority = "cert_authority" - NATSClientCert = "client_cert" - NATSClientKey = "client_key" - - // Streaming constants - NATSStreaming = "streaming" - NATSStreamingClusterID = "streaming_cluster_id" - NATSStreamingAsync = "streaming_async" - NATSStreamingMaxPubAcksInFlight = "streaming_max_pub_acks_in_flight" - - EnvNATSEnable = "MINIO_NOTIFY_NATS_ENABLE" - EnvNATSAddress = "MINIO_NOTIFY_NATS_ADDRESS" - EnvNATSSubject = "MINIO_NOTIFY_NATS_SUBJECT" - EnvNATSUsername = "MINIO_NOTIFY_NATS_USERNAME" - EnvNATSPassword = "MINIO_NOTIFY_NATS_PASSWORD" - EnvNATSToken = "MINIO_NOTIFY_NATS_TOKEN" - EnvNATSTLS = "MINIO_NOTIFY_NATS_TLS" - EnvNATSTLSSkipVerify = "MINIO_NOTIFY_NATS_TLS_SKIP_VERIFY" - EnvNATSPingInterval = "MINIO_NOTIFY_NATS_PING_INTERVAL" - EnvNATSQueueDir = "MINIO_NOTIFY_NATS_QUEUE_DIR" - EnvNATSQueueLimit = "MINIO_NOTIFY_NATS_QUEUE_LIMIT" - EnvNATSCertAuthority = "MINIO_NOTIFY_NATS_CERT_AUTHORITY" - EnvNATSClientCert = "MINIO_NOTIFY_NATS_CLIENT_CERT" - EnvNATSClientKey = "MINIO_NOTIFY_NATS_CLIENT_KEY" - - // Streaming constants - EnvNATSStreaming = "MINIO_NOTIFY_NATS_STREAMING" - EnvNATSStreamingClusterID = "MINIO_NOTIFY_NATS_STREAMING_CLUSTER_ID" - EnvNATSStreamingAsync = "MINIO_NOTIFY_NATS_STREAMING_ASYNC" - EnvNATSStreamingMaxPubAcksInFlight = "MINIO_NOTIFY_NATS_STREAMING_MAX_PUB_ACKS_IN_FLIGHT" -) - -// NATSArgs - NATS target arguments. 
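A brief aside on TestMySQLRegistration above: the test passes because importing github.com/go-sql-driver/mysql for side effects registers the driver in an init function; the lib/pq import later in this diff works the same way. A minimal sketch of that pattern:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql" // blank import: init() calls sql.Register("mysql", ...)
)

func main() {
	for _, name := range sql.Drivers() {
		if name == "mysql" {
			fmt.Println("mysql driver registered")
			return
		}
	}
	fmt.Println("mysql driver not registered")
}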
-type NATSArgs struct { - Enable bool `json:"enable"` - Address xnet.Host `json:"address"` - Subject string `json:"subject"` - Username string `json:"username"` - Password string `json:"password"` - Token string `json:"token"` - TLS bool `json:"tls"` - TLSSkipVerify bool `json:"tlsSkipVerify"` - Secure bool `json:"secure"` - CertAuthority string `json:"certAuthority"` - ClientCert string `json:"clientCert"` - ClientKey string `json:"clientKey"` - PingInterval int64 `json:"pingInterval"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` - Streaming struct { - Enable bool `json:"enable"` - ClusterID string `json:"clusterID"` - Async bool `json:"async"` - MaxPubAcksInflight int `json:"maxPubAcksInflight"` - } `json:"streaming"` - - RootCAs *x509.CertPool `json:"-"` -} - -// Validate NATSArgs fields -func (n NATSArgs) Validate() error { - if !n.Enable { - return nil - } - - if n.Address.IsEmpty() { - return errors.New("empty address") - } - - if n.Subject == "" { - return errors.New("empty subject") - } - - if n.ClientCert != "" && n.ClientKey == "" || n.ClientCert == "" && n.ClientKey != "" { - return errors.New("cert and key must be specified as a pair") - } - - if n.Username != "" && n.Password == "" || n.Username == "" && n.Password != "" { - return errors.New("username and password must be specified as a pair") - } - - if n.Streaming.Enable { - if n.Streaming.ClusterID == "" { - return errors.New("empty cluster id") - } - } - - if n.QueueDir != "" { - if !filepath.IsAbs(n.QueueDir) { - return errors.New("queueDir path should be absolute") - } - } - - return nil -} - -// To obtain a nats connection from args. -func (n NATSArgs) connectNats() (*nats.Conn, error) { - connOpts := []nats.Option{nats.Name("Minio Notification")} - if n.Username != "" && n.Password != "" { - connOpts = append(connOpts, nats.UserInfo(n.Username, n.Password)) - } - if n.Token != "" { - connOpts = append(connOpts, nats.Token(n.Token)) - } - if n.Secure || n.TLS && n.TLSSkipVerify { - connOpts = append(connOpts, nats.Secure(nil)) - } else if n.TLS { - connOpts = append(connOpts, nats.Secure(&tls.Config{RootCAs: n.RootCAs})) - } - if n.CertAuthority != "" { - connOpts = append(connOpts, nats.RootCAs(n.CertAuthority)) - } - if n.ClientCert != "" && n.ClientKey != "" { - connOpts = append(connOpts, nats.ClientCert(n.ClientCert, n.ClientKey)) - } - return nats.Connect(n.Address.String(), connOpts...) -} - -// To obtain a streaming connection from args. -func (n NATSArgs) connectStan() (stan.Conn, error) { - scheme := "nats" - if n.Secure { - scheme = "tls" - } - - var addressURL string - if n.Username != "" && n.Password != "" { - addressURL = scheme + "://" + n.Username + ":" + n.Password + "@" + n.Address.String() - } else if n.Token != "" { - addressURL = scheme + "://" + n.Token + "@" + n.Address.String() - } else { - addressURL = scheme + "://" + n.Address.String() - } - - clientID, err := getNewUUID() - if err != nil { - return nil, err - } - - connOpts := []stan.Option{stan.NatsURL(addressURL)} - if n.Streaming.MaxPubAcksInflight > 0 { - connOpts = append(connOpts, stan.MaxPubAcksInflight(n.Streaming.MaxPubAcksInflight)) - } - - return stan.Connect(n.Streaming.ClusterID, clientID, connOpts...) -} - -// NATSTarget - NATS target. -type NATSTarget struct { - id event.TargetID - args NATSArgs - natsConn *nats.Conn - stanConn stan.Conn - store Store - loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}) -} - -// ID - returns target ID. 
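connectStan above builds a single URL that embeds either credentials or a token, then dials the NATS Streaming cluster with a random client ID. A sketch of just the URL construction, with placeholder values; buildStanURL is an illustrative helper, not part of the deleted code.

package main

import "fmt"

// buildStanURL mirrors the address construction in NATSArgs.connectStan:
// scheme selection by TLS, then user:password@ or token@ when configured.
func buildStanURL(address, username, password, token string, secure bool) string {
	scheme := "nats"
	if secure {
		scheme = "tls"
	}
	switch {
	case username != "" && password != "":
		return scheme + "://" + username + ":" + password + "@" + address
	case token != "":
		return scheme + "://" + token + "@" + address
	default:
		return scheme + "://" + address
	}
}

func main() {
	fmt.Println(buildStanURL("localhost:4222", "", "", "", false))            // nats://localhost:4222
	fmt.Println(buildStanURL("localhost:4222", "minio", "secret", "", true))  // tls://minio:secret@localhost:4222
}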
-func (target *NATSTarget) ID() event.TargetID { - return target.id -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *NATSTarget) HasQueueStore() bool { - return target.store != nil -} - -// IsActive - Return true if target is up and active -func (target *NATSTarget) IsActive() (bool, error) { - var connErr error - if target.args.Streaming.Enable { - if target.stanConn == nil || target.stanConn.NatsConn() == nil { - target.stanConn, connErr = target.args.connectStan() - } else { - if !target.stanConn.NatsConn().IsConnected() { - return false, errNotConnected - } - } - } else { - if target.natsConn == nil { - target.natsConn, connErr = target.args.connectNats() - } else { - if !target.natsConn.IsConnected() { - return false, errNotConnected - } - } - } - - if connErr != nil { - if connErr.Error() == nats.ErrNoServers.Error() { - return false, errNotConnected - } - return false, connErr - } - - return true, nil -} - -// Save - saves the events to the store which will be replayed when the Nats connection is active. -func (target *NATSTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - _, err := target.IsActive() - if err != nil { - return err - } - return target.send(eventData) -} - -// send - sends an event to the Nats. -func (target *NATSTarget) send(eventData event.Event) error { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - key := eventData.S3.Bucket.Name + "/" + objectName - - data, err := json.Marshal(event.Log{EventName: eventData.EventName, Key: key, Records: []event.Event{eventData}}) - if err != nil { - return err - } - - if target.stanConn != nil { - if target.args.Streaming.Async { - _, err = target.stanConn.PublishAsync(target.args.Subject, data, nil) - } else { - err = target.stanConn.Publish(target.args.Subject, data) - } - } else { - err = target.natsConn.Publish(target.args.Subject, data) - } - return err -} - -// Send - sends event to Nats. -func (target *NATSTarget) Send(eventKey string) error { - _, err := target.IsActive() - if err != nil { - return err - } - - eventData, eErr := target.store.Get(eventKey) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and wouldve been already been sent successfully. - if os.IsNotExist(eErr) { - return nil - } - return eErr - } - - if err := target.send(eventData); err != nil { - return err - } - - return target.store.Del(eventKey) -} - -// Close - closes underneath connections to NATS server. -func (target *NATSTarget) Close() (err error) { - if target.stanConn != nil { - // closing the streaming connection does not close the provided NATS connection. - if target.stanConn.NatsConn() != nil { - target.stanConn.NatsConn().Close() - } - err = target.stanConn.Close() - } - - if target.natsConn != nil { - target.natsConn.Close() - } - - return err -} - -// NewNATSTarget - creates new NATS target. 
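The NATS send method above, like the Kafka, MQTT, and NSQ ones in this diff, derives its key by URL-unescaping the S3 object key and prefixing the bucket name, then publishes an event.Log built around that key. A tiny sketch of just the key derivation, with placeholder bucket and object values:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Object keys arrive URL-encoded in the S3 event, so send methods
	// unescape them before building the "bucket/object" key used as the
	// message key, document ID, or event.Log Key field.
	objectName, err := url.QueryUnescape("holiday%2Fbeach.jpg")
	if err != nil {
		panic(err)
	}
	key := "images" + "/" + objectName
	fmt.Println(key) // images/holiday/beach.jpg
}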
-func NewNATSTarget(id string, args NATSArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), test bool) (*NATSTarget, error) { - var natsConn *nats.Conn - var stanConn stan.Conn - - var err error - - var store Store - - target := &NATSTarget{ - id: event.TargetID{ID: id, Name: "nats"}, - args: args, - loggerOnce: loggerOnce, - } - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-nats-"+id) - store = NewQueueStore(queueDir, args.QueueLimit) - if oErr := store.Open(); oErr != nil { - target.loggerOnce(context.Background(), oErr, target.ID()) - return target, oErr - } - target.store = store - } - - if args.Streaming.Enable { - stanConn, err = args.connectStan() - target.stanConn = stanConn - } else { - natsConn, err = args.connectNats() - target.natsConn = natsConn - } - - if err != nil { - if store == nil || err.Error() != nats.ErrNoServers.Error() { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - } - - if target.store != nil && !test { - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - // Start replaying events from the store. - go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - - return target, nil -} diff --git a/pkg/event/target/nats_test.go b/pkg/event/target/nats_test.go deleted file mode 100644 index c75cf615..00000000 --- a/pkg/event/target/nats_test.go +++ /dev/null @@ -1,138 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package target - -import ( - "path" - "path/filepath" - "testing" - - xnet "github.com/minio/minio/pkg/net" - natsserver "github.com/nats-io/nats-server/v2/test" -) - -func TestNatsConnPlain(t *testing.T) { - opts := natsserver.DefaultTestOptions - opts.Port = 14222 - s := natsserver.RunServer(&opts) - defer s.Shutdown() - - clientConfig := &NATSArgs{ - Enable: true, - Address: xnet.Host{Name: "localhost", - Port: (xnet.Port(opts.Port)), - IsPortSet: true}, - Subject: "test", - } - con, err := clientConfig.connectNats() - if err != nil { - t.Errorf("Could not connect to nats: %v", err) - } - defer con.Close() -} - -func TestNatsConnUserPass(t *testing.T) { - opts := natsserver.DefaultTestOptions - opts.Port = 14223 - opts.Username = "testminio" - opts.Password = "miniotest" - s := natsserver.RunServer(&opts) - defer s.Shutdown() - - clientConfig := &NATSArgs{ - Enable: true, - Address: xnet.Host{Name: "localhost", - Port: (xnet.Port(opts.Port)), - IsPortSet: true}, - Subject: "test", - Username: opts.Username, - Password: opts.Password, - } - - con, err := clientConfig.connectNats() - if err != nil { - t.Errorf("Could not connect to nats: %v", err) - } - defer con.Close() -} - -func TestNatsConnToken(t *testing.T) { - opts := natsserver.DefaultTestOptions - opts.Port = 14223 - opts.Authorization = "s3cr3t" - s := natsserver.RunServer(&opts) - defer s.Shutdown() - - clientConfig := &NATSArgs{ - Enable: true, - Address: xnet.Host{Name: "localhost", - Port: (xnet.Port(opts.Port)), - IsPortSet: true}, - Subject: "test", - Token: opts.Authorization, - } - - con, err := clientConfig.connectNats() - if err != nil { - t.Errorf("Could not connect to nats: %v", err) - } - defer con.Close() -} - -func TestNatsConnTLSCustomCA(t *testing.T) { - s, opts := natsserver.RunServerWithConfig(filepath.Join("testdata", "nats_tls.conf")) - defer s.Shutdown() - - clientConfig := &NATSArgs{ - Enable: true, - Address: xnet.Host{Name: "localhost", - Port: (xnet.Port(opts.Port)), - IsPortSet: true}, - Subject: "test", - Secure: true, - CertAuthority: path.Join("testdata", "certs", "root_ca_cert.pem"), - } - - con, err := clientConfig.connectNats() - if err != nil { - t.Errorf("Could not connect to nats: %v", err) - } - defer con.Close() -} - -func TestNatsConnTLSClientAuthorization(t *testing.T) { - s, opts := natsserver.RunServerWithConfig(filepath.Join("testdata", "nats_tls_client_cert.conf")) - defer s.Shutdown() - - clientConfig := &NATSArgs{ - Enable: true, - Address: xnet.Host{Name: "localhost", - Port: (xnet.Port(opts.Port)), - IsPortSet: true}, - Subject: "test", - Secure: true, - CertAuthority: path.Join("testdata", "certs", "root_ca_cert.pem"), - ClientCert: path.Join("testdata", "certs", "nats_client_cert.pem"), - ClientKey: path.Join("testdata", "certs", "nats_client_key.pem"), - } - - con, err := clientConfig.connectNats() - if err != nil { - t.Errorf("Could not connect to nats: %v", err) - } - defer con.Close() -} diff --git a/pkg/event/target/nsq.go b/pkg/event/target/nsq.go deleted file mode 100644 index 5a1be991..00000000 --- a/pkg/event/target/nsq.go +++ /dev/null @@ -1,241 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "context" - "crypto/tls" - "encoding/json" - "errors" - "net/url" - "os" - "path/filepath" - - "github.com/nsqio/go-nsq" - - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" -) - -// NSQ constants -const ( - NSQAddress = "nsqd_address" - NSQTopic = "topic" - NSQTLS = "tls" - NSQTLSSkipVerify = "tls_skip_verify" - NSQQueueDir = "queue_dir" - NSQQueueLimit = "queue_limit" - - EnvNSQEnable = "MINIO_NOTIFY_NSQ" - EnvNSQAddress = "MINIO_NOTIFY_NSQ_NSQD_ADDRESS" - EnvNSQTopic = "MINIO_NOTIFY_NSQ_TOPIC" - EnvNSQTLS = "MINIO_NOTIFY_NSQ_TLS" - EnvNSQTLSSkipVerify = "MINIO_NOTIFY_NSQ_TLS_SKIP_VERIFY" - EnvNSQQueueDir = "MINIO_NOTIFY_NSQ_QUEUE_DIR" - EnvNSQQueueLimit = "MINIO_NOTIFY_NSQ_QUEUE_LIMIT" -) - -// NSQArgs - NSQ target arguments. -type NSQArgs struct { - Enable bool `json:"enable"` - NSQDAddress xnet.Host `json:"nsqdAddress"` - Topic string `json:"topic"` - TLS struct { - Enable bool `json:"enable"` - SkipVerify bool `json:"skipVerify"` - } `json:"tls"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` -} - -// Validate NSQArgs fields -func (n NSQArgs) Validate() error { - if !n.Enable { - return nil - } - - if n.NSQDAddress.IsEmpty() { - return errors.New("empty nsqdAddress") - } - - if n.Topic == "" { - return errors.New("empty topic") - } - if n.QueueDir != "" { - if !filepath.IsAbs(n.QueueDir) { - return errors.New("queueDir path should be absolute") - } - } - - return nil -} - -// NSQTarget - NSQ target. -type NSQTarget struct { - id event.TargetID - args NSQArgs - producer *nsq.Producer - store Store - config *nsq.Config - loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}) -} - -// ID - returns target ID. -func (target *NSQTarget) ID() event.TargetID { - return target.id -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *NSQTarget) HasQueueStore() bool { - return target.store != nil -} - -// IsActive - Return true if target is up and active -func (target *NSQTarget) IsActive() (bool, error) { - if target.producer == nil { - producer, err := nsq.NewProducer(target.args.NSQDAddress.String(), target.config) - if err != nil { - return false, err - } - target.producer = producer - } - - if err := target.producer.Ping(); err != nil { - // To treat "connection refused" errors as errNotConnected. - if IsConnRefusedErr(err) { - return false, errNotConnected - } - return false, err - } - return true, nil -} - -// Save - saves the events to the store which will be replayed when the nsq connection is active. -func (target *NSQTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - _, err := target.IsActive() - if err != nil { - return err - } - return target.send(eventData) -} - -// send - sends an event to the NSQ. 
-func (target *NSQTarget) send(eventData event.Event) error { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - key := eventData.S3.Bucket.Name + "/" + objectName - - data, err := json.Marshal(event.Log{EventName: eventData.EventName, Key: key, Records: []event.Event{eventData}}) - if err != nil { - return err - } - - return target.producer.Publish(target.args.Topic, data) -} - -// Send - reads an event from store and sends it to NSQ. -func (target *NSQTarget) Send(eventKey string) error { - _, err := target.IsActive() - if err != nil { - return err - } - - eventData, eErr := target.store.Get(eventKey) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and wouldve been already been sent successfully. - if os.IsNotExist(eErr) { - return nil - } - return eErr - } - - if err := target.send(eventData); err != nil { - return err - } - - // Delete the event from store. - return target.store.Del(eventKey) -} - -// Close - closes underneath connections to NSQD server. -func (target *NSQTarget) Close() (err error) { - if target.producer != nil { - // this blocks until complete: - target.producer.Stop() - } - return nil -} - -// NewNSQTarget - creates new NSQ target. -func NewNSQTarget(id string, args NSQArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), test bool) (*NSQTarget, error) { - config := nsq.NewConfig() - if args.TLS.Enable { - config.TlsV1 = true - config.TlsConfig = &tls.Config{ - InsecureSkipVerify: args.TLS.SkipVerify, - } - } - - var store Store - - target := &NSQTarget{ - id: event.TargetID{ID: id, Name: "nsq"}, - args: args, - config: config, - loggerOnce: loggerOnce, - } - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-nsq-"+id) - store = NewQueueStore(queueDir, args.QueueLimit) - if oErr := store.Open(); oErr != nil { - target.loggerOnce(context.Background(), oErr, target.ID()) - return nil, oErr - } - target.store = store - } - - producer, err := nsq.NewProducer(args.NSQDAddress.String(), config) - if err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - target.producer = producer - - if err := target.producer.Ping(); err != nil { - // To treat "connection refused" errors as errNotConnected. - if target.store == nil || !(IsConnRefusedErr(err) || IsConnResetErr(err)) { - target.loggerOnce(context.Background(), err, target.ID()) - return nil, err - } - } - - if target.store != nil && !test { - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - // Start replaying events from the store. - go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - - return target, nil -} diff --git a/pkg/event/target/nsq_test.go b/pkg/event/target/nsq_test.go deleted file mode 100644 index 81d70a00..00000000 --- a/pkg/event/target/nsq_test.go +++ /dev/null @@ -1,97 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "testing" - - xnet "github.com/minio/minio/pkg/net" -) - -func TestNSQArgs_Validate(t *testing.T) { - type fields struct { - Enable bool - NSQDAddress xnet.Host - Topic string - TLS struct { - Enable bool - SkipVerify bool - } - } - tests := []struct { - name string - fields fields - wantErr bool - }{ - { - name: "test1_missing_topic", - fields: fields{ - Enable: true, - NSQDAddress: xnet.Host{ - Name: "127.0.0.1", - Port: 4150, - IsPortSet: true, - }, - Topic: "", - }, - wantErr: true, - }, - { - name: "test2_disabled", - fields: fields{ - Enable: false, - NSQDAddress: xnet.Host{}, - Topic: "topic", - }, - wantErr: false, - }, - { - name: "test3_OK", - fields: fields{ - Enable: true, - NSQDAddress: xnet.Host{ - Name: "127.0.0.1", - Port: 4150, - IsPortSet: true, - }, - Topic: "topic", - }, - wantErr: false, - }, - { - name: "test4_emptynsqdaddr", - fields: fields{ - Enable: true, - NSQDAddress: xnet.Host{}, - Topic: "topic", - }, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - n := NSQArgs{ - Enable: tt.fields.Enable, - NSQDAddress: tt.fields.NSQDAddress, - Topic: tt.fields.Topic, - } - if err := n.Validate(); (err != nil) != tt.wantErr { - t.Errorf("NSQArgs.Validate() error = %v, wantErr %v", err, tt.wantErr) - } - }) - } -} diff --git a/pkg/event/target/postgresql.go b/pkg/event/target/postgresql.go deleted file mode 100644 index 409b04f4..00000000 --- a/pkg/event/target/postgresql.go +++ /dev/null @@ -1,425 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// PostgreSQL Notifier implementation. Two formats, "namespace" and -// "access" are supported. -// -// * Namespace format -// -// On each create or update object event in MinIO Object storage -// server, a row is created or updated in the table in Postgres. On -// each object removal, the corresponding row is deleted from the -// table. -// -// A table with a specific structure (column names, column types, and -// primary key/uniqueness constraint) is used. The user may set the -// table name in the configuration. A sample SQL command that creates -// a table with the required structure is: -// -// CREATE TABLE myminio ( -// key VARCHAR PRIMARY KEY, -// value JSONB -// ); -// -// PostgreSQL's "INSERT ... ON CONFLICT ... DO UPDATE ..." feature -// (UPSERT) is used here, so the minimum version of PostgreSQL -// required is 9.5. -// -// * Access format -// -// On each event, a row is appended to the configured table. 
There is -// no deletion or modification of existing rows. -// -// A different table schema is used for this format. A sample SQL -// commant that creates a table with the required structure is: -// -// CREATE TABLE myminio ( -// event_time TIMESTAMP WITH TIME ZONE NOT NULL, -// event_data JSONB -// ); - -package target - -import ( - "context" - "database/sql" - "encoding/json" - "errors" - "fmt" - "net/url" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - _ "github.com/lib/pq" // Register postgres driver - - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" -) - -const ( - psqlTableExists = `SELECT 1 FROM %s;` - psqlCreateNamespaceTable = `CREATE TABLE %s (key VARCHAR PRIMARY KEY, value JSONB);` - psqlCreateAccessTable = `CREATE TABLE %s (event_time TIMESTAMP WITH TIME ZONE NOT NULL, event_data JSONB);` - - psqlUpdateRow = `INSERT INTO %s (key, value) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value;` - psqlDeleteRow = `DELETE FROM %s WHERE key = $1;` - psqlInsertRow = `INSERT INTO %s (event_time, event_data) VALUES ($1, $2);` -) - -// Postgres constants -const ( - PostgresFormat = "format" - PostgresConnectionString = "connection_string" - PostgresTable = "table" - PostgresHost = "host" - PostgresPort = "port" - PostgresUsername = "username" - PostgresPassword = "password" - PostgresDatabase = "database" - PostgresQueueDir = "queue_dir" - PostgresQueueLimit = "queue_limit" - - EnvPostgresEnable = "MINIO_NOTIFY_POSTGRES_ENABLE" - EnvPostgresFormat = "MINIO_NOTIFY_POSTGRES_FORMAT" - EnvPostgresConnectionString = "MINIO_NOTIFY_POSTGRES_CONNECTION_STRING" - EnvPostgresTable = "MINIO_NOTIFY_POSTGRES_TABLE" - EnvPostgresHost = "MINIO_NOTIFY_POSTGRES_HOST" - EnvPostgresPort = "MINIO_NOTIFY_POSTGRES_PORT" - EnvPostgresUsername = "MINIO_NOTIFY_POSTGRES_USERNAME" - EnvPostgresPassword = "MINIO_NOTIFY_POSTGRES_PASSWORD" - EnvPostgresDatabase = "MINIO_NOTIFY_POSTGRES_DATABASE" - EnvPostgresQueueDir = "MINIO_NOTIFY_POSTGRES_QUEUE_DIR" - EnvPostgresQueueLimit = "MINIO_NOTIFY_POSTGRES_QUEUE_LIMIT" -) - -// PostgreSQLArgs - PostgreSQL target arguments. -type PostgreSQLArgs struct { - Enable bool `json:"enable"` - Format string `json:"format"` - ConnectionString string `json:"connectionString"` - Table string `json:"table"` - Host xnet.Host `json:"host"` // default: localhost - Port string `json:"port"` // default: 5432 - User string `json:"user"` // default: user running minio - Password string `json:"password"` // default: no password - Database string `json:"database"` // default: same as user - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` -} - -// Validate PostgreSQLArgs fields -func (p PostgreSQLArgs) Validate() error { - if !p.Enable { - return nil - } - if p.Table == "" { - return fmt.Errorf("empty table name") - } - if p.Format != "" { - f := strings.ToLower(p.Format) - if f != event.NamespaceFormat && f != event.AccessFormat { - return fmt.Errorf("unrecognized format value") - } - } - - if p.ConnectionString != "" { - // No pq API doesn't help to validate connection string - // prior connection, so no validation for now. 
- } else { - // Some fields need to be specified when ConnectionString is unspecified - if p.Port == "" { - return fmt.Errorf("unspecified port") - } - if _, err := strconv.Atoi(p.Port); err != nil { - return fmt.Errorf("invalid port") - } - if p.Database == "" { - return fmt.Errorf("database unspecified") - } - } - - if p.QueueDir != "" { - if !filepath.IsAbs(p.QueueDir) { - return errors.New("queueDir path should be absolute") - } - } - - return nil -} - -// PostgreSQLTarget - PostgreSQL target. -type PostgreSQLTarget struct { - id event.TargetID - args PostgreSQLArgs - updateStmt *sql.Stmt - deleteStmt *sql.Stmt - insertStmt *sql.Stmt - db *sql.DB - store Store - firstPing bool - connString string - loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}) -} - -// ID - returns target ID. -func (target *PostgreSQLTarget) ID() event.TargetID { - return target.id -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *PostgreSQLTarget) HasQueueStore() bool { - return target.store != nil -} - -// IsActive - Return true if target is up and active -func (target *PostgreSQLTarget) IsActive() (bool, error) { - if target.db == nil { - db, err := sql.Open("postgres", target.connString) - if err != nil { - return false, err - } - target.db = db - } - if err := target.db.Ping(); err != nil { - if IsConnErr(err) { - return false, errNotConnected - } - return false, err - } - return true, nil -} - -// Save - saves the events to the store if questore is configured, which will be replayed when the PostgreSQL connection is active. -func (target *PostgreSQLTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - _, err := target.IsActive() - if err != nil { - return err - } - return target.send(eventData) -} - -// IsConnErr - To detect a connection error. -func IsConnErr(err error) bool { - return IsConnRefusedErr(err) || err.Error() == "sql: database is closed" || err.Error() == "sql: statement is closed" || err.Error() == "invalid connection" -} - -// send - sends an event to the PostgreSQL. -func (target *PostgreSQLTarget) send(eventData event.Event) error { - if target.args.Format == event.NamespaceFormat { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - key := eventData.S3.Bucket.Name + "/" + objectName - - if eventData.EventName == event.ObjectRemovedDelete { - _, err = target.deleteStmt.Exec(key) - } else { - var data []byte - if data, err = json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}); err != nil { - return err - } - - _, err = target.updateStmt.Exec(key, data) - } - return err - } - - if target.args.Format == event.AccessFormat { - eventTime, err := time.Parse(event.AMZTimeFormat, eventData.EventTime) - if err != nil { - return err - } - - data, err := json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}) - if err != nil { - return err - } - - if _, err = target.insertStmt.Exec(eventTime, data); err != nil { - return err - } - } - - return nil -} - -// Send - reads an event from store and sends it to PostgreSQL. 
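Before the store-backed Send wrapper that follows, a quick illustration of the prepared statements used by send above. This is a hedged sketch; "myminio" is only the sample table name from the package comment, not a required value:

    // Namespace format: upsert on object create/update, delete on object removal.
    //   INSERT INTO myminio (key, value) VALUES ($1, $2)
    //       ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value;
    //   DELETE FROM myminio WHERE key = $1;
    updateSQL := fmt.Sprintf(psqlUpdateRow, "myminio")
    deleteSQL := fmt.Sprintf(psqlDeleteRow, "myminio")

    // Access format: append-only inserts.
    //   INSERT INTO myminio (event_time, event_data) VALUES ($1, $2);
    insertSQL := fmt.Sprintf(psqlInsertRow, "myminio")
    _, _, _ = updateSQL, deleteSQL, insertSQL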
-func (target *PostgreSQLTarget) Send(eventKey string) error { - _, err := target.IsActive() - if err != nil { - return err - } - if !target.firstPing { - if err := target.executeStmts(); err != nil { - if IsConnErr(err) { - return errNotConnected - } - return err - } - } - - eventData, eErr := target.store.Get(eventKey) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and wouldve been already been sent successfully. - if os.IsNotExist(eErr) { - return nil - } - return eErr - } - - if err := target.send(eventData); err != nil { - if IsConnErr(err) { - return errNotConnected - } - return err - } - - // Delete the event from store. - return target.store.Del(eventKey) -} - -// Close - closes underneath connections to PostgreSQL database. -func (target *PostgreSQLTarget) Close() error { - if target.updateStmt != nil { - // FIXME: log returned error. ignore time being. - _ = target.updateStmt.Close() - } - - if target.deleteStmt != nil { - // FIXME: log returned error. ignore time being. - _ = target.deleteStmt.Close() - } - - if target.insertStmt != nil { - // FIXME: log returned error. ignore time being. - _ = target.insertStmt.Close() - } - - return target.db.Close() -} - -// Executes the table creation statements. -func (target *PostgreSQLTarget) executeStmts() error { - - _, err := target.db.Exec(fmt.Sprintf(psqlTableExists, target.args.Table)) - if err != nil { - createStmt := psqlCreateNamespaceTable - if target.args.Format == event.AccessFormat { - createStmt = psqlCreateAccessTable - } - - if _, dbErr := target.db.Exec(fmt.Sprintf(createStmt, target.args.Table)); dbErr != nil { - return dbErr - } - } - - switch target.args.Format { - case event.NamespaceFormat: - // insert or update statement - if target.updateStmt, err = target.db.Prepare(fmt.Sprintf(psqlUpdateRow, target.args.Table)); err != nil { - return err - } - // delete statement - if target.deleteStmt, err = target.db.Prepare(fmt.Sprintf(psqlDeleteRow, target.args.Table)); err != nil { - return err - } - case event.AccessFormat: - // insert statement - if target.insertStmt, err = target.db.Prepare(fmt.Sprintf(psqlInsertRow, target.args.Table)); err != nil { - return err - } - } - - return nil -} - -// NewPostgreSQLTarget - creates new PostgreSQL target. 
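The constructor that follows joins the optional connection fields into a libpq-style keyword/value string before opening the database. A minimal sketch under assumed values (the host, credentials, and database name below are illustrative, not defaults of the original code):

    // Roughly what NewPostgreSQLTarget assembles when ConnectionString is empty:
    connStr := "host=localhost port=5432 user=minio password=secret dbname=events"
    db, err := sql.Open("postgres", connStr) // driver registered by the blank lib/pq import above
    if err != nil {
        // handle error
    }
    defer db.Close()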
-func NewPostgreSQLTarget(id string, args PostgreSQLArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), test bool) (*PostgreSQLTarget, error) { - params := []string{args.ConnectionString} - if !args.Host.IsEmpty() { - params = append(params, "host="+args.Host.String()) - } - if args.Port != "" { - params = append(params, "port="+args.Port) - } - if args.User != "" { - params = append(params, "user="+args.User) - } - if args.Password != "" { - params = append(params, "password="+args.Password) - } - if args.Database != "" { - params = append(params, "dbname="+args.Database) - } - connStr := strings.Join(params, " ") - - target := &PostgreSQLTarget{ - id: event.TargetID{ID: id, Name: "postgresql"}, - args: args, - firstPing: false, - connString: connStr, - loggerOnce: loggerOnce, - } - - db, err := sql.Open("postgres", connStr) - if err != nil { - return target, err - } - target.db = db - - var store Store - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-postgresql-"+id) - store = NewQueueStore(queueDir, args.QueueLimit) - if oErr := store.Open(); oErr != nil { - target.loggerOnce(context.Background(), oErr, target.ID()) - return target, oErr - } - target.store = store - } - - err = target.db.Ping() - if err != nil { - if target.store == nil || !(IsConnRefusedErr(err) || IsConnResetErr(err)) { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - } else { - if err = target.executeStmts(); err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - target.firstPing = true - } - - if target.store != nil && !test { - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - // Start replaying events from the store. - go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - - return target, nil -} diff --git a/pkg/event/target/postgresql_test.go b/pkg/event/target/postgresql_test.go deleted file mode 100644 index c90777c2..00000000 --- a/pkg/event/target/postgresql_test.go +++ /dev/null @@ -1,37 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "database/sql" - "testing" -) - -// TestPostgreSQLRegistration checks if postgres driver -// is registered and fails otherwise. -func TestPostgreSQLRegistration(t *testing.T) { - var found bool - for _, drv := range sql.Drivers() { - if drv == "postgres" { - found = true - break - } - } - if !found { - t.Fatal("postgres driver not registered") - } -} diff --git a/pkg/event/target/queuestore.go b/pkg/event/target/queuestore.go deleted file mode 100644 index 4b2dcf15..00000000 --- a/pkg/event/target/queuestore.go +++ /dev/null @@ -1,206 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "encoding/json" - "io/ioutil" - "math" - "os" - "path/filepath" - "sort" - "sync" - - "github.com/minio/minio/pkg/event" - "github.com/minio/minio/pkg/sys" -) - -const ( - defaultLimit = 100000 // Default store limit. - eventExt = ".event" -) - -// QueueStore - Filestore for persisting events. -type QueueStore struct { - sync.RWMutex - currentEntries uint64 - entryLimit uint64 - directory string -} - -// NewQueueStore - Creates an instance for QueueStore. -func NewQueueStore(directory string, limit uint64) Store { - if limit == 0 { - limit = defaultLimit - _, maxRLimit, err := sys.GetMaxOpenFileLimit() - if err == nil { - // Limit the maximum number of entries - // to maximum open file limit - if maxRLimit < limit { - limit = maxRLimit - } - } - } - - return &QueueStore{ - directory: directory, - entryLimit: limit, - } -} - -// Open - Creates the directory if not present. -func (store *QueueStore) Open() error { - store.Lock() - defer store.Unlock() - - if err := os.MkdirAll(store.directory, os.FileMode(0770)); err != nil { - return err - } - - names, err := store.list() - if err != nil { - return err - } - - currentEntries := uint64(len(names)) - if currentEntries >= store.entryLimit { - return errLimitExceeded - } - - store.currentEntries = currentEntries - - return nil -} - -// write - writes event to the directory. -func (store *QueueStore) write(key string, e event.Event) error { - - // Marshalls the event. - eventData, err := json.Marshal(e) - if err != nil { - return err - } - - path := filepath.Join(store.directory, key+eventExt) - if err := ioutil.WriteFile(path, eventData, os.FileMode(0770)); err != nil { - return err - } - - // Increment the event count. - store.currentEntries++ - - return nil -} - -// Put - puts a event to the store. -func (store *QueueStore) Put(e event.Event) error { - store.Lock() - defer store.Unlock() - if store.currentEntries >= store.entryLimit { - return errLimitExceeded - } - key, err := getNewUUID() - if err != nil { - return err - } - return store.write(key, e) -} - -// Get - gets a event from the store. -func (store *QueueStore) Get(key string) (event event.Event, err error) { - store.RLock() - - defer func(store *QueueStore) { - store.RUnlock() - if err != nil { - // Upon error we remove the entry. - store.Del(key) - } - }(store) - - var eventData []byte - eventData, err = ioutil.ReadFile(filepath.Join(store.directory, key+eventExt)) - if err != nil { - return event, err - } - - if len(eventData) == 0 { - return event, os.ErrNotExist - } - - if err = json.Unmarshal(eventData, &event); err != nil { - return event, err - } - - return event, nil -} - -// Del - Deletes an entry from the store. 
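Before Del itself, a minimal round trip with the queue store. The directory and limit are assumed values, and someEvent stands for any event.Event:

    store := NewQueueStore("/var/minio/events", 100)
    if err := store.Open(); err != nil {
        // handle error
    }
    if err := store.Put(someEvent); err != nil {
        // handle error (errLimitExceeded once the limit is reached)
    }
    keys, _ := store.List()                                   // names carry the ".event" suffix
    ev, _ := store.Get(strings.TrimSuffix(keys[0], eventExt)) // Get expects the key without the suffix
    _ = ev
    // Del, defined next, removes an entry once it has been delivered.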
-func (store *QueueStore) Del(key string) error { - store.Lock() - defer store.Unlock() - return store.del(key) -} - -// lockless call -func (store *QueueStore) del(key string) error { - if err := os.Remove(filepath.Join(store.directory, key+eventExt)); err != nil { - return err - } - - // Decrement the current entries count. - store.currentEntries-- - - // Current entries can underflow, when multiple - // events are being pushed in parallel, this code - // is needed to ensure that we don't underflow. - // - // queueStore replayEvents is not serialized, - // this code is needed to protect us under - // such situations. - if store.currentEntries == math.MaxUint64 { - store.currentEntries = 0 - } - return nil -} - -// List - lists all files from the directory. -func (store *QueueStore) List() ([]string, error) { - store.RLock() - defer store.RUnlock() - return store.list() -} - -// list lock less. -func (store *QueueStore) list() ([]string, error) { - var names []string - files, err := ioutil.ReadDir(store.directory) - if err != nil { - return names, err - } - - // Sort the dentries. - sort.Slice(files, func(i, j int) bool { - return files[i].ModTime().Before(files[j].ModTime()) - }) - - for _, file := range files { - names = append(names, file.Name()) - } - - return names, nil -} diff --git a/pkg/event/target/queuestore_test.go b/pkg/event/target/queuestore_test.go deleted file mode 100644 index 3638cb65..00000000 --- a/pkg/event/target/queuestore_test.go +++ /dev/null @@ -1,213 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "os" - "path/filepath" - "reflect" - "strings" - "testing" - - "github.com/minio/minio/pkg/event" -) - -// TestDir -var queueDir = filepath.Join(os.TempDir(), "minio_test") - -// Sample test event. -var testEvent = event.Event{EventVersion: "1.0", EventSource: "test_source", AwsRegion: "test_region", EventTime: "test_time", EventName: event.ObjectAccessedGet} - -// Initialize the store. -func setUpStore(directory string, limit uint64) (Store, error) { - store := NewQueueStore(queueDir, limit) - if oErr := store.Open(); oErr != nil { - return nil, oErr - } - return store, nil -} - -// Tear down store -func tearDownStore() error { - return os.RemoveAll(queueDir) -} - -// TestQueueStorePut - tests for store.Put -func TestQueueStorePut(t *testing.T) { - defer func() { - if err := tearDownStore(); err != nil { - t.Fatal("Failed to tear down store ", err) - } - }() - store, err := setUpStore(queueDir, 100) - if err != nil { - t.Fatal("Failed to create a queue store ", err) - - } - // Put 100 events. - for i := 0; i < 100; i++ { - if err := store.Put(testEvent); err != nil { - t.Fatal("Failed to put to queue store ", err) - } - } - // Count the events. 
- names, err := store.List() - if err != nil { - t.Fatal(err) - } - if len(names) != 100 { - t.Fatalf("List() Expected: 100, got %d", len(names)) - } -} - -// TestQueueStoreGet - tests for store.Get -func TestQueueStoreGet(t *testing.T) { - defer func() { - if err := tearDownStore(); err != nil { - t.Fatal("Failed to tear down store ", err) - } - }() - store, err := setUpStore(queueDir, 10) - if err != nil { - t.Fatal("Failed to create a queue store ", err) - } - // Put 10 events - for i := 0; i < 10; i++ { - if err := store.Put(testEvent); err != nil { - t.Fatal("Failed to put to queue store ", err) - } - } - eventKeys, err := store.List() - if err != nil { - t.Fatal(err) - } - // Get 10 events. - if len(eventKeys) == 10 { - for _, key := range eventKeys { - event, eErr := store.Get(strings.TrimSuffix(key, eventExt)) - if eErr != nil { - t.Fatal("Failed to Get the event from the queue store ", eErr) - } - if !reflect.DeepEqual(testEvent, event) { - t.Fatalf("Failed to read the event: error: expected = %v, got = %v", testEvent, event) - } - } - } else { - t.Fatalf("List() Expected: 10, got %d", len(eventKeys)) - } -} - -// TestQueueStoreDel - tests for store.Del -func TestQueueStoreDel(t *testing.T) { - defer func() { - if err := tearDownStore(); err != nil { - t.Fatal("Failed to tear down store ", err) - } - }() - store, err := setUpStore(queueDir, 20) - if err != nil { - t.Fatal("Failed to create a queue store ", err) - } - // Put 20 events. - for i := 0; i < 20; i++ { - if err := store.Put(testEvent); err != nil { - t.Fatal("Failed to put to queue store ", err) - } - } - eventKeys, err := store.List() - if err != nil { - t.Fatal(err) - } - // Remove all the events. - if len(eventKeys) == 20 { - for _, key := range eventKeys { - err := store.Del(strings.TrimSuffix(key, eventExt)) - if err != nil { - t.Fatal("queue store Del failed with ", err) - } - } - } else { - t.Fatalf("List() Expected: 20, got %d", len(eventKeys)) - } - - names, err := store.List() - if err != nil { - t.Fatal(err) - } - if len(names) != 0 { - t.Fatalf("List() Expected: 0, got %d", len(names)) - } -} - -// TestQueueStoreLimit - tests the event limit for the store. -func TestQueueStoreLimit(t *testing.T) { - defer func() { - if err := tearDownStore(); err != nil { - t.Fatal("Failed to tear down store ", err) - } - }() - // The max limit is set to 5. - store, err := setUpStore(queueDir, 5) - if err != nil { - t.Fatal("Failed to create a queue store ", err) - } - for i := 0; i < 5; i++ { - if err := store.Put(testEvent); err != nil { - t.Fatal("Failed to put to queue store ", err) - } - } - // Should not allow 6th Put. - if err := store.Put(testEvent); err == nil { - t.Fatalf("Expected to fail with %s, but passes", errLimitExceeded) - } -} - -// TestQueueStoreLimit - tests for store.LimitN. -func TestQueueStoreListN(t *testing.T) { - defer func() { - if err := tearDownStore(); err != nil { - t.Fatal("Failed to tear down store ", err) - } - }() - store, err := setUpStore(queueDir, 10) - if err != nil { - t.Fatal("Failed to create a queue store ", err) - } - for i := 0; i < 10; i++ { - if err := store.Put(testEvent); err != nil { - t.Fatal("Failed to put to queue store ", err) - } - } - // Should return all the event keys in the store. 
- names, err := store.List() - if err != nil { - t.Fatal(err) - } - - if len(names) != 10 { - t.Fatalf("List() Expected: 10, got %d", len(names)) - } - - if err = os.RemoveAll(queueDir); err != nil { - t.Fatal(err) - } - - _, err = store.List() - if !os.IsNotExist(err) { - t.Fatalf("Expected List() to fail with os.ErrNotExist, %s", err) - } -} diff --git a/pkg/event/target/redis.go b/pkg/event/target/redis.go deleted file mode 100644 index fdb093e8..00000000 --- a/pkg/event/target/redis.go +++ /dev/null @@ -1,342 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/url" - "os" - "path/filepath" - "strings" - "time" - - "github.com/gomodule/redigo/redis" - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" -) - -// Redis constants -const ( - RedisFormat = "format" - RedisAddress = "address" - RedisPassword = "password" - RedisKey = "key" - RedisQueueDir = "queue_dir" - RedisQueueLimit = "queue_limit" - - EnvRedisEnable = "MINIO_NOTIFY_REDIS_ENABLE" - EnvRedisFormat = "MINIO_NOTIFY_REDIS_FORMAT" - EnvRedisAddress = "MINIO_NOTIFY_REDIS_ADDRESS" - EnvRedisPassword = "MINIO_NOTIFY_REDIS_PASSWORD" - EnvRedisKey = "MINIO_NOTIFY_REDIS_KEY" - EnvRedisQueueDir = "MINIO_NOTIFY_REDIS_QUEUE_DIR" - EnvRedisQueueLimit = "MINIO_NOTIFY_REDIS_QUEUE_LIMIT" -) - -// RedisArgs - Redis target arguments. -type RedisArgs struct { - Enable bool `json:"enable"` - Format string `json:"format"` - Addr xnet.Host `json:"address"` - Password string `json:"password"` - Key string `json:"key"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` -} - -// RedisAccessEvent holds event log data and timestamp -type RedisAccessEvent struct { - Event []event.Event - EventTime string -} - -// Validate RedisArgs fields -func (r RedisArgs) Validate() error { - if !r.Enable { - return nil - } - - if r.Format != "" { - f := strings.ToLower(r.Format) - if f != event.NamespaceFormat && f != event.AccessFormat { - return fmt.Errorf("unrecognized format") - } - } - - if r.Key == "" { - return fmt.Errorf("empty key") - } - - if r.QueueDir != "" { - if !filepath.IsAbs(r.QueueDir) { - return errors.New("queueDir path should be absolute") - } - } - - return nil -} - -func (r RedisArgs) validateFormat(c redis.Conn) error { - typeAvailable, err := redis.String(c.Do("TYPE", r.Key)) - if err != nil { - return err - } - - if typeAvailable != "none" { - expectedType := "hash" - if r.Format == event.AccessFormat { - expectedType = "list" - } - - if typeAvailable != expectedType { - return fmt.Errorf("expected type %v does not match with available type %v", expectedType, typeAvailable) - } - } - - return nil -} - -// RedisTarget - Redis target. 
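validateFormat above requires the configured key to hold a Redis hash for the namespace format and a list for the access format. A hedged sketch of the corresponding writes; the key name and payload variables are assumed, and the real commands are issued by the send method further below:

    conn := pool.Get()
    defer conn.Close()

    // Namespace format: one hash field per "<bucket>/<object>" key.
    _, _ = conn.Do("HSET", "minio_events", "mybucket/object.txt", recordJSON)
    // Object removal clears the field instead:
    _, _ = conn.Do("HDEL", "minio_events", "mybucket/object.txt")

    // Access format: append a batch of events to the list.
    _, _ = conn.Do("RPUSH", "minio_events", accessJSON)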
-type RedisTarget struct { - id event.TargetID - args RedisArgs - pool *redis.Pool - store Store - firstPing bool - loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}) -} - -// ID - returns target ID. -func (target *RedisTarget) ID() event.TargetID { - return target.id -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *RedisTarget) HasQueueStore() bool { - return target.store != nil -} - -// IsActive - Return true if target is up and active -func (target *RedisTarget) IsActive() (bool, error) { - conn := target.pool.Get() - defer func() { - cErr := conn.Close() - target.loggerOnce(context.Background(), cErr, target.ID()) - }() - _, pingErr := conn.Do("PING") - if pingErr != nil { - if IsConnRefusedErr(pingErr) { - return false, errNotConnected - } - return false, pingErr - } - return true, nil -} - -// Save - saves the events to the store if questore is configured, which will be replayed when the redis connection is active. -func (target *RedisTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - _, err := target.IsActive() - if err != nil { - return err - } - return target.send(eventData) -} - -// send - sends an event to the redis. -func (target *RedisTarget) send(eventData event.Event) error { - conn := target.pool.Get() - defer func() { - cErr := conn.Close() - target.loggerOnce(context.Background(), cErr, target.ID()) - }() - - if target.args.Format == event.NamespaceFormat { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - key := eventData.S3.Bucket.Name + "/" + objectName - - if eventData.EventName == event.ObjectRemovedDelete { - _, err = conn.Do("HDEL", target.args.Key, key) - } else { - var data []byte - if data, err = json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}); err != nil { - return err - } - - _, err = conn.Do("HSET", target.args.Key, key, data) - } - if err != nil { - return err - } - } - - if target.args.Format == event.AccessFormat { - data, err := json.Marshal([]RedisAccessEvent{{Event: []event.Event{eventData}, EventTime: eventData.EventTime}}) - if err != nil { - return err - } - if _, err := conn.Do("RPUSH", target.args.Key, data); err != nil { - return err - } - } - - return nil -} - -// Send - reads an event from store and sends it to redis. -func (target *RedisTarget) Send(eventKey string) error { - conn := target.pool.Get() - defer func() { - cErr := conn.Close() - target.loggerOnce(context.Background(), cErr, target.ID()) - }() - _, pingErr := conn.Do("PING") - if pingErr != nil { - if IsConnRefusedErr(pingErr) { - return errNotConnected - } - return pingErr - } - - if !target.firstPing { - if err := target.args.validateFormat(conn); err != nil { - if IsConnRefusedErr(err) { - return errNotConnected - } - return err - } - target.firstPing = true - } - - eventData, eErr := target.store.Get(eventKey) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and would've been already been sent successfully. - if os.IsNotExist(eErr) { - return nil - } - return eErr - } - - if err := target.send(eventData); err != nil { - if IsConnRefusedErr(err) { - return errNotConnected - } - return err - } - - // Delete the event from store. - return target.store.Del(eventKey) -} - -// Close - releases the resources used by the pool. 
-func (target *RedisTarget) Close() error { - return target.pool.Close() -} - -// NewRedisTarget - creates new Redis target. -func NewRedisTarget(id string, args RedisArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}), test bool) (*RedisTarget, error) { - pool := &redis.Pool{ - MaxIdle: 3, - IdleTimeout: 2 * 60 * time.Second, - Dial: func() (redis.Conn, error) { - conn, err := redis.Dial("tcp", args.Addr.String()) - if err != nil { - return nil, err - } - - if args.Password != "" { - if _, err = conn.Do("AUTH", args.Password); err != nil { - cErr := conn.Close() - targetID := event.TargetID{ID: id, Name: "redis"} - loggerOnce(context.Background(), cErr, targetID) - return nil, err - } - } - - // Must be done after AUTH - if _, err = conn.Do("CLIENT", "SETNAME", "MinIO"); err != nil { - cErr := conn.Close() - targetID := event.TargetID{ID: id, Name: "redis"} - loggerOnce(context.Background(), cErr, targetID) - return nil, err - } - - return conn, nil - }, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - } - - var store Store - - target := &RedisTarget{ - id: event.TargetID{ID: id, Name: "redis"}, - args: args, - pool: pool, - loggerOnce: loggerOnce, - } - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-redis-"+id) - store = NewQueueStore(queueDir, args.QueueLimit) - if oErr := store.Open(); oErr != nil { - target.loggerOnce(context.Background(), oErr, target.ID()) - return target, oErr - } - target.store = store - } - - conn := target.pool.Get() - defer func() { - cErr := conn.Close() - target.loggerOnce(context.Background(), cErr, target.ID()) - }() - - _, pingErr := conn.Do("PING") - if pingErr != nil { - if target.store == nil || !(IsConnRefusedErr(pingErr) || IsConnResetErr(pingErr)) { - target.loggerOnce(context.Background(), pingErr, target.ID()) - return target, pingErr - } - } else { - if err := target.args.validateFormat(conn); err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - target.firstPing = true - } - - if target.store != nil && !test { - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - // Start replaying events from the store. - go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - - return target, nil -} diff --git a/pkg/event/target/store.go b/pkg/event/target/store.go deleted file mode 100644 index 9f058efc..00000000 --- a/pkg/event/target/store.go +++ /dev/null @@ -1,143 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "context" - "errors" - "fmt" - "strings" - "syscall" - "time" - - "github.com/minio/minio/pkg/event" -) - -const retryInterval = 3 * time.Second - -// errNotConnected - indicates that the target connection is not active. 
-var errNotConnected = errors.New("not connected to target server/service") - -// errLimitExceeded error is sent when the maximum limit is reached. -var errLimitExceeded = errors.New("the maximum store limit reached") - -// Store - To persist the events. -type Store interface { - Put(event event.Event) error - Get(key string) (event.Event, error) - List() ([]string, error) - Del(key string) error - Open() error -} - -// replayEvents - Reads the events from the store and replays. -func replayEvents(store Store, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), id event.TargetID) <-chan string { - eventKeyCh := make(chan string) - - go func() { - retryTicker := time.NewTicker(retryInterval) - defer retryTicker.Stop() - defer close(eventKeyCh) - for { - names, err := store.List() - if err == nil { - for _, name := range names { - select { - case eventKeyCh <- strings.TrimSuffix(name, eventExt): - // Get next key. - case <-doneCh: - return - } - } - } - - if len(names) < 2 { - select { - case <-retryTicker.C: - if err != nil { - loggerOnce(context.Background(), - fmt.Errorf("store.List() failed '%w'", err), id) - } - case <-doneCh: - return - } - } - } - }() - - return eventKeyCh -} - -// IsConnRefusedErr - To check fot "connection refused" error. -func IsConnRefusedErr(err error) bool { - return errors.Is(err, syscall.ECONNREFUSED) -} - -// IsConnResetErr - Checks for connection reset errors. -func IsConnResetErr(err error) bool { - if strings.Contains(err.Error(), "connection reset by peer") { - return true - } - // incase if error message is wrapped. - return errors.Is(err, syscall.ECONNRESET) -} - -// sendEvents - Reads events from the store and re-plays. -func sendEvents(target event.Target, eventKeyCh <-chan string, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{})) { - retryTicker := time.NewTicker(retryInterval) - defer retryTicker.Stop() - - send := func(eventKey string) bool { - for { - err := target.Send(eventKey) - if err == nil { - break - } - - if err != errNotConnected && !IsConnResetErr(err) { - loggerOnce(context.Background(), - fmt.Errorf("target.Send() failed with '%w'", err), - target.ID()) - } - - // Retrying after 3secs back-off - - select { - case <-retryTicker.C: - case <-doneCh: - return false - } - } - return true - } - - for { - select { - case eventKey, ok := <-eventKeyCh: - if !ok { - // closed channel. 
- return - } - - if !send(eventKey) { - return - } - case <-doneCh: - return - } - } -} diff --git a/pkg/event/target/testdata/certs/nats_client_cert.pem b/pkg/event/target/testdata/certs/nats_client_cert.pem deleted file mode 100644 index e3efc256..00000000 --- a/pkg/event/target/testdata/certs/nats_client_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDFTCCAf0CFHzIPe2CTpos6fYYVcZD4xAMYD0FMA0GCSqGSIb3DQEBCwUAMEox -CzAJBgNVBAYTAkNBMRMwEQYDVQQIDApTb21lLVN0YXRlMQ4wDAYDVQQKDAVNaW5p -bzEWMBQGA1UECwwNTWluaW8gUm9vdCBDQTAgFw0xOTEwMTEyMDE4NTJaGA8yMTE5 -MDkxNzIwMTg1MlowQjELMAkGA1UEBhMCQ0ExDjAMBgNVBAoMBU1pbmlvMQ8wDQYD -VQQLDAZDbGllbnQxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB -BQADggEPADCCAQoCggEBANsLgS7C0BDsfhWQVyuFGrpl9IWGyAQvSECzepdvxcUz -CJ5QTSjtMq+2XbkozsmJ0WKtXY+c2vYuNMcthfNzLmn8KMxDoCqMwLXNMO8lLuF5 -94pH0DmTvEngjEDIOPM9aNbfmLU9Lbeiplkjt0izYObg+MzsTS0lIR2yKtSoVFOV -zzSGRvMfUO/rOlRgBio9Hhx8fL0cckr7SzwJcYtttr+1cySNkGVI8/E5Pe0T3fVW -7kiCHtUbbST0sQWorDDMkwTYyd2TvB8B8BzfSRNUavnb37bp8woqJottYXXEcLff -HL7kOhnmqvOSLSnG9ynF+WlE7ZykHw+VsIrRsl/5NwkCAwEAATANBgkqhkiG9w0B -AQsFAAOCAQEASfRN6esWr68ieeODFuiyhMaZ3kHKWiyvT3MDrFPcBgTIbi4CvFmb -+ynT7aDKxBpO8Ttl208QyLZxn9YeiFCZVVy8+o1JaQd30cjewX6A9dCd16yi3lva -tbVLVl+pJqKwr/wdEwrJ23bUYpFNrex38dxaRUOQ4Zaj6QzCXvB+IR/RvYZ2U23o -pW0BEYdi+V3M0tuAvmX4NOrLvGUF1D93WRQuiueaMF8yNhiwuT/eCcpct3FLqacC -+NvdK0aiBdUgDvdA7OroXy0Ow768wHpLvfsxqepiXXK/9748hMQxgeaUzDE6SRFF -FqgnBuFgIu4MH7ki1EnH+KekFepsPF7BIA== ------END CERTIFICATE----- diff --git a/pkg/event/target/testdata/certs/nats_client_key.pem b/pkg/event/target/testdata/certs/nats_client_key.pem deleted file mode 100644 index 1ce7a425..00000000 --- a/pkg/event/target/testdata/certs/nats_client_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEA2wuBLsLQEOx+FZBXK4UaumX0hYbIBC9IQLN6l2/FxTMInlBN -KO0yr7ZduSjOyYnRYq1dj5za9i40xy2F83MuafwozEOgKozAtc0w7yUu4Xn3ikfQ -OZO8SeCMQMg48z1o1t+YtT0tt6KmWSO3SLNg5uD4zOxNLSUhHbIq1KhUU5XPNIZG -8x9Q7+s6VGAGKj0eHHx8vRxySvtLPAlxi222v7VzJI2QZUjz8Tk97RPd9VbuSIIe -1RttJPSxBaisMMyTBNjJ3ZO8HwHwHN9JE1Rq+dvftunzCiomi21hdcRwt98cvuQ6 -Geaq85ItKcb3KcX5aUTtnKQfD5WwitGyX/k3CQIDAQABAoIBACrO7Mg+unsUPO/p -7Z9LvBWBp3ARDzYCJ5S9fs/pwDTx7FVETFAbSzSb52UwXHl2vb0TNJ3EgeZq0VW7 -x9n0QLXl2fNRpBOsvlzJZS7XjXnzZDVaI4+dF8c4YzCl8LtY3QxhVm7VLgIdf3Uc -Tc2fgOiePwGNjOetwfMTxtsYqqJK4Ang/nop6WIZtJxlq/H4D/OueDScv8yM96WP -Lb+5YMrlTtA79TTn44hdd/IVJBV74UK335bWaV2gqop/r/kymYcmIS6eQ8Fx8Au8 -FhqjL38r6H5mnyH/euBJDf+B9aPej44m093j/gy8C6PFdVHXOw4h3su+RJkV7J+x -JzSCYiECgYEA/0e1h7XVHVjRI/hzlpZlHomw4SYXc1R5DBljsD+0baNWdgwShdiA -qZyhVl1/WJEK6r+tT/a0rGcIz1CkjPvjOf66+FVwukqp4UDdeZA28CC6GvbAlBW7 -Gz3wWjbC3quR8OCcGbNrRIUegeN3KxIpyzTXKv2PRUnkkj2z2+clf1sCgYEA26mj -AiynCFZH2DIk36y1Qvn2Yh/LgO+JrZNmh0YlDENstLXEU5tkLyRxhbYDsDFKHr7a -1pHZZa2SLKMQEKNZ3OgP5osvl4BxSt5SrusGQHACb1FeWN3BdIWI5S0ZIgBih46G -IKA1ce4CnLo3oWm1L9tEFnTtMEqEtkECMnYTtGsCgYAsDgYH65thuygsmv3nqQC+ -ami6EkbNwnA5ZFBN5FCQ8zVCnga8Toa1vrAhJXWKpU1LAdU5DYxUjFt626HqKrYm -Fg3SOXyAyc3Tb8xI5Fh2zE8RxC+r3qwxoVjPWM/8eYNwHHMUBGCorIh9RfIU3seT -qATSCHwnKv9lNXzKoHNaLQKBgGURdkkn1mrFmCTnXYP06Sm57R1U18Opc0WEDqar -JZyw4TF8eKqnUr9GG12UU7ob06i10+bqEIbyB9G17UxafJxhwf8nh2xD0tzJ0m1d -AfFgGB6z558n2T0Nu+EGkQvN9Ye9kgUs8apMArOuEq6X/p/YWUmj3wZbIxjgbGxf -W82lAoGATTwJCXE6y+D5Vc5I9BTPgJd6Wg/6n+BAmadt3lG/VbPxndQBudCNii0Z -C7uyMrbK0RVY+DZAFuZWLskIoU+t82HgIsQNAH5KPz27PCWq+DznrARp+maKXj5f -f5Vsvz6KBzM8Lnts3m20AYcRP4meBa6HNHLnYGedTbRLKYobOgg= ------END RSA PRIVATE KEY----- diff --git a/pkg/event/target/testdata/certs/nats_server_cert.pem 
b/pkg/event/target/testdata/certs/nats_server_cert.pem deleted file mode 100644 index 43d9ecc5..00000000 --- a/pkg/event/target/testdata/certs/nats_server_cert.pem +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDFTCCAf0CFHzIPe2CTpos6fYYVcZD4xAMYD0EMA0GCSqGSIb3DQEBCwUAMEox -CzAJBgNVBAYTAkNBMRMwEQYDVQQIDApTb21lLVN0YXRlMQ4wDAYDVQQKDAVNaW5p -bzEWMBQGA1UECwwNTWluaW8gUm9vdCBDQTAgFw0xOTEwMTEyMDE1NTRaGA8yMTE5 -MDkxNzIwMTU1NFowQjELMAkGA1UEBhMCQ0ExDjAMBgNVBAoMBU1pbmlvMQ8wDQYD -VQQLDAZTZXJ2ZXIxEjAQBgNVBAMMCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEB -BQADggEPADCCAQoCggEBAMxbKHUPlW4wIX0G45jRG84o5viXtu7E044QpTGwDS+u -KCG3w4pWUwnor/uCjkzKSmg+6Sx/889de7QySPwt4TA+F7qy7TrUnIUiaVFcPmeQ -YsI68JM857nk4ScxK4C4NzJ78DlQreyPwIMx4rAGfEZpdQbAE0LWyOJ9rfMK1uF/ -PFdyNKOUw/fy5iA/DOUeoUqFATIjj5KUWojnt9QZ+M1pGbQsLWc7fzDuavVrAvR7 -wt386bayzLftzHr6rLWWzPoA70ltzvu6JxKU5xZSY5nA2Pip+le6q997+Sm5Dt5L -VZuSTzjKVNDV9l329eZJnozROdlSURVvg6fK5J8vUIsCAwEAATANBgkqhkiG9w0B -AQsFAAOCAQEAg6kLnBrjT4agClF94Rc2y6fOTWWzz7C4Ao6iQp3Tcr/YraY7FEEM -kkZT0osn1JSvut9f2gy2py3+FLAk4OdAt9NBIpjIMDmbIyiQYw+2w3IHJYwdHBXj -HapyjU2zFQvD6PNUUV4YukR28MmMk0UEizXxkEDm+Qf5t4Bpnv7tFiysMWmA6cCJ -1fvGOtCPazi0hZc0VkfTnl5OY/Msi+sYCh4gnCqYWICszMm37QKOJrJbY8mTSzAk -p/0dxAUDDKTuf+UPDqJ2I6o9/XEEwmPGOFovoS9iQhdyWWonRDL3+gCeE2cxEeu5 -QEOwxMQz08XRSsgxag9R3tcywXbaamgQYw== ------END CERTIFICATE----- diff --git a/pkg/event/target/testdata/certs/nats_server_key.pem b/pkg/event/target/testdata/certs/nats_server_key.pem deleted file mode 100644 index 815690c2..00000000 --- a/pkg/event/target/testdata/certs/nats_server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAzFsodQ+VbjAhfQbjmNEbzijm+Je27sTTjhClMbANL64oIbfD -ilZTCeiv+4KOTMpKaD7pLH/zz117tDJI/C3hMD4XurLtOtSchSJpUVw+Z5Biwjrw -kzznueThJzErgLg3MnvwOVCt7I/AgzHisAZ8Rml1BsATQtbI4n2t8wrW4X88V3I0 -o5TD9/LmID8M5R6hSoUBMiOPkpRaiOe31Bn4zWkZtCwtZzt/MO5q9WsC9HvC3fzp -trLMt+3MevqstZbM+gDvSW3O+7onEpTnFlJjmcDY+Kn6V7qr33v5KbkO3ktVm5JP -OMpU0NX2Xfb15kmejNE52VJRFW+Dp8rkny9QiwIDAQABAoIBABPVsTqTdaSJRWbW -OVcGzNUYwTpVt2q7bfE3CmGlPdJn6/tB09fkgxDOJe6agGdRpyExIf6wuKBi6XPX -AaCAb3/4NuNnJIF2S27cpS5BbsksiXlisSEJY1B2t6fPLPLEbo8W2n0lqIvyc/QH -7oG5T2yiJbqu/++X202ody7E2ZBvVXgwjBJuzxeQ9PPiPyjH2lAgciA0HtmXiHSj -pIz6V6XmN5CkyawvExA3F1wxIxf2CNRHLpls/DFGkVIz6pUlleKe7fBKVqeLuwnd -Lnjt9lrCrEozXcwUQnw5KVccB2+0xU8pIl78tPf9EHOHf5Bx1WCFl/YL5qEmfSsL -x3WM/WkCgYEA+7wdzK2M3LSJ6tat+eM9DT8bqSFhFAA41Jv1UdhTJZb8AKPs+6wX -13rUcjTnSI75KqApGaIbe3dF7640VmD9eyLNkc2SHr/yNPKV96sEiWk7TVgIWa0J -vBqPB9UWrkzqBov5VJEpeKlKuCCZGOxN7nkMtH93XmX9Nm3BxmMCQ8cCgYEAz9GK -FS1pwjj8DpyLW5GEFbcwGugkb/4ebc/OK4W93YvAYwoyJF7oSI9WzKG/R5Ko/0G7 -ESHt0d2pfRDR5XCkoJee4MFFIheLeVM4HDUoIY42I7gl/op+H9fGSNplBYDpVk7O -FNl9NFgUDytMbuh14SmAU3Yc7EjGruFgoZUPRR0CgYEAimwNos/HxlDMCcMUlXT7 -zD3oct7056+bkGVVxzSBvAjC94MsO9OMpKNZEJfAmehsYKEDGKJIJGMYpMwQ4XKh -z8T6bvMwJxJ7F9xQ1IhIjVq9DjGbHdyFntan0bG9sAiBIypy5qqPuFa2zHq4VLkT -vU74yoPQ2qqQSw6dX/5vb18CgYEAnLlZeT8WUvLGo/5K2nOTOQ09qg0H7a2nJQli -YlAqL7oFDKvTxLoOUypGO2x/5GomKNpZSUJdJ7gS6c6VfILGpJWzq6wVhvBartSj -rCIqcaPeEHH/tUacd0cysh6BsPTXA8/Kes7KLX9/ITF2Iu4MeBHkRQz+IvN/YsN1 -LGZNbcECgYAqWDnPa2GwFOgTrVwB1AfsCD1PVhSM+AnWnL7avHwD6WIDKB5qb9Ip -cF83o5bhLrLYVlpZNI1ESKgONyWlYOssBNb0KP51kgkBbtr+5qcCFoKzf5inyve2 -MTE1g+maOLtfRaVrlsGcirWSZdIk8dQmDX8nNM+Rg5hahF2l0/zopg== ------END RSA PRIVATE KEY----- diff --git a/pkg/event/target/testdata/certs/root_ca_cert.pem b/pkg/event/target/testdata/certs/root_ca_cert.pem deleted file mode 100644 index c8d61dd1..00000000 --- a/pkg/event/target/testdata/certs/root_ca_cert.pem +++ /dev/null @@ -1,21 +0,0 
@@ ------BEGIN CERTIFICATE----- -MIIDdzCCAl+gAwIBAgIUTuCjkiDWaSa3X747ybs/SnZ6aHQwDQYJKoZIhvcNAQEL -BQAwSjELMAkGA1UEBhMCQ0ExEzARBgNVBAgMClNvbWUtU3RhdGUxDjAMBgNVBAoM -BU1pbmlvMRYwFAYDVQQLDA1NaW5pbyBSb290IENBMCAXDTE5MTAxMTE5NTIxOFoY -DzIxMTkwOTE3MTk1MjE4WjBKMQswCQYDVQQGEwJDQTETMBEGA1UECAwKU29tZS1T -dGF0ZTEOMAwGA1UECgwFTWluaW8xFjAUBgNVBAsMDU1pbmlvIFJvb3QgQ0EwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDDAOyo76TMpltNUXiRAGaxaVMG -E2cf/JudJ+2ve7lxXo1Zz9PaJUOa5wl2b5ebwhXYP7lxdRRz488TfM4YmAm1fkgS -fhBHP337aeUCeW4mybv7P0jBDHYasiDy92YqX5tv2zPifRaoQ/jkUmEyY29VdTQq -LyuUCo4hmY3tO5daXN+x9DdnZj8VDkC235UpEe9vgvtY62m7w+guV/XdwKBvQKpa -248qaqUxvJ8l9sXCnLjNMWwm/6ZpPPcQvYW95P2bhwGH8KrdKeSutW8vVeUa2DN+ -E7QsEopVvg48t/dSVol9xXVQ2QMlNYBsrPQS04GX3aakVREI0r/krRl11Kf7AgMB -AAGjUzBRMB0GA1UdDgQWBBQtvCicVaNfZaLEXW7sgZYlTyjuuzAfBgNVHSMEGDAW -gBQtvCicVaNfZaLEXW7sgZYlTyjuuzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 -DQEBCwUAA4IBAQC9LflSKc6l8j280aTQyjjTdMysmKqb8Rs7dtSSaFXQK9nuzOhD -FGtahhsJFv/GnUu0weZoEMYySeNzaUoW0ICc3+iI2KMWcUlF/K3/P47tYrVMGJKW -YbuUS7lTxtra9xbFhxGCF8NwM6JG9TrP7JT9b/tn2Xb+UNsxWTH8pf1plusiOvQl -2JFc+BJzhooLP9sK0YGrb7i3CdQj5QcZjxVCDKAuosvMY/shoW4aEIEECSEtJ9mY -Rrfft0bOQ7ZKhDgu2ou+lyeyyWFS/19wr5LMvy+lUYG9H100peCpv5tHdGUF1lLR -m4g6eQK57EI6BoPpNomYtXkoRKP9MXiZ7InC ------END CERTIFICATE----- diff --git a/pkg/event/target/testdata/certs/root_ca_key.pem b/pkg/event/target/testdata/certs/root_ca_key.pem deleted file mode 100644 index a3fa9784..00000000 --- a/pkg/event/target/testdata/certs/root_ca_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAwwDsqO+kzKZbTVF4kQBmsWlTBhNnH/ybnSftr3u5cV6NWc/T -2iVDmucJdm+Xm8IV2D+5cXUUc+PPE3zOGJgJtX5IEn4QRz99+2nlAnluJsm7+z9I -wQx2GrIg8vdmKl+bb9sz4n0WqEP45FJhMmNvVXU0Ki8rlAqOIZmN7TuXWlzfsfQ3 -Z2Y/FQ5Att+VKRHvb4L7WOtpu8PoLlf13cCgb0CqWtuPKmqlMbyfJfbFwpy4zTFs -Jv+maTz3EL2FveT9m4cBh/Cq3SnkrrVvL1XlGtgzfhO0LBKKVb4OPLf3UlaJfcV1 -UNkDJTWAbKz0EtOBl92mpFURCNK/5K0ZddSn+wIDAQABAoIBABcVbu54n9uR/dDj -ShBwKbfqredUOKryrkEmTt6tGMCw3q65CW0TaDNYEiixARNxIEDfGkayA1/MoeC+ -r2794HhZoJJ/1dF5VKKEYJWwZje1Pcl6LlSb8wcp+viIDNILS00sd7Hh+OKmWTo9 -/j+GwdEt9KThvJjjvvt/P+vLWBlcHD2zKFJnq826jKFESkUkY8P6e0cRzOuwGY+E -LYXAwZC+j7wS3c7tW7PyhGAXkTxohtNNYZ6vOGnGetnTxQDICZ9hCIZMA+7hrtK8 -TAGWtWvwx/5SgEl2+b7M0q70I/2LQSB4ACgOfkhm38z7FwqkvTMl9YhR8E4qvZsZ -2fv4hoECgYEA5GG8W1H82Z8fxY4AMKwbvlZ5pZGuIwwm+ABsg5BlSMfXZS2APJuP -wUUOW2gcjDXq/m+7HphgJADpkf+j6NhTks2gWF32VPCfp1EnxOrjOkrZqtBcud+A -1d0HeWvUMYl4iq9+3haILyfN4nQkFeYO1X0wD2kgrEeIkgipjG//tlsCgYEA2pXd -PvXugREkt+HladpCQXPnxHgS+QZuPNBYcDFMlOpC6RdcLzDWcqGzmYROM9/sbI+c -0H3l9ROOkqkA1jq8K2apOKmH9NBOZAaIfi+A450rnJK0trhMsuUgY/xrj+BBO1Vm -LrAIJ6TMpp8DntO/EVitLBwjR6WrM4pUB9kRxuECgYEA3vEp1luq4SYc9dUxClJ4 -os334lDcFQp/4AlJ5QTIWsv60KIiVQfmxVyML17qv1TDGa4olC1bbMoXOJa9g0fq -DZz9skXHehOLRuJKWEiTmQwIgF72pdwxAJTL/xPsCI+SRJAc4OBOAPpyWWXW9Cmo -wW97ww90/bi28RfTq2yJy5ECgYARPXq6yYjrMx/zROTkSWuqX+rqyxGsBH7TWxdu -meTRZfyrB8WkjzSKzAgvVokYfFPYaCdVJmjpwIYhOSUwwGcxASLdrjlj7L4SE5XW -ZgbDbRUQf12zf6vE/F9mo3UUXvqmJGEv04CBJ/VgOvB9KXRLePQHo5yAvSdYpFNm -Xw+Q4QKBgHbV9YV+b1D+8SKNgpCZcrgTmvXIS4ibHdIYT2cxqGbkYS9T1UuJ1kWd -PBeeYsF3CeCBi1PGKI9eYO85W0aS3fZyrekBOyOVDUqQNpXQPQVdCZtB1l6dtRbD -3hARd9eQ1Gq1XofKX6Lbc+7tQcmZbAKJbr3JjHJMDUORYk+o8hdK ------END RSA PRIVATE KEY----- diff --git a/pkg/event/target/testdata/nats_tls.conf b/pkg/event/target/testdata/nats_tls.conf deleted file mode 100644 index 2ca45f6e..00000000 --- a/pkg/event/target/testdata/nats_tls.conf +++ /dev/null @@ -1,7 +0,0 @@ -port: 14225 -net: localhost - -tls { - cert_file: "./testdata/certs/nats_server_cert.pem" - 
key_file: "./testdata/certs/nats_server_key.pem" -} diff --git a/pkg/event/target/testdata/nats_tls_client_cert.conf b/pkg/event/target/testdata/nats_tls_client_cert.conf deleted file mode 100644 index f0a7d075..00000000 --- a/pkg/event/target/testdata/nats_tls_client_cert.conf +++ /dev/null @@ -1,18 +0,0 @@ -port: 14226 -net: localhost - -tls { - cert_file: "./testdata/certs/nats_server_cert.pem" - key_file: "./testdata/certs/nats_server_key.pem" - ca_file: "./testdata/certs/root_ca_cert.pem" - verify_and_map: true -} -authorization { - ADMIN = { - publish = ">" - subscribe = ">" - } - users = [ - {user: "CN=localhost,OU=Client,O=Minio,C=CA", permissions: $ADMIN} - ] -} diff --git a/pkg/event/target/webhook.go b/pkg/event/target/webhook.go deleted file mode 100644 index 7cfa62f5..00000000 --- a/pkg/event/target/webhook.go +++ /dev/null @@ -1,263 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package target - -import ( - "bytes" - "context" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "time" - - "github.com/minio/minio/pkg/certs" - "github.com/minio/minio/pkg/event" - xnet "github.com/minio/minio/pkg/net" -) - -// Webhook constants -const ( - WebhookEndpoint = "endpoint" - WebhookAuthToken = "auth_token" - WebhookQueueDir = "queue_dir" - WebhookQueueLimit = "queue_limit" - WebhookClientCert = "client_cert" - WebhookClientKey = "client_key" - - EnvWebhookEnable = "MINIO_NOTIFY_WEBHOOK_ENABLE" - EnvWebhookEndpoint = "MINIO_NOTIFY_WEBHOOK_ENDPOINT" - EnvWebhookAuthToken = "MINIO_NOTIFY_WEBHOOK_AUTH_TOKEN" - EnvWebhookQueueDir = "MINIO_NOTIFY_WEBHOOK_QUEUE_DIR" - EnvWebhookQueueLimit = "MINIO_NOTIFY_WEBHOOK_QUEUE_LIMIT" - EnvWebhookClientCert = "MINIO_NOTIFY_WEBHOOK_CLIENT_CERT" - EnvWebhookClientKey = "MINIO_NOTIFY_WEBHOOK_CLIENT_KEY" -) - -// WebhookArgs - Webhook target arguments. -type WebhookArgs struct { - Enable bool `json:"enable"` - Endpoint xnet.URL `json:"endpoint"` - AuthToken string `json:"authToken"` - Transport *http.Transport `json:"-"` - QueueDir string `json:"queueDir"` - QueueLimit uint64 `json:"queueLimit"` - ClientCert string `json:"clientCert"` - ClientKey string `json:"clientKey"` -} - -// Validate WebhookArgs fields -func (w WebhookArgs) Validate() error { - if !w.Enable { - return nil - } - if w.Endpoint.IsEmpty() { - return errors.New("endpoint empty") - } - if w.QueueDir != "" { - if !filepath.IsAbs(w.QueueDir) { - return errors.New("queueDir path should be absolute") - } - } - if w.ClientCert != "" && w.ClientKey == "" || w.ClientCert == "" && w.ClientKey != "" { - return errors.New("cert and key must be specified as a pair") - } - return nil -} - -// WebhookTarget - Webhook target. -type WebhookTarget struct { - id event.TargetID - args WebhookArgs - httpClient *http.Client - store Store - loggerOnce func(ctx context.Context, err error, id interface{}, errKind ...interface{}) -} - -// ID - returns target ID. 
-func (target WebhookTarget) ID() event.TargetID { - return target.id -} - -// HasQueueStore - Checks if the queueStore has been configured for the target -func (target *WebhookTarget) HasQueueStore() bool { - return target.store != nil -} - -// IsActive - Return true if target is up and active -func (target *WebhookTarget) IsActive() (bool, error) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - req, err := http.NewRequest(http.MethodHead, target.args.Endpoint.String(), nil) - if err != nil { - if xnet.IsNetworkOrHostDown(err) { - return false, errNotConnected - } - return false, err - } - - resp, err := target.httpClient.Do(req.WithContext(ctx)) - if err != nil { - if xnet.IsNetworkOrHostDown(err) || err == context.DeadlineExceeded { - return false, errNotConnected - } - return false, err - } - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - // No network failure i.e response from the target means its up - return true, nil -} - -// Save - saves the events to the store if queuestore is configured, which will be replayed when the wenhook connection is active. -func (target *WebhookTarget) Save(eventData event.Event) error { - if target.store != nil { - return target.store.Put(eventData) - } - err := target.send(eventData) - if err != nil { - if xnet.IsNetworkOrHostDown(err) { - return errNotConnected - } - } - return err -} - -// send - sends an event to the webhook. -func (target *WebhookTarget) send(eventData event.Event) error { - objectName, err := url.QueryUnescape(eventData.S3.Object.Key) - if err != nil { - return err - } - key := eventData.S3.Bucket.Name + "/" + objectName - - data, err := json.Marshal(event.Log{EventName: eventData.EventName, Key: key, Records: []event.Event{eventData}}) - if err != nil { - return err - } - - req, err := http.NewRequest("POST", target.args.Endpoint.String(), bytes.NewReader(data)) - if err != nil { - return err - } - - if target.args.AuthToken != "" { - req.Header.Set("Authorization", "Bearer "+target.args.AuthToken) - } - - req.Header.Set("Content-Type", "application/json") - - resp, err := target.httpClient.Do(req) - if err != nil { - target.Close() - return err - } - defer resp.Body.Close() - io.Copy(ioutil.Discard, resp.Body) - - if resp.StatusCode < 200 || resp.StatusCode > 299 { - target.Close() - return fmt.Errorf("sending event failed with %v", resp.Status) - } - - return nil -} - -// Send - reads an event from store and sends it to webhook. -func (target *WebhookTarget) Send(eventKey string) error { - eventData, eErr := target.store.Get(eventKey) - if eErr != nil { - // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() - // Such events will not exist and would've been already been sent successfully. - if os.IsNotExist(eErr) { - return nil - } - return eErr - } - - if err := target.send(eventData); err != nil { - if xnet.IsNetworkOrHostDown(err) { - return errNotConnected - } - return err - } - - // Delete the event from store. - return target.store.Del(eventKey) -} - -// Close - does nothing and available for interface compatibility. -func (target *WebhookTarget) Close() error { - // Close idle connection with "keep-alive" states - target.httpClient.CloseIdleConnections() - return nil -} - -// NewWebhookTarget - creates new Webhook target. 
-func NewWebhookTarget(id string, args WebhookArgs, doneCh <-chan struct{}, loggerOnce func(ctx context.Context, err error, id interface{}, kind ...interface{}), transport *http.Transport, test bool) (*WebhookTarget, error) { - - var store Store - - target := &WebhookTarget{ - id: event.TargetID{ID: id, Name: "webhook"}, - args: args, - loggerOnce: loggerOnce, - } - - if target.args.ClientCert != "" && target.args.ClientKey != "" { - c, err := certs.New(target.args.ClientCert, target.args.ClientKey, tls.LoadX509KeyPair) - if err != nil { - return target, err - } - transport.TLSClientConfig.GetClientCertificate = c.GetClientCertificate - } - target.httpClient = &http.Client{Transport: transport} - - if args.QueueDir != "" { - queueDir := filepath.Join(args.QueueDir, storePrefix+"-webhook-"+id) - store = NewQueueStore(queueDir, args.QueueLimit) - if err := store.Open(); err != nil { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - target.store = store - } - - _, err := target.IsActive() - if err != nil { - if target.store == nil || err != errNotConnected { - target.loggerOnce(context.Background(), err, target.ID()) - return target, err - } - } - - if target.store != nil && !test { - // Replays the events from the store. - eventKeyCh := replayEvents(target.store, doneCh, target.loggerOnce, target.ID()) - // Start replaying events from the store. - go sendEvents(target, eventKeyCh, doneCh, target.loggerOnce) - } - - return target, nil -} diff --git a/pkg/event/targetid.go b/pkg/event/targetid.go deleted file mode 100644 index b53f72f2..00000000 --- a/pkg/event/targetid.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "encoding/json" - "fmt" - "strings" -) - -// TargetID - holds identification and name strings of notification target. -type TargetID struct { - ID string - Name string -} - -// String - returns string representation. -func (tid TargetID) String() string { - return tid.ID + ":" + tid.Name -} - -// ToARN - converts to ARN. -func (tid TargetID) ToARN(region string) ARN { - return ARN{TargetID: tid, region: region} -} - -// MarshalJSON - encodes to JSON data. -func (tid TargetID) MarshalJSON() ([]byte, error) { - return json.Marshal(tid.String()) -} - -// UnmarshalJSON - decodes JSON data. -func (tid *TargetID) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - targetID, err := parseTargetID(s) - if err != nil { - return err - } - - *tid = *targetID - return nil -} - -// parseTargetID - parses string to TargetID. 
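parseTargetID, defined next, reverses TargetID.String for the plain two-token case. A quick round trip using values from the accompanying tests:

    tid := TargetID{ID: "1", Name: "webhook"}
    s := tid.String()               // "1:webhook"
    parsed, err := parseTargetID(s) // err is nil; parsed is &TargetID{ID: "1", Name: "webhook"}
    _, _ = parsed, err

    // IDs whose name contains further colons (e.g. "...:localhost:55638") do not
    // round-trip, because parseTargetID requires exactly two tokens; the tests
    // below cover that case.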
-func parseTargetID(s string) (*TargetID, error) { - tokens := strings.Split(s, ":") - if len(tokens) != 2 { - return nil, fmt.Errorf("invalid TargetID format '%v'", s) - } - - return &TargetID{ - ID: tokens[0], - Name: tokens[1], - }, nil -} diff --git a/pkg/event/targetid_test.go b/pkg/event/targetid_test.go deleted file mode 100644 index 974c95c6..00000000 --- a/pkg/event/targetid_test.go +++ /dev/null @@ -1,117 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "reflect" - "testing" -) - -func TestTargetDString(t *testing.T) { - testCases := []struct { - tid TargetID - expectedResult string - }{ - {TargetID{}, ":"}, - {TargetID{"1", "webhook"}, "1:webhook"}, - {TargetID{"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531", "localhost:55638"}, "httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531:localhost:55638"}, - } - - for i, testCase := range testCases { - result := testCase.tid.String() - - if result != testCase.expectedResult { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestTargetDToARN(t *testing.T) { - tid := TargetID{"1", "webhook"} - testCases := []struct { - tid TargetID - region string - expectedARN ARN - }{ - {tid, "", ARN{TargetID: tid, region: ""}}, - {tid, "us-east-1", ARN{TargetID: tid, region: "us-east-1"}}, - } - - for i, testCase := range testCases { - arn := testCase.tid.ToARN(testCase.region) - - if arn != testCase.expectedARN { - t.Fatalf("test %v: ARN: expected: %v, got: %v", i+1, testCase.expectedARN, arn) - } - } -} - -func TestTargetDMarshalJSON(t *testing.T) { - testCases := []struct { - tid TargetID - expectedData []byte - expectErr bool - }{ - {TargetID{}, []byte(`":"`), false}, - {TargetID{"1", "webhook"}, []byte(`"1:webhook"`), false}, - {TargetID{"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531", "localhost:55638"}, []byte(`"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531:localhost:55638"`), false}, - } - - for i, testCase := range testCases { - data, err := testCase.tid.MarshalJSON() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(data, testCase.expectedData) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) - } - } - } -} - -func TestTargetDUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedTargetID *TargetID - expectErr bool - }{ - {[]byte(`""`), nil, true}, - {[]byte(`"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531:localhost:55638"`), nil, true}, - {[]byte(`":"`), &TargetID{}, false}, - {[]byte(`"1:webhook"`), &TargetID{"1", "webhook"}, false}, - } - - for i, testCase := range testCases { - targetID := &TargetID{} - err := targetID.UnmarshalJSON(testCase.data) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: 
error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if *targetID != *testCase.expectedTargetID { - t.Fatalf("test %v: TargetID: expected: %v, got: %v", i+1, testCase.expectedTargetID, targetID) - } - } - } -} diff --git a/pkg/event/targetidset.go b/pkg/event/targetidset.go deleted file mode 100644 index f728107a..00000000 --- a/pkg/event/targetidset.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -// TargetIDSet - Set representation of TargetIDs. -type TargetIDSet map[TargetID]struct{} - -// IsEmpty returns true if the set is empty. -func (set TargetIDSet) IsEmpty() bool { - return len(set) != 0 -} - -// Clone - returns copy of this set. -func (set TargetIDSet) Clone() TargetIDSet { - setCopy := NewTargetIDSet() - for k, v := range set { - setCopy[k] = v - } - return setCopy -} - -// add - adds TargetID to the set. -func (set TargetIDSet) add(targetID TargetID) { - set[targetID] = struct{}{} -} - -// Union - returns union with given set as new set. -func (set TargetIDSet) Union(sset TargetIDSet) TargetIDSet { - nset := set.Clone() - - for k := range sset { - nset.add(k) - } - - return nset -} - -// Difference - returns diffrence with given set as new set. -func (set TargetIDSet) Difference(sset TargetIDSet) TargetIDSet { - nset := NewTargetIDSet() - for k := range set { - if _, ok := sset[k]; !ok { - nset.add(k) - } - } - - return nset -} - -// NewTargetIDSet - creates new TargetID set with given TargetIDs. -func NewTargetIDSet(targetIDs ...TargetID) TargetIDSet { - set := make(TargetIDSet) - for _, targetID := range targetIDs { - set.add(targetID) - } - return set -} diff --git a/pkg/event/targetidset_test.go b/pkg/event/targetidset_test.go deleted file mode 100644 index b4eb03f4..00000000 --- a/pkg/event/targetidset_test.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package event - -import ( - "reflect" - "testing" -) - -func TestTargetIDSetClone(t *testing.T) { - testCases := []struct { - set TargetIDSet - targetIDToAdd TargetID - }{ - {NewTargetIDSet(), TargetID{"1", "webhook"}}, - {NewTargetIDSet(TargetID{"1", "webhook"}), TargetID{"2", "webhook"}}, - {NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"}), TargetID{"2", "webhook"}}, - } - - for i, testCase := range testCases { - result := testCase.set.Clone() - - if !reflect.DeepEqual(result, testCase.set) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.set, result) - } - - result.add(testCase.targetIDToAdd) - if reflect.DeepEqual(result, testCase.set) { - t.Fatalf("test %v: result: expected: not equal, got: equal", i+1) - } - } -} - -func TestTargetIDSetUnion(t *testing.T) { - testCases := []struct { - set TargetIDSet - setToAdd TargetIDSet - expectedResult TargetIDSet - }{ - {NewTargetIDSet(), NewTargetIDSet(), NewTargetIDSet()}, - {NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"})}, - {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"})}, - {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"2", "amqp"}), NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"})}, - {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"})}, - } - - for i, testCase := range testCases { - result := testCase.set.Union(testCase.setToAdd) - - if !reflect.DeepEqual(testCase.expectedResult, result) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestTargetIDSetDifference(t *testing.T) { - testCases := []struct { - set TargetIDSet - setToRemove TargetIDSet - expectedResult TargetIDSet - }{ - {NewTargetIDSet(), NewTargetIDSet(), NewTargetIDSet()}, - {NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet()}, - {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"})}, - {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"2", "amqp"}), NewTargetIDSet(TargetID{"1", "webhook"})}, - {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet()}, - } - - for i, testCase := range testCases { - result := testCase.set.Difference(testCase.setToRemove) - - if !reflect.DeepEqual(testCase.expectedResult, result) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestNewTargetIDSet(t *testing.T) { - testCases := []struct { - targetIDs []TargetID - expectedResult TargetIDSet - }{ - {[]TargetID{}, NewTargetIDSet()}, - {[]TargetID{{"1", "webhook"}}, NewTargetIDSet(TargetID{"1", "webhook"})}, - {[]TargetID{{"1", "webhook"}, {"2", "amqp"}}, NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"})}, - } - - for i, testCase := range testCases { - result := NewTargetIDSet(testCase.targetIDs...) - - if !reflect.DeepEqual(testCase.expectedResult, result) { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} diff --git a/pkg/event/targetlist.go b/pkg/event/targetlist.go deleted file mode 100644 index 4b97e472..00000000 --- a/pkg/event/targetlist.go +++ /dev/null @@ -1,149 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "fmt" - "sync" -) - -// Target - event target interface -type Target interface { - ID() TargetID - IsActive() (bool, error) - Save(Event) error - Send(string) error - Close() error - HasQueueStore() bool -} - -// TargetList - holds list of targets indexed by target ID. -type TargetList struct { - sync.RWMutex - targets map[TargetID]Target -} - -// Add - adds unique target to target list. -func (list *TargetList) Add(targets ...Target) error { - list.Lock() - defer list.Unlock() - - for _, target := range targets { - if _, ok := list.targets[target.ID()]; ok { - return fmt.Errorf("target %v already exists", target.ID()) - } - list.targets[target.ID()] = target - } - - return nil -} - -// Exists - checks whether target by target ID exists or not. -func (list *TargetList) Exists(id TargetID) bool { - list.RLock() - defer list.RUnlock() - - _, found := list.targets[id] - return found -} - -// TargetIDResult returns result of Remove/Send operation, sets err if -// any for the associated TargetID -type TargetIDResult struct { - // ID where the remove or send were initiated. - ID TargetID - // Stores any error while removing a target or while sending an event. - Err error -} - -// Remove - closes and removes targets by given target IDs. -func (list *TargetList) Remove(targetIDSet TargetIDSet) { - list.Lock() - defer list.Unlock() - - for id := range targetIDSet { - target, ok := list.targets[id] - if ok { - target.Close() - delete(list.targets, id) - } - } -} - -// Targets - list all targets -func (list *TargetList) Targets() []Target { - list.RLock() - defer list.RUnlock() - - targets := []Target{} - for _, tgt := range list.targets { - targets = append(targets, tgt) - } - - return targets -} - -// List - returns available target IDs. -func (list *TargetList) List() []TargetID { - list.RLock() - defer list.RUnlock() - - keys := []TargetID{} - for k := range list.targets { - keys = append(keys, k) - } - - return keys -} - -// TargetMap - returns available targets. -func (list *TargetList) TargetMap() map[TargetID]Target { - list.RLock() - defer list.RUnlock() - return list.targets -} - -// Send - sends events to targets identified by target IDs. -func (list *TargetList) Send(event Event, targetIDset TargetIDSet, resCh chan<- TargetIDResult) { - go func() { - var wg sync.WaitGroup - for id := range targetIDset { - list.RLock() - target, ok := list.targets[id] - list.RUnlock() - if ok { - wg.Add(1) - go func(id TargetID, target Target) { - defer wg.Done() - tgtRes := TargetIDResult{ID: id} - if err := target.Save(event); err != nil { - tgtRes.Err = err - } - resCh <- tgtRes - }(id, target) - } else { - resCh <- TargetIDResult{ID: id} - } - } - wg.Wait() - }() -} - -// NewTargetList - creates TargetList. 
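NewTargetList, defined next, is the entry point for the list above; Send then delivers one TargetIDResult per requested TargetID on the result channel. A compressed wiring sketch with a hypothetical no-op Target standing in for a real one (such as the webhook target earlier in this diff), and assuming event.Event is a plain struct whose definition sits outside this hunk:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/event"
)

// nopTarget is a hypothetical stand-in; any real Target (webhook, amqp, ...) fits here.
type nopTarget struct{ id event.TargetID }

func (t nopTarget) ID() event.TargetID      { return t.id }
func (t nopTarget) IsActive() (bool, error) { return true, nil }
func (t nopTarget) Save(event.Event) error  { return nil }
func (t nopTarget) Send(string) error       { return nil }
func (t nopTarget) Close() error            { return nil }
func (t nopTarget) HasQueueStore() bool     { return false }

func main() {
	id := event.TargetID{ID: "1", Name: "webhook"}

	list := event.NewTargetList()
	if err := list.Add(nopTarget{id: id}); err != nil {
		panic(err)
	}

	resCh := make(chan event.TargetIDResult)
	list.Send(event.Event{}, event.NewTargetIDSet(id), resCh)

	res := <-resCh // one result arrives per TargetID in the set
	fmt.Println(res.ID, res.Err)
}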
-func NewTargetList() *TargetList { - return &TargetList{targets: make(map[TargetID]Target)} -} diff --git a/pkg/event/targetlist_test.go b/pkg/event/targetlist_test.go deleted file mode 100644 index 905b442e..00000000 --- a/pkg/event/targetlist_test.go +++ /dev/null @@ -1,260 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package event - -import ( - "crypto/rand" - "errors" - "reflect" - "testing" - "time" -) - -type ExampleTarget struct { - id TargetID - sendErr bool - closeErr bool -} - -func (target ExampleTarget) ID() TargetID { - return target.id -} - -// Save - Sends event directly without persisting. -func (target ExampleTarget) Save(eventData Event) error { - return target.send(eventData) -} - -func (target ExampleTarget) send(eventData Event) error { - b := make([]byte, 1) - if _, err := rand.Read(b); err != nil { - panic(err) - } - - time.Sleep(time.Duration(b[0]) * time.Millisecond) - - if target.sendErr { - return errors.New("send error") - } - - return nil -} - -// Send - interface compatible method does no-op. -func (target ExampleTarget) Send(eventKey string) error { - return nil -} - -func (target ExampleTarget) Close() error { - if target.closeErr { - return errors.New("close error") - } - - return nil -} - -func (target ExampleTarget) IsActive() (bool, error) { - return false, errors.New("not connected to target server/service") -} - -// HasQueueStore - No-Op. 
Added for interface compatibility -func (target ExampleTarget) HasQueueStore() bool { - return false -} - -func TestTargetListAdd(t *testing.T) { - targetListCase1 := NewTargetList() - - targetListCase2 := NewTargetList() - if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { - panic(err) - } - - targetListCase3 := NewTargetList() - if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { - panic(err) - } - - testCases := []struct { - targetList *TargetList - target Target - expectedResult []TargetID - expectErr bool - }{ - {targetListCase1, &ExampleTarget{TargetID{"1", "webhook"}, false, false}, []TargetID{{"1", "webhook"}}, false}, - {targetListCase2, &ExampleTarget{TargetID{"1", "webhook"}, false, false}, []TargetID{{"2", "testcase"}, {"1", "webhook"}}, false}, - {targetListCase3, &ExampleTarget{TargetID{"3", "testcase"}, false, false}, nil, true}, - } - - for i, testCase := range testCases { - err := testCase.targetList.Add(testCase.target) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - result := testCase.targetList.List() - - if len(result) != len(testCase.expectedResult) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - - for _, targetID1 := range result { - var found bool - for _, targetID2 := range testCase.expectedResult { - if reflect.DeepEqual(targetID1, targetID2) { - found = true - break - } - } - if !found { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } - } -} - -func TestTargetListExists(t *testing.T) { - targetListCase1 := NewTargetList() - - targetListCase2 := NewTargetList() - if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { - panic(err) - } - - targetListCase3 := NewTargetList() - if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { - panic(err) - } - - testCases := []struct { - targetList *TargetList - targetID TargetID - expectedResult bool - }{ - {targetListCase1, TargetID{"1", "webhook"}, false}, - {targetListCase2, TargetID{"1", "webhook"}, false}, - {targetListCase3, TargetID{"3", "testcase"}, true}, - } - - for i, testCase := range testCases { - result := testCase.targetList.Exists(testCase.targetID) - - if result != testCase.expectedResult { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestTargetListList(t *testing.T) { - targetListCase1 := NewTargetList() - - targetListCase2 := NewTargetList() - if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { - panic(err) - } - - targetListCase3 := NewTargetList() - if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { - panic(err) - } - if err := targetListCase3.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { - panic(err) - } - - testCases := []struct { - targetList *TargetList - expectedResult []TargetID - }{ - {targetListCase1, []TargetID{}}, - {targetListCase2, []TargetID{{"2", "testcase"}}}, - {targetListCase3, []TargetID{{"3", "testcase"}, {"1", "webhook"}}}, - } - - for i, testCase := range testCases { - result := testCase.targetList.List() - - if len(result) != len(testCase.expectedResult) { - 
t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - - for _, targetID1 := range result { - var found bool - for _, targetID2 := range testCase.expectedResult { - if reflect.DeepEqual(targetID1, targetID2) { - found = true - break - } - } - if !found { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestTargetListSend(t *testing.T) { - targetListCase1 := NewTargetList() - - targetListCase2 := NewTargetList() - if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { - panic(err) - } - - targetListCase3 := NewTargetList() - if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { - panic(err) - } - - targetListCase4 := NewTargetList() - if err := targetListCase4.Add(&ExampleTarget{TargetID{"4", "testcase"}, true, false}); err != nil { - panic(err) - } - - testCases := []struct { - targetList *TargetList - targetID TargetID - expectErr bool - }{ - {targetListCase1, TargetID{"1", "webhook"}, false}, - {targetListCase2, TargetID{"1", "non-existent"}, false}, - {targetListCase3, TargetID{"3", "testcase"}, false}, - {targetListCase4, TargetID{"4", "testcase"}, true}, - } - - resCh := make(chan TargetIDResult) - for i, testCase := range testCases { - testCase.targetList.Send(Event{}, map[TargetID]struct{}{ - testCase.targetID: {}, - }, resCh) - res := <-resCh - expectErr := (res.Err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestNewTargetList(t *testing.T) { - if result := NewTargetList(); result == nil { - t.Fatalf("test: result: expected: , got: ") - } -} diff --git a/pkg/handlers/forwarder.go b/pkg/handlers/forwarder.go deleted file mode 100644 index 5c25f465..00000000 --- a/pkg/handlers/forwarder.go +++ /dev/null @@ -1,181 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package handlers - -import ( - "context" - "net" - "net/http" - "net/http/httputil" - "net/url" - "strings" - "time" -) - -const defaultFlushInterval = time.Duration(100) * time.Millisecond - -// Forwarder forwards all incoming HTTP requests to configured transport. 
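The Forwarder type declared next is driven entirely by the incoming request's URL: ServeHTTP's Director copies the scheme and host from inReq.URL onto the outbound request. A minimal wiring sketch as an http.Handler, with a hypothetical upstream at localhost:9000 and listen address :8080:

package main

import (
	"log"
	"net/http"

	"github.com/minio/minio/pkg/handlers"
)

func main() {
	fwd := handlers.NewForwarder(&handlers.Forwarder{
		PassHost: false, // rewrite Host to the target host
		Logger:   func(err error) { log.Println("proxy error:", err) },
	})

	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// The target is whatever r.URL points at when ServeHTTP runs,
		// so aim it at the upstream before forwarding.
		r.URL.Scheme = "http"
		r.URL.Host = "localhost:9000" // hypothetical upstream
		fwd.ServeHTTP(w, r)
	})

	log.Fatal(http.ListenAndServe(":8080", nil))
}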
-type Forwarder struct { - RoundTripper http.RoundTripper - PassHost bool - Logger func(error) - - // internal variables - rewriter *headerRewriter -} - -// NewForwarder creates an instance of Forwarder based on the provided list of configuration options -func NewForwarder(f *Forwarder) *Forwarder { - f.rewriter = &headerRewriter{} - if f.RoundTripper == nil { - f.RoundTripper = http.DefaultTransport - } - - return f -} - -// ServeHTTP forwards HTTP traffic using the configured transport -func (f *Forwarder) ServeHTTP(w http.ResponseWriter, inReq *http.Request) { - outReq := new(http.Request) - *outReq = *inReq // includes shallow copies of maps, but we handle this in Director - - revproxy := httputil.ReverseProxy{ - Director: func(req *http.Request) { - f.modifyRequest(req, inReq.URL) - }, - Transport: f.RoundTripper, - FlushInterval: defaultFlushInterval, - ErrorHandler: f.customErrHandler, - } - revproxy.ServeHTTP(w, outReq) -} - -// customErrHandler is originally implemented to avoid having the following error -// `http: proxy error: context canceled` printed by Golang -func (f *Forwarder) customErrHandler(w http.ResponseWriter, r *http.Request, err error) { - if f.Logger != nil && err != context.Canceled { - f.Logger(err) - } - w.WriteHeader(http.StatusBadGateway) -} - -func (f *Forwarder) getURLFromRequest(req *http.Request) *url.URL { - // If the Request was created by Go via a real HTTP request, RequestURI will - // contain the original query string. If the Request was created in code, RequestURI - // will be empty, and we will use the URL object instead - u := req.URL - if req.RequestURI != "" { - parsedURL, err := url.ParseRequestURI(req.RequestURI) - if err == nil { - u = parsedURL - } - } - return u -} - -// copyURL provides update safe copy by avoiding shallow copying User field -func copyURL(i *url.URL) *url.URL { - out := *i - if i.User != nil { - u := *i.User - out.User = &u - } - return &out -} - -// Modify the request to handle the target URL -func (f *Forwarder) modifyRequest(outReq *http.Request, target *url.URL) { - outReq.URL = copyURL(outReq.URL) - outReq.URL.Scheme = target.Scheme - outReq.URL.Host = target.Host - - u := f.getURLFromRequest(outReq) - - outReq.URL.Path = u.Path - outReq.URL.RawPath = u.RawPath - outReq.URL.RawQuery = u.RawQuery - outReq.RequestURI = "" // Outgoing request should not have RequestURI - - // Do not pass client Host header unless requested. - if !f.PassHost { - outReq.Host = target.Host - } - - // TODO: only supports HTTP 1.1 for now. 
- outReq.Proto = "HTTP/1.1" - outReq.ProtoMajor = 1 - outReq.ProtoMinor = 1 - - f.rewriter.Rewrite(outReq) - - // Disable closeNotify when method GET for http pipelining - if outReq.Method == http.MethodGet { - quietReq := outReq.WithContext(context.Background()) - *outReq = *quietReq - } -} - -// headerRewriter is responsible for removing hop-by-hop headers and setting forwarding headers -type headerRewriter struct{} - -// Clean up IP in case if it is ipv6 address and it has {zone} information in it, like -// "[fe80::d806:a55d:eb1b:49cc%vEthernet (vmxnet3 Ethernet Adapter - Virtual Switch)]:64692" -func ipv6fix(clientIP string) string { - return strings.Split(clientIP, "%")[0] -} - -func (rw *headerRewriter) Rewrite(req *http.Request) { - if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { - clientIP = ipv6fix(clientIP) - if req.Header.Get(xRealIP) == "" { - req.Header.Set(xRealIP, clientIP) - } - } - - xfProto := req.Header.Get(xForwardedProto) - if xfProto == "" { - if req.TLS != nil { - req.Header.Set(xForwardedProto, "https") - } else { - req.Header.Set(xForwardedProto, "http") - } - } - - if xfPort := req.Header.Get(xForwardedPort); xfPort == "" { - req.Header.Set(xForwardedPort, forwardedPort(req)) - } - - if xfHost := req.Header.Get(xForwardedHost); xfHost == "" && req.Host != "" { - req.Header.Set(xForwardedHost, req.Host) - } -} - -func forwardedPort(req *http.Request) string { - if req == nil { - return "" - } - - if _, port, err := net.SplitHostPort(req.Host); err == nil && port != "" { - return port - } - - if req.TLS != nil { - return "443" - } - - return "80" -} diff --git a/pkg/handlers/proxy.go b/pkg/handlers/proxy.go deleted file mode 100644 index 761cf0b8..00000000 --- a/pkg/handlers/proxy.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package handlers - -import ( - "net" - "net/http" - "regexp" - "strings" -) - -var ( - // De-facto standard header keys. - xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") - xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host") - xForwardedPort = http.CanonicalHeaderKey("X-Forwarded-Port") - xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto") - xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme") - xRealIP = http.CanonicalHeaderKey("X-Real-IP") -) - -var ( - // RFC7239 defines a new "Forwarded: " header designed to replace the - // existing use of X-Forwarded-* headers. - // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43 - forwarded = http.CanonicalHeaderKey("Forwarded") - // Allows for a sub-match of the first value after 'for=' to the next - // comma, semi-colon or space. The match is case-insensitive. - forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)(.*)`) - // Allows for a sub-match for the first instance of scheme (http|https) - // prefixed by 'proto='. The match is case-insensitive. 
- protoRegex = regexp.MustCompile(`(?i)^(;|,| )+(?:proto=)(https|http)`) -) - -// GetSourceScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239 -// Forwarded headers (in that order). -func GetSourceScheme(r *http.Request) string { - var scheme string - - // Retrieve the scheme from X-Forwarded-Proto. - if proto := r.Header.Get(xForwardedProto); proto != "" { - scheme = strings.ToLower(proto) - } else if proto = r.Header.Get(xForwardedScheme); proto != "" { - scheme = strings.ToLower(proto) - } else if proto := r.Header.Get(forwarded); proto != "" { - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'for=', which we ignore, subsequently we proceed to look for - // 'proto=' which should precede right after `for=` if not - // we simply ignore the values and return empty. This is in line - // with the approach we took for returning first ip from multiple - // params. - if match := forRegex.FindStringSubmatch(proto); len(match) > 1 { - if match = protoRegex.FindStringSubmatch(match[2]); len(match) > 1 { - scheme = strings.ToLower(match[2]) - } - } - } - - return scheme -} - -// GetSourceIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239 -// Forwarded headers (in that order), falls back to r.RemoteAddr when all -// else fails. -func GetSourceIP(r *http.Request) string { - var addr string - - if fwd := r.Header.Get(xForwardedFor); fwd != "" { - // Only grab the first (client) address. Note that '192.168.0.1, - // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after - // the first may represent forwarding proxies earlier in the chain. - s := strings.Index(fwd, ", ") - if s == -1 { - s = len(fwd) - } - addr = fwd[:s] - } else if fwd := r.Header.Get(xRealIP); fwd != "" { - // X-Real-IP should only contain one IP address (the client making the - // request). - addr = fwd - } else if fwd := r.Header.Get(forwarded); fwd != "" { - // match should contain at least two elements if the protocol was - // specified in the Forwarded header. The first element will always be - // the 'for=' capture, which we ignore. In the case of multiple IP - // addresses (for=8.8.8.8, 8.8.4.4, 172.16.1.20 is valid) we only - // extract the first, which should be the client IP. - if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { - // IPv6 addresses in Forwarded headers are quoted-strings. We strip - // these quotes. - addr = strings.Trim(match[1], `"`) - } - } - - if addr != "" { - return addr - } - - // Default to remote address if headers not set. - addr, _, _ = net.SplitHostPort(r.RemoteAddr) - return addr -} diff --git a/pkg/handlers/proxy_test.go b/pkg/handlers/proxy_test.go deleted file mode 100644 index 2b17920c..00000000 --- a/pkg/handlers/proxy_test.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package handlers - -import ( - "net/http" - "testing" -) - -type headerTest struct { - key string // header key - val string // header val - expected string // expected result -} - -func TestGetScheme(t *testing.T) { - headers := []headerTest{ - {xForwardedProto, "https", "https"}, - {xForwardedProto, "http", "http"}, - {xForwardedProto, "HTTP", "http"}, - {xForwardedScheme, "https", "https"}, - {xForwardedScheme, "http", "http"}, - {xForwardedScheme, "HTTP", "http"}, - {forwarded, `For="[2001:db8:cafe::17]:4711`, ""}, // No proto - {forwarded, `for=192.0.2.43, for=198.51.100.17;proto=https`, ""}, // Multiple params, will be empty. - {forwarded, `for=172.32.10.15; proto=https;by=127.0.0.1;`, "https"}, // Space before proto - {forwarded, `for=192.0.2.60;proto=http;by=203.0.113.43`, "http"}, // Multiple params - } - for _, v := range headers { - req := &http.Request{ - Header: http.Header{ - v.key: []string{v.val}, - }} - res := GetSourceScheme(req) - if res != v.expected { - t.Errorf("wrong header for %s: got %s want %s", v.key, res, - v.expected) - } - } -} - -// TestGetSourceIP - check the source ip of a request is parsed correctly. -func TestGetSourceIP(t *testing.T) { - headers := []headerTest{ - {xForwardedFor, "8.8.8.8", "8.8.8.8"}, // Single address - {xForwardedFor, "8.8.8.8, 8.8.4.4", "8.8.8.8"}, // Multiple - {xForwardedFor, "", ""}, // None - {xRealIP, "8.8.8.8", "8.8.8.8"}, // Single address - {xRealIP, "[2001:db8:cafe::17]:4711", "[2001:db8:cafe::17]:4711"}, // IPv6 address - {xRealIP, "", ""}, // None - {forwarded, `for="_gazonk"`, "_gazonk"}, // Hostname - {forwarded, `For="[2001:db8:cafe::17]:4711`, `[2001:db8:cafe::17]:4711`}, // IPv6 address - {forwarded, `for=192.0.2.60;proto=http;by=203.0.113.43`, `192.0.2.60`}, // Multiple params - {forwarded, `for=192.0.2.43, for=198.51.100.17`, "192.0.2.43"}, // Multiple params - {forwarded, `for="workstation.local",for=198.51.100.17`, "workstation.local"}, // Hostname - } - - for _, v := range headers { - req := &http.Request{ - Header: http.Header{ - v.key: []string{v.val}, - }} - res := GetSourceIP(req) - if res != v.expected { - t.Errorf("wrong header for %s: got %s want %s", v.key, res, - v.expected) - } - } -} diff --git a/pkg/hash/errors.go b/pkg/hash/errors.go deleted file mode 100644 index af2beb5c..00000000 --- a/pkg/hash/errors.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package hash - -import "fmt" - -// SHA256Mismatch - when content sha256 does not match with what was sent from client. -type SHA256Mismatch struct { - ExpectedSHA256 string - CalculatedSHA256 string -} - -func (e SHA256Mismatch) Error() string { - return "Bad sha256: Expected " + e.ExpectedSHA256 + " does not match calculated " + e.CalculatedSHA256 -} - -// BadDigest - Content-MD5 you specified did not match what we received. 
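SHA256Mismatch above, together with the BadDigest and ErrSizeMismatch types that follow, are returned by value, so callers can tell them apart with a plain type switch. A small sketch; classify is a hypothetical helper, not part of the package:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/hash"
)

// classify is a hypothetical helper showing how callers distinguish these errors.
func classify(err error) string {
	switch err.(type) {
	case hash.SHA256Mismatch:
		return "sha256 mismatch"
	case hash.BadDigest:
		return "md5 mismatch"
	case hash.ErrSizeMismatch:
		return "size mismatch"
	default:
		return "other"
	}
}

func main() {
	fmt.Println(classify(hash.BadDigest{ExpectedMD5: "a", CalculatedMD5: "b"})) // md5 mismatch
}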
-type BadDigest struct { - ExpectedMD5 string - CalculatedMD5 string -} - -func (e BadDigest) Error() string { - return "Bad digest: Expected " + e.ExpectedMD5 + " does not match calculated " + e.CalculatedMD5 -} - -// ErrSizeMismatch error size mismatch -type ErrSizeMismatch struct { - Want int64 - Got int64 -} - -func (e ErrSizeMismatch) Error() string { - return fmt.Sprintf("Size mismatch: got %d, want %d", e.Got, e.Want) -} diff --git a/pkg/hash/reader.go b/pkg/hash/reader.go deleted file mode 100644 index 8f3e5d7b..00000000 --- a/pkg/hash/reader.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package hash - -import ( - "bytes" - "crypto/md5" - "encoding/base64" - "encoding/hex" - "errors" - "hash" - "io" - - sha256 "github.com/minio/sha256-simd" -) - -// Reader writes what it reads from an io.Reader to an MD5 and SHA256 hash.Hash. -// Reader verifies that the content of the io.Reader matches the expected checksums. -type Reader struct { - src io.Reader - size int64 - actualSize int64 - bytesRead int64 - - md5sum, sha256sum []byte // Byte values of md5sum, sha256sum of client sent values. - md5Hash, sha256Hash hash.Hash -} - -// NewReader returns a new hash Reader which computes the MD5 sum and -// SHA256 sum (if set) of the provided io.Reader at EOF. -func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64, strictCompat bool) (*Reader, error) { - if r, ok := src.(*Reader); ok { - // Merge expectations and return parent. - return r.merge(size, md5Hex, sha256Hex, actualSize, strictCompat) - } - - // Create empty reader and merge into that. - r := Reader{src: src, size: -1, actualSize: -1} - return r.merge(size, md5Hex, sha256Hex, actualSize, strictCompat) -} - -func (r *Reader) Read(p []byte) (n int, err error) { - n, err = r.src.Read(p) - if n > 0 { - if r.md5Hash != nil { - r.md5Hash.Write(p[:n]) - } - if r.sha256Hash != nil { - r.sha256Hash.Write(p[:n]) - } - } - r.bytesRead += int64(n) - - // At io.EOF verify if the checksums are right. - if err == io.EOF { - if cerr := r.verify(); cerr != nil { - return 0, cerr - } - } - - return -} - -// Size returns the absolute number of bytes the Reader -// will return during reading. It returns -1 for unlimited -// data. -func (r *Reader) Size() int64 { return r.size } - -// ActualSize returns the pre-modified size of the object. -// DecompressedSize - For compressed objects. -func (r *Reader) ActualSize() int64 { return r.actualSize } - -// MD5 - returns byte md5 value -func (r *Reader) MD5() []byte { - return r.md5sum -} - -// MD5Current - returns byte md5 value of the current state -// of the md5 hash after reading the incoming content. -// NOTE: Calling this function multiple times might yield -// different results if they are intermixed with Reader. 
-func (r *Reader) MD5Current() []byte { - if r.md5Hash != nil { - return r.md5Hash.Sum(nil) - } - return nil -} - -// SHA256 - returns byte sha256 value -func (r *Reader) SHA256() []byte { - return r.sha256sum -} - -// MD5HexString returns hex md5 value. -func (r *Reader) MD5HexString() string { - return hex.EncodeToString(r.md5sum) -} - -// MD5Base64String returns base64 encoded MD5sum value. -func (r *Reader) MD5Base64String() string { - return base64.StdEncoding.EncodeToString(r.md5sum) -} - -// SHA256HexString returns hex sha256 value. -func (r *Reader) SHA256HexString() string { - return hex.EncodeToString(r.sha256sum) -} - -// verify verifies if the computed MD5 sum and SHA256 sum are -// equal to the ones specified when creating the Reader. -func (r *Reader) verify() error { - if r.sha256Hash != nil && len(r.sha256sum) > 0 { - if sum := r.sha256Hash.Sum(nil); !bytes.Equal(r.sha256sum, sum) { - return SHA256Mismatch{hex.EncodeToString(r.sha256sum), hex.EncodeToString(sum)} - } - } - if r.md5Hash != nil && len(r.md5sum) > 0 { - if sum := r.md5Hash.Sum(nil); !bytes.Equal(r.md5sum, sum) { - return BadDigest{hex.EncodeToString(r.md5sum), hex.EncodeToString(sum)} - } - } - return nil -} - -// merge another hash into this one. -// There cannot be conflicting information given. -func (r *Reader) merge(size int64, md5Hex, sha256Hex string, actualSize int64, strictCompat bool) (*Reader, error) { - if r.bytesRead > 0 { - return nil, errors.New("internal error: Already read from hash reader") - } - // Merge sizes. - // If not set before, just add it. - if r.size < 0 && size >= 0 { - r.src = io.LimitReader(r.src, size) - r.size = size - } - // If set before and set now they must match. - if r.size >= 0 && size >= 0 && r.size != size { - return nil, ErrSizeMismatch{Want: r.size, Got: size} - } - - if r.actualSize <= 0 && actualSize >= 0 { - r.actualSize = actualSize - } - - // Merge SHA256. - sha256sum, err := hex.DecodeString(sha256Hex) - if err != nil { - return nil, SHA256Mismatch{} - } - - // If both are set, they must be the same. - if r.sha256Hash != nil && len(sha256sum) > 0 { - if !bytes.Equal(r.sha256sum, sha256sum) { - return nil, SHA256Mismatch{} - } - } else if len(sha256sum) > 0 { - r.sha256Hash = sha256.New() - r.sha256sum = sha256sum - } - - // Merge MD5 Sum. - md5sum, err := hex.DecodeString(md5Hex) - if err != nil { - return nil, BadDigest{} - } - // If both are set, they must expect the same. - if r.md5Hash != nil && len(md5sum) > 0 { - if !bytes.Equal(r.md5sum, md5sum) { - return nil, BadDigest{} - } - } else if len(md5sum) > 0 || (r.md5Hash == nil && strictCompat) { - r.md5Hash = md5.New() - r.md5sum = md5sum - } - return r, nil -} - -// Close and release resources. -func (r *Reader) Close() error { - // Support the io.Closer interface. - return nil -} diff --git a/pkg/hash/reader_test.go b/pkg/hash/reader_test.go deleted file mode 100644 index 1942b377..00000000 --- a/pkg/hash/reader_test.go +++ /dev/null @@ -1,317 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package hash - -import ( - "bytes" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "testing" -) - -// Tests functions like Size(), MD5*(), SHA256*() -func TestHashReaderHelperMethods(t *testing.T) { - r, err := NewReader(bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4, false) - if err != nil { - t.Fatal(err) - } - _, err = io.Copy(ioutil.Discard, r) - if err != nil { - t.Fatal(err) - } - if r.MD5HexString() != "e2fc714c4727ee9395f324cd2e7f331f" { - t.Errorf("Expected md5hex \"e2fc714c4727ee9395f324cd2e7f331f\", got %s", r.MD5HexString()) - } - if r.SHA256HexString() != "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589" { - t.Errorf("Expected sha256hex \"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589\", got %s", r.SHA256HexString()) - } - if r.MD5Base64String() != "4vxxTEcn7pOV8yTNLn8zHw==" { - t.Errorf("Expected md5base64 \"4vxxTEcn7pOV8yTNLn8zHw==\", got \"%s\"", r.MD5Base64String()) - } - if r.Size() != 4 { - t.Errorf("Expected size 4, got %d", r.Size()) - } - if r.ActualSize() != 4 { - t.Errorf("Expected size 4, got %d", r.ActualSize()) - } - expectedMD5, err := hex.DecodeString("e2fc714c4727ee9395f324cd2e7f331f") - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(r.MD5(), expectedMD5) { - t.Errorf("Expected md5hex \"e2fc714c4727ee9395f324cd2e7f331f\", got %s", r.MD5HexString()) - } - if !bytes.Equal(r.MD5Current(), expectedMD5) { - t.Errorf("Expected md5hex \"e2fc714c4727ee9395f324cd2e7f331f\", got %s", hex.EncodeToString(r.MD5Current())) - } - expectedSHA256, err := hex.DecodeString("88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589") - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(r.SHA256(), expectedSHA256) { - t.Errorf("Expected md5hex \"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589\", got %s", r.SHA256HexString()) - } -} - -// Tests hash reader checksum verification. 
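Before the table-driven verification test below, a minimal standalone sketch of the intended call pattern; the checksums are the ones for "abcd" used throughout these tests, and verification fires when the underlying reader hits io.EOF:

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"

	"github.com/minio/minio/pkg/hash"
)

func main() {
	// MD5 and SHA256 of "abcd", as used in the tests in this file.
	r, err := hash.NewReader(bytes.NewReader([]byte("abcd")), 4,
		"e2fc714c4727ee9395f324cd2e7f331f",
		"88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589",
		4, false)
	if err != nil {
		log.Fatal(err)
	}

	// A mismatch surfaces here as hash.BadDigest or hash.SHA256Mismatch at io.EOF.
	if _, err := io.Copy(ioutil.Discard, r); err != nil {
		log.Fatal(err)
	}
	log.Printf("verified md5=%s sha256=%s", r.MD5HexString(), r.SHA256HexString())
}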
-func TestHashReaderVerification(t *testing.T) { - testCases := []struct { - desc string - src io.Reader - size int64 - actualSize int64 - md5hex, sha256hex string - err error - }{ - { - desc: "Success, no checksum verification provided.", - src: bytes.NewReader([]byte("abcd")), - size: 4, - actualSize: 4, - }, - { - desc: "Failure md5 mismatch.", - src: bytes.NewReader([]byte("abcd")), - size: 4, - actualSize: 4, - md5hex: "d41d8cd98f00b204e9800998ecf8427f", - err: BadDigest{ - "d41d8cd98f00b204e9800998ecf8427f", - "e2fc714c4727ee9395f324cd2e7f331f", - }, - }, - { - desc: "Failure sha256 mismatch.", - src: bytes.NewReader([]byte("abcd")), - size: 4, - actualSize: 4, - sha256hex: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", - err: SHA256Mismatch{ - "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031580", - "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", - }, - }, - { - desc: "Nested hash reader NewReader() should merge.", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "", "", 4, false), - size: 4, - actualSize: 4, - }, - { - desc: "Incorrect sha256, nested", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "", "", 4, false), - size: 4, - actualSize: 4, - sha256hex: "50d858e0985ecc7f60418aaf0cc5ab587f42c2570a884095a9e8ccacd0f6545c", - err: SHA256Mismatch{ - ExpectedSHA256: "50d858e0985ecc7f60418aaf0cc5ab587f42c2570a884095a9e8ccacd0f6545c", - CalculatedSHA256: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", - }, - }, - { - desc: "Correct sha256, nested", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "", "", 4, false), - size: 4, - actualSize: 4, - sha256hex: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", - }, - { - desc: "Correct sha256, nested, truncated", - src: mustReader(t, bytes.NewReader([]byte("abcd-more-stuff-to-be ignored")), 4, "", "", 4, false), - size: 4, - actualSize: -1, - sha256hex: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", - }, - { - desc: "Correct sha256, nested, truncated, swapped", - src: mustReader(t, bytes.NewReader([]byte("abcd-more-stuff-to-be ignored")), 4, "", "", -1, false), - size: 4, - actualSize: -1, - sha256hex: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", - }, - { - desc: "Incorrect MD5, nested", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "", "", 4, false), - size: 4, - actualSize: 4, - md5hex: "0773da587b322af3a8718cb418a715ce", - err: BadDigest{ - ExpectedMD5: "0773da587b322af3a8718cb418a715ce", - CalculatedMD5: "e2fc714c4727ee9395f324cd2e7f331f", - }, - }, - { - desc: "Correct sha256, truncated", - src: bytes.NewReader([]byte("abcd-morethan-4-bytes")), - size: 4, - actualSize: 4, - sha256hex: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", - }, - { - desc: "Correct MD5, nested", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "", "", 4, false), - size: 4, - actualSize: 4, - md5hex: "e2fc714c4727ee9395f324cd2e7f331f", - }, - { - desc: "Correct MD5, truncated", - src: bytes.NewReader([]byte("abcd-morethan-4-bytes")), - size: 4, - actualSize: 4, - sha256hex: "", - md5hex: "e2fc714c4727ee9395f324cd2e7f331f", - }, - { - desc: "Correct MD5, nested, truncated", - src: mustReader(t, bytes.NewReader([]byte("abcd-morestuff")), -1, "", "", -1, false), - size: 4, - actualSize: 4, - md5hex: "e2fc714c4727ee9395f324cd2e7f331f", - }, - } - for i, testCase := range testCases { - t.Run(fmt.Sprintf("case-%d", i+1), func(t *testing.T) { - r, err := 
NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize, false) - if err != nil { - t.Fatalf("Test %q: Initializing reader failed %s", testCase.desc, err) - } - _, err = io.Copy(ioutil.Discard, r) - if err != nil { - if err.Error() != testCase.err.Error() { - t.Errorf("Test %q: Expected error %s, got error %s", testCase.desc, testCase.err, err) - } - } - }) - } -} - -func mustReader(t *testing.T, src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64, strictCompat bool) *Reader { - r, err := NewReader(src, size, md5Hex, sha256Hex, actualSize, strictCompat) - if err != nil { - t.Fatal(err) - } - return r -} - -// Tests NewReader() constructor with invalid arguments. -func TestHashReaderInvalidArguments(t *testing.T) { - testCases := []struct { - desc string - src io.Reader - size int64 - actualSize int64 - md5hex, sha256hex string - success bool - expectedErr error - strict bool - }{ - { - desc: "Invalid md5sum NewReader() will fail.", - src: bytes.NewReader([]byte("abcd")), - size: 4, - actualSize: 4, - md5hex: "invalid-md5", - success: false, - expectedErr: BadDigest{}, - }, - { - desc: "Invalid sha256 NewReader() will fail.", - src: bytes.NewReader([]byte("abcd")), - size: 4, - actualSize: 4, - sha256hex: "invalid-sha256", - success: false, - expectedErr: SHA256Mismatch{}, - }, - { - desc: "Nested hash reader NewReader() should merge.", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "", "", 4, false), - size: 4, - actualSize: 4, - success: true, - }, - { - desc: "Mismatching sha256", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4, false), - size: 4, - actualSize: 4, - sha256hex: "50d858e0985ecc7f60418aaf0cc5ab587f42c2570a884095a9e8ccacd0f6545c", - success: false, - expectedErr: SHA256Mismatch{}, - }, - { - desc: "Correct sha256", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4, false), - size: 4, - actualSize: 4, - sha256hex: "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", - success: true, - }, - { - desc: "Mismatching MD5", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "", 4, false), - size: 4, - actualSize: 4, - md5hex: "0773da587b322af3a8718cb418a715ce", - success: false, - expectedErr: BadDigest{}, - }, - { - desc: "Correct MD5", - src: mustReader(t, bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "", 4, false), - size: 4, - actualSize: 4, - md5hex: "e2fc714c4727ee9395f324cd2e7f331f", - success: true, - }, - { - desc: "Nothing, all ok", - src: bytes.NewReader([]byte("abcd")), - size: 4, - actualSize: 4, - success: true, - }, - { - desc: "Nested, size mismatch", - src: mustReader(t, bytes.NewReader([]byte("abcd-morestuff")), 4, "", "", -1, false), - size: 2, - actualSize: -1, - success: false, - expectedErr: ErrSizeMismatch{Want: 4, Got: 2}, - }, - } - - for i, testCase := range testCases { - t.Run(fmt.Sprintf("case-%d", i+1), func(t *testing.T) { - _, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize, testCase.strict) - if err != nil && testCase.success { - t.Errorf("Test %q: Expected success, but got error %s instead", testCase.desc, err) - } - if err == nil && !testCase.success { - t.Errorf("Test %q: Expected error, but got success", testCase.desc) - } - if !testCase.success { - if err != testCase.expectedErr { - 
t.Errorf("Test %q: Expected error %v, but got %v", testCase.desc, testCase.expectedErr, err) - } - } - }) - } -} diff --git a/pkg/iam/policy/action.go b/pkg/iam/policy/action.go deleted file mode 100644 index cc07f9cf..00000000 --- a/pkg/iam/policy/action.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "github.com/minio/minio/pkg/bucket/policy/condition" - "github.com/minio/minio/pkg/wildcard" -) - -// Action - policy action. -// Refer https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html -// for more information about available actions. -type Action string - -const ( - // AbortMultipartUploadAction - AbortMultipartUpload Rest API action. - AbortMultipartUploadAction Action = "s3:AbortMultipartUpload" - - // CreateBucketAction - CreateBucket Rest API action. - CreateBucketAction = "s3:CreateBucket" - - // DeleteBucketAction - DeleteBucket Rest API action. - DeleteBucketAction = "s3:DeleteBucket" - - // ForceDeleteBucketAction - DeleteBucket Rest API action when x-minio-force-delete flag - // is specified. - ForceDeleteBucketAction = "s3:ForceDeleteBucket" - - // DeleteBucketPolicyAction - DeleteBucketPolicy Rest API action. - DeleteBucketPolicyAction = "s3:DeleteBucketPolicy" - - // DeleteObjectAction - DeleteObject Rest API action. - DeleteObjectAction = "s3:DeleteObject" - - // GetBucketLocationAction - GetBucketLocation Rest API action. - GetBucketLocationAction = "s3:GetBucketLocation" - - // GetBucketNotificationAction - GetBucketNotification Rest API action. - GetBucketNotificationAction = "s3:GetBucketNotification" - - // GetBucketPolicyAction - GetBucketPolicy Rest API action. - GetBucketPolicyAction = "s3:GetBucketPolicy" - - // GetObjectAction - GetObject Rest API action. - GetObjectAction = "s3:GetObject" - - // HeadBucketAction - HeadBucket Rest API action. This action is unused in minio. - HeadBucketAction = "s3:HeadBucket" - - // ListAllMyBucketsAction - ListAllMyBuckets (List buckets) Rest API action. - ListAllMyBucketsAction = "s3:ListAllMyBuckets" - - // ListBucketAction - ListBucket Rest API action. - ListBucketAction = "s3:ListBucket" - - // ListBucketMultipartUploadsAction - ListMultipartUploads Rest API action. - ListBucketMultipartUploadsAction = "s3:ListBucketMultipartUploads" - - // ListenBucketNotificationAction - ListenBucketNotification Rest API action. - // This is MinIO extension. - ListenBucketNotificationAction = "s3:ListenBucketNotification" - - // ListMultipartUploadPartsAction - ListParts Rest API action. - ListMultipartUploadPartsAction = "s3:ListMultipartUploadParts" - - // PutBucketLifecycleAction - PutBucketLifecycle Rest API action. - PutBucketLifecycleAction = "s3:PutLifecycleConfiguration" - - // GetBucketLifecycleAction - GetBucketLifecycle Rest API action. - GetBucketLifecycleAction = "s3:GetLifecycleConfiguration" - - // PutBucketNotificationAction - PutObjectNotification Rest API action. 
- PutBucketNotificationAction = "s3:PutBucketNotification" - - // PutBucketPolicyAction - PutBucketPolicy Rest API action. - PutBucketPolicyAction = "s3:PutBucketPolicy" - - // PutObjectAction - PutObject Rest API action. - PutObjectAction = "s3:PutObject" - - // BypassGovernanceRetentionAction - bypass governance retention for PutObjectRetention, PutObject and DeleteObject Rest API action. - BypassGovernanceRetentionAction = "s3:BypassGovernanceRetention" - - // PutObjectRetentionAction - PutObjectRetention Rest API action. - PutObjectRetentionAction = "s3:PutObjectRetention" - - // GetObjectRetentionAction - GetObjectRetention, GetObject, HeadObject Rest API action. - GetObjectRetentionAction = "s3:GetObjectRetention" - - // GetObjectLegalHoldAction - GetObjectLegalHold, GetObject Rest API action. - GetObjectLegalHoldAction = "s3:GetObjectLegalHold" - - // PutObjectLegalHoldAction - PutObjectLegalHold, PutObject Rest API action. - PutObjectLegalHoldAction = "s3:PutObjectLegalHold" - - // GetBucketObjectLockConfigurationAction - GetBucketObjectLockConfiguration Rest API action - GetBucketObjectLockConfigurationAction = "s3:GetBucketObjectLockConfiguration" - - // PutBucketObjectLockConfigurationAction - PutBucketObjectLockConfiguration Rest API action - PutBucketObjectLockConfigurationAction = "s3:PutBucketObjectLockConfiguration" - - // GetBucketTaggingAction - GetBucketTagging Rest API action - GetBucketTaggingAction = "s3:GetBucketTagging" - - // PutBucketTaggingAction - PutBucketTagging Rest API action - PutBucketTaggingAction = "s3:PutBucketTagging" - - // GetObjectTaggingAction - Get Object Tags API action - GetObjectTaggingAction = "s3:GetObjectTagging" - - // PutObjectTaggingAction - Put Object Tags API action - PutObjectTaggingAction = "s3:PutObjectTagging" - - // DeleteObjectTaggingAction - Delete Object Tags API action - DeleteObjectTaggingAction = "s3:DeleteObjectTagging" - - // PutBucketEncryptionAction - PutBucketEncryption REST API action - PutBucketEncryptionAction = "s3:PutEncryptionConfiguration" - - // GetBucketEncryptionAction - GetBucketEncryption REST API action - GetBucketEncryptionAction = "s3:GetEncryptionConfiguration" - - // AllActions - all API actions - AllActions = "s3:*" -) - -// List of all supported actions. -var supportedActions = map[Action]struct{}{ - AbortMultipartUploadAction: {}, - CreateBucketAction: {}, - DeleteBucketAction: {}, - ForceDeleteBucketAction: {}, - DeleteBucketPolicyAction: {}, - DeleteObjectAction: {}, - GetBucketLocationAction: {}, - GetBucketNotificationAction: {}, - GetBucketPolicyAction: {}, - GetObjectAction: {}, - HeadBucketAction: {}, - ListAllMyBucketsAction: {}, - ListBucketAction: {}, - ListBucketMultipartUploadsAction: {}, - ListenBucketNotificationAction: {}, - ListMultipartUploadPartsAction: {}, - PutBucketLifecycleAction: {}, - GetBucketLifecycleAction: {}, - PutBucketNotificationAction: {}, - PutBucketPolicyAction: {}, - PutObjectAction: {}, - BypassGovernanceRetentionAction: {}, - PutObjectRetentionAction: {}, - GetObjectRetentionAction: {}, - GetObjectLegalHoldAction: {}, - PutObjectLegalHoldAction: {}, - GetBucketObjectLockConfigurationAction: {}, - PutBucketObjectLockConfigurationAction: {}, - GetBucketTaggingAction: {}, - PutBucketTaggingAction: {}, - GetObjectTaggingAction: {}, - PutObjectTaggingAction: {}, - DeleteObjectTaggingAction: {}, - PutBucketEncryptionAction: {}, - GetBucketEncryptionAction: {}, - AllActions: {}, -} - -// List of all supported object actions. 
-var supportedObjectActions = map[Action]struct{}{ - AllActions: {}, - AbortMultipartUploadAction: {}, - DeleteObjectAction: {}, - GetObjectAction: {}, - ListMultipartUploadPartsAction: {}, - PutObjectAction: {}, - BypassGovernanceRetentionAction: {}, - PutObjectRetentionAction: {}, - GetObjectRetentionAction: {}, - PutObjectLegalHoldAction: {}, - GetObjectLegalHoldAction: {}, - GetObjectTaggingAction: {}, - PutObjectTaggingAction: {}, - DeleteObjectTaggingAction: {}, -} - -// isObjectAction - returns whether action is object type or not. -func (action Action) isObjectAction() bool { - _, ok := supportedObjectActions[action] - return ok -} - -// Match - matches object name with resource pattern. -func (action Action) Match(a Action) bool { - return wildcard.Match(string(action), string(a)) -} - -// IsValid - checks if action is valid or not. -func (action Action) IsValid() bool { - _, ok := supportedActions[action] - return ok -} - -// actionConditionKeyMap - holds mapping of supported condition key for an action. -var actionConditionKeyMap = map[Action]condition.KeySet{ - AllActions: condition.NewKeySet(condition.AllSupportedKeys...), - - AbortMultipartUploadAction: condition.NewKeySet(condition.CommonKeys...), - - CreateBucketAction: condition.NewKeySet(condition.CommonKeys...), - - DeleteBucketPolicyAction: condition.NewKeySet(condition.CommonKeys...), - - DeleteObjectAction: condition.NewKeySet(condition.CommonKeys...), - - GetBucketLocationAction: condition.NewKeySet(condition.CommonKeys...), - - GetBucketNotificationAction: condition.NewKeySet(condition.CommonKeys...), - - GetBucketPolicyAction: condition.NewKeySet(condition.CommonKeys...), - - GetObjectAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3XAmzServerSideEncryption, - condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3XAmzStorageClass, - }, condition.CommonKeys...)...), - - HeadBucketAction: condition.NewKeySet(condition.CommonKeys...), - - ListAllMyBucketsAction: condition.NewKeySet(condition.CommonKeys...), - - ListBucketAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3Prefix, - condition.S3Delimiter, - condition.S3MaxKeys, - }, condition.CommonKeys...)...), - - ListBucketMultipartUploadsAction: condition.NewKeySet(condition.CommonKeys...), - - ListenBucketNotificationAction: condition.NewKeySet(condition.CommonKeys...), - - ListMultipartUploadPartsAction: condition.NewKeySet(condition.CommonKeys...), - - PutBucketNotificationAction: condition.NewKeySet(condition.CommonKeys...), - - PutBucketPolicyAction: condition.NewKeySet(condition.CommonKeys...), - - PutObjectAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3XAmzCopySource, - condition.S3XAmzServerSideEncryption, - condition.S3XAmzServerSideEncryptionCustomerAlgorithm, - condition.S3XAmzMetadataDirective, - condition.S3XAmzStorageClass, - condition.S3ObjectLockRetainUntilDate, - condition.S3ObjectLockMode, - condition.S3ObjectLockLegalHold, - }, condition.CommonKeys...)...), - - // https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html - // LockLegalHold is not supported with PutObjectRetentionAction - PutObjectRetentionAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3ObjectLockRemainingRetentionDays, - condition.S3ObjectLockRetainUntilDate, - condition.S3ObjectLockMode, - }, condition.CommonKeys...)...), - - GetObjectRetentionAction: condition.NewKeySet(condition.CommonKeys...), - PutObjectLegalHoldAction: condition.NewKeySet( - append([]condition.Key{ - 
condition.S3ObjectLockLegalHold, - }, condition.CommonKeys...)...), - GetObjectLegalHoldAction: condition.NewKeySet(condition.CommonKeys...), - - // https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html - BypassGovernanceRetentionAction: condition.NewKeySet( - append([]condition.Key{ - condition.S3ObjectLockRemainingRetentionDays, - condition.S3ObjectLockRetainUntilDate, - condition.S3ObjectLockMode, - condition.S3ObjectLockLegalHold, - }, condition.CommonKeys...)...), - - GetBucketObjectLockConfigurationAction: condition.NewKeySet(condition.CommonKeys...), - PutBucketObjectLockConfigurationAction: condition.NewKeySet(condition.CommonKeys...), - GetBucketTaggingAction: condition.NewKeySet(condition.CommonKeys...), - PutBucketTaggingAction: condition.NewKeySet(condition.CommonKeys...), - PutObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), - GetObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), - DeleteObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...), -} diff --git a/pkg/iam/policy/action_test.go b/pkg/iam/policy/action_test.go deleted file mode 100644 index 653ee4a5..00000000 --- a/pkg/iam/policy/action_test.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "testing" -) - -func TestActionIsObjectAction(t *testing.T) { - testCases := []struct { - action Action - expectedResult bool - }{ - {AbortMultipartUploadAction, true}, - {DeleteObjectAction, true}, - {GetObjectAction, true}, - {ListMultipartUploadPartsAction, true}, - {PutObjectAction, true}, - {CreateBucketAction, false}, - } - - for i, testCase := range testCases { - result := testCase.action.isObjectAction() - - if testCase.expectedResult != result { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestActionIsValid(t *testing.T) { - testCases := []struct { - action Action - expectedResult bool - }{ - {PutObjectAction, true}, - {AbortMultipartUploadAction, true}, - {Action("foo"), false}, - } - - for i, testCase := range testCases { - result := testCase.action.IsValid() - - if testCase.expectedResult != result { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} diff --git a/pkg/iam/policy/actionset.go b/pkg/iam/policy/actionset.go deleted file mode 100644 index ef6ad588..00000000 --- a/pkg/iam/policy/actionset.go +++ /dev/null @@ -1,167 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "encoding/json" - "fmt" - "sort" - - "github.com/minio/minio-go/v6/pkg/set" -) - -// ActionSet - set of actions. -type ActionSet map[Action]struct{} - -// Add - add action to the set. -func (actionSet ActionSet) Add(action Action) { - actionSet[action] = struct{}{} -} - -// IsEmpty - returns if the current action set is empty -func (actionSet ActionSet) IsEmpty() bool { - return len(actionSet) == 0 -} - -// Match - matches object name with anyone of action pattern in action set. -func (actionSet ActionSet) Match(action Action) bool { - for r := range actionSet { - if r.Match(action) { - return true - } - } - - return false -} - -// Equals - checks whether given action set is equal to current action set or not. -func (actionSet ActionSet) Equals(sactionSet ActionSet) bool { - // If length of set is not equal to length of given set, the - // set is not equal to given set. - if len(actionSet) != len(sactionSet) { - return false - } - - // As both sets are equal in length, check each elements are equal. - for k := range actionSet { - if _, ok := sactionSet[k]; !ok { - return false - } - } - - return true -} - -// Intersection - returns actions available in both ActionSet. -func (actionSet ActionSet) Intersection(sset ActionSet) ActionSet { - nset := NewActionSet() - for k := range actionSet { - if _, ok := sset[k]; ok { - nset.Add(k) - } - } - - return nset -} - -// MarshalJSON - encodes ActionSet to JSON data. -func (actionSet ActionSet) MarshalJSON() ([]byte, error) { - if len(actionSet) == 0 { - return nil, Errorf("empty action set") - } - - return json.Marshal(actionSet.ToSlice()) -} - -func (actionSet ActionSet) String() string { - actions := []string{} - for action := range actionSet { - actions = append(actions, string(action)) - } - sort.Strings(actions) - - return fmt.Sprintf("%v", actions) -} - -// ToSlice - returns slice of actions from the action set. -func (actionSet ActionSet) ToSlice() []Action { - actions := []Action{} - for action := range actionSet { - actions = append(actions, action) - } - - return actions -} - -// ToAdminSlice - returns slice of admin actions from the action set. -func (actionSet ActionSet) ToAdminSlice() []AdminAction { - actions := []AdminAction{} - for action := range actionSet { - actions = append(actions, AdminAction(action)) - } - - return actions -} - -// UnmarshalJSON - decodes JSON data to ActionSet. -func (actionSet *ActionSet) UnmarshalJSON(data []byte) error { - var sset set.StringSet - if err := json.Unmarshal(data, &sset); err != nil { - return err - } - - if len(sset) == 0 { - return Errorf("empty action set") - } - - *actionSet = make(ActionSet) - for _, s := range sset.ToSlice() { - actionSet.Add(Action(s)) - } - - return nil -} - -// ValidateAdmin checks if all actions are valid Admin actions -func (actionSet ActionSet) ValidateAdmin() error { - for _, action := range actionSet.ToAdminSlice() { - if !action.IsValid() { - return Errorf("unsupported admin action '%v'", action) - } - } - return nil -} - -// Validate checks if all actions are valid -func (actionSet ActionSet) Validate() error { - for _, action := range actionSet.ToSlice() { - if !action.IsValid() { - return Errorf("unsupported action '%v'", action) - } - } - return nil -} - -// NewActionSet - creates new action set. 
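NewActionSet, whose declaration continues right below, is the usual entry point into ActionSet; the set then round-trips through JSON via the MarshalJSON/UnmarshalJSON methods above and answers wildcard membership queries with Match. A sketch under the same import-path assumption as the earlier example:

package main

import (
	"encoding/json"
	"fmt"

	iampolicy "github.com/minio/minio/pkg/iam/policy"
)

func main() {
	// Duplicates collapse because ActionSet is a map keyed by Action.
	set := iampolicy.NewActionSet(iampolicy.PutObjectAction, iampolicy.GetObjectAction)

	// MarshalJSON emits a JSON array; element order is not guaranteed.
	data, err := json.Marshal(set)
	fmt.Println(string(data), err)

	// UnmarshalJSON accepts a single string or an array of action names;
	// Validate then rejects anything outside the supported-actions table.
	var parsed iampolicy.ActionSet
	if err := json.Unmarshal([]byte(`["s3:PutObject","s3:GetObject"]`), &parsed); err != nil {
		panic(err)
	}
	fmt.Println(parsed.Validate())                          // <nil>
	fmt.Println(parsed.Match(iampolicy.PutObjectAction))    // true
	fmt.Println(parsed.Match(iampolicy.DeleteObjectAction)) // false
}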
-func NewActionSet(actions ...Action) ActionSet { - actionSet := make(ActionSet) - for _, action := range actions { - actionSet.Add(action) - } - - return actionSet -} diff --git a/pkg/iam/policy/actionset_test.go b/pkg/iam/policy/actionset_test.go deleted file mode 100644 index 9cb5c611..00000000 --- a/pkg/iam/policy/actionset_test.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "encoding/json" - "reflect" - "testing" -) - -func TestActionSetAdd(t *testing.T) { - testCases := []struct { - set ActionSet - action Action - expectedResult ActionSet - }{ - {NewActionSet(), PutObjectAction, NewActionSet(PutObjectAction)}, - {NewActionSet(PutObjectAction), PutObjectAction, NewActionSet(PutObjectAction)}, - } - - for i, testCase := range testCases { - testCase.set.Add(testCase.action) - - if !reflect.DeepEqual(testCase.expectedResult, testCase.set) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) - } - } -} - -func TestActionSetMatches(t *testing.T) { - testCases := []struct { - set ActionSet - action Action - expectedResult bool - }{ - {NewActionSet(AllActions), AbortMultipartUploadAction, true}, - {NewActionSet(PutObjectAction), PutObjectAction, true}, - {NewActionSet(PutObjectAction, GetObjectAction), PutObjectAction, true}, - {NewActionSet(PutObjectAction, GetObjectAction), AbortMultipartUploadAction, false}, - } - - for i, testCase := range testCases { - result := testCase.set.Match(testCase.action) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestActionSetIntersection(t *testing.T) { - testCases := []struct { - set ActionSet - setToIntersect ActionSet - expectedResult ActionSet - }{ - {NewActionSet(), NewActionSet(PutObjectAction), NewActionSet()}, - {NewActionSet(PutObjectAction), NewActionSet(), NewActionSet()}, - {NewActionSet(PutObjectAction), NewActionSet(PutObjectAction, GetObjectAction), NewActionSet(PutObjectAction)}, - } - - for i, testCase := range testCases { - result := testCase.set.Intersection(testCase.setToIntersect) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) - } - } -} - -func TestActionSetMarshalJSON(t *testing.T) { - testCases := []struct { - actionSet ActionSet - expectedResult []byte - expectErr bool - }{ - {NewActionSet(PutObjectAction), []byte(`["s3:PutObject"]`), false}, - {NewActionSet(), nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.actionSet) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: 
%v\n", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestActionSetToSlice(t *testing.T) { - testCases := []struct { - actionSet ActionSet - expectedResult []Action - }{ - {NewActionSet(PutObjectAction), []Action{PutObjectAction}}, - {NewActionSet(), []Action{}}, - } - - for i, testCase := range testCases { - result := testCase.actionSet.ToSlice() - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestActionSetUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult ActionSet - expectUnmarshalErr bool - expectValidateErr bool - }{ - {[]byte(`"s3:PutObject"`), NewActionSet(PutObjectAction), false, false}, - {[]byte(`["s3:PutObject"]`), NewActionSet(PutObjectAction), false, false}, - {[]byte(`["s3:PutObject", "s3:GetObject"]`), NewActionSet(PutObjectAction, GetObjectAction), false, false}, - {[]byte(`["s3:PutObject", "s3:GetObject", "s3:PutObject"]`), NewActionSet(PutObjectAction, GetObjectAction), false, false}, - {[]byte(`[]`), NewActionSet(), true, false}, // Empty array. - {[]byte(`"foo"`), nil, false, true}, // Invalid action. - {[]byte(`["s3:PutObject", "foo"]`), nil, false, true}, // Invalid action. - } - - for i, testCase := range testCases { - result := make(ActionSet) - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectUnmarshalErr { - t.Fatalf("case %v: error during unmarshal: expected: %v, got: %v\n", i+1, testCase.expectUnmarshalErr, expectErr) - } - - err = result.Validate() - expectErr = (err != nil) - if expectErr != testCase.expectValidateErr { - t.Fatalf("case %v: error during validation: expected: %v, got: %v\n", i+1, testCase.expectValidateErr, expectErr) - } - - if !testCase.expectUnmarshalErr && !testCase.expectValidateErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } - } -} diff --git a/pkg/iam/policy/admin-action.go b/pkg/iam/policy/admin-action.go deleted file mode 100644 index b293f843..00000000 --- a/pkg/iam/policy/admin-action.go +++ /dev/null @@ -1,192 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "github.com/minio/minio/pkg/bucket/policy/condition" -) - -// AdminAction - admin policy action. 
-type AdminAction string - -const ( - // HealAdminAction - allows heal command - HealAdminAction = "admin:Heal" - - // Service Actions - - // StorageInfoAdminAction - allow listing server info - StorageInfoAdminAction = "admin:StorageInfo" - // DataUsageInfoAdminAction - allow listing data usage info - DataUsageInfoAdminAction = "admin:DataUsageInfo" - // TopLocksAdminAction - allow listing top locks - TopLocksAdminAction = "admin:TopLocksInfo" - // ProfilingAdminAction - allow profiling - ProfilingAdminAction = "admin:Profiling" - // TraceAdminAction - allow listing server trace - TraceAdminAction = "admin:ServerTrace" - // ConsoleLogAdminAction - allow listing console logs on terminal - ConsoleLogAdminAction = "admin:ConsoleLog" - // KMSKeyStatusAdminAction - allow getting KMS key status - KMSKeyStatusAdminAction = "admin:KMSKeyStatus" - // ServerInfoAdminAction - allow listing server info - ServerInfoAdminAction = "admin:ServerInfo" - // OBDInfoAdminAction - allow obtaining cluster on-board diagnostics - OBDInfoAdminAction = "admin:OBDInfo" - - // ServerUpdateAdminAction - allow MinIO binary update - ServerUpdateAdminAction = "admin:ServerUpdate" - // ServiceRestartAdminAction - allow restart of MinIO service. - ServiceRestartAdminAction = "admin:ServiceRestart" - // ServiceStopAdminAction - allow stopping MinIO service. - ServiceStopAdminAction = "admin:ServiceStop" - - // ConfigUpdateAdminAction - allow MinIO config management - ConfigUpdateAdminAction = "admin:ConfigUpdate" - - // CreateUserAdminAction - allow creating MinIO user - CreateUserAdminAction = "admin:CreateUser" - // DeleteUserAdminAction - allow deleting MinIO user - DeleteUserAdminAction = "admin:DeleteUser" - // ListUsersAdminAction - allow list users permission - ListUsersAdminAction = "admin:ListUsers" - // EnableUserAdminAction - allow enable user permission - EnableUserAdminAction = "admin:EnableUser" - // DisableUserAdminAction - allow disable user permission - DisableUserAdminAction = "admin:DisableUser" - // GetUserAdminAction - allows GET permission on user info - GetUserAdminAction = "admin:GetUser" - - // Group Actions - - // AddUserToGroupAdminAction - allow adding user to group permission - AddUserToGroupAdminAction = "admin:AddUserToGroup" - // RemoveUserFromGroupAdminAction - allow removing user to group permission - RemoveUserFromGroupAdminAction = "admin:RemoveUserFromGroup" - // GetGroupAdminAction - allow getting group info - GetGroupAdminAction = "admin:GetGroup" - // ListGroupsAdminAction - allow list groups permission - ListGroupsAdminAction = "admin:ListGroups" - // EnableGroupAdminAction - allow enable group permission - EnableGroupAdminAction = "admin:EnableGroup" - // DisableGroupAdminAction - allow disable group permission - DisableGroupAdminAction = "admin:DisableGroup" - - // Policy Actions - - // CreatePolicyAdminAction - allow create policy permission - CreatePolicyAdminAction = "admin:CreatePolicy" - // DeletePolicyAdminAction - allow delete policy permission - DeletePolicyAdminAction = "admin:DeletePolicy" - // GetPolicyAdminAction - allow get policy permission - GetPolicyAdminAction = "admin:GetPolicy" - // AttachPolicyAdminAction - allows attaching a policy to a user/group - AttachPolicyAdminAction = "admin:AttachUserOrGroupPolicy" - // ListUserPoliciesAdminAction - allows listing user policies - ListUserPoliciesAdminAction = "admin:ListUserPolicies" - - // Bucket quota Actions - - // SetBucketQuotaAdminAction - allow setting bucket quota - SetBucketQuotaAdminAction = 
"admin:SetBucketQuota" - // GetBucketQuotaAdminAction - allow getting bucket quota - GetBucketQuotaAdminAction = "admin:GetBucketQuota" - - // AllAdminActions - provides all admin permissions - AllAdminActions = "admin:*" -) - -// List of all supported admin actions. -var supportedAdminActions = map[AdminAction]struct{}{ - HealAdminAction: {}, - StorageInfoAdminAction: {}, - DataUsageInfoAdminAction: {}, - TopLocksAdminAction: {}, - ProfilingAdminAction: {}, - TraceAdminAction: {}, - ConsoleLogAdminAction: {}, - KMSKeyStatusAdminAction: {}, - ServerInfoAdminAction: {}, - OBDInfoAdminAction: {}, - ServerUpdateAdminAction: {}, - ServiceRestartAdminAction: {}, - ServiceStopAdminAction: {}, - ConfigUpdateAdminAction: {}, - CreateUserAdminAction: {}, - DeleteUserAdminAction: {}, - ListUsersAdminAction: {}, - EnableUserAdminAction: {}, - DisableUserAdminAction: {}, - GetUserAdminAction: {}, - AddUserToGroupAdminAction: {}, - RemoveUserFromGroupAdminAction: {}, - GetGroupAdminAction: {}, - ListGroupsAdminAction: {}, - EnableGroupAdminAction: {}, - DisableGroupAdminAction: {}, - CreatePolicyAdminAction: {}, - DeletePolicyAdminAction: {}, - GetPolicyAdminAction: {}, - AttachPolicyAdminAction: {}, - ListUserPoliciesAdminAction: {}, - SetBucketQuotaAdminAction: {}, - GetBucketQuotaAdminAction: {}, - AllAdminActions: {}, -} - -// IsValid - checks if action is valid or not. -func (action AdminAction) IsValid() bool { - _, ok := supportedAdminActions[action] - return ok -} - -// adminActionConditionKeyMap - holds mapping of supported condition key for an action. -var adminActionConditionKeyMap = map[Action]condition.KeySet{ - AllAdminActions: condition.NewKeySet(condition.AllSupportedAdminKeys...), - HealAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - StorageInfoAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ServerInfoAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - DataUsageInfoAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - OBDInfoAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - TopLocksAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ProfilingAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - TraceAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ConsoleLogAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - KMSKeyStatusAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ServerUpdateAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ServiceRestartAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ServiceStopAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ConfigUpdateAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - CreateUserAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - DeleteUserAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ListUsersAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - EnableUserAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - DisableUserAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - GetUserAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - AddUserToGroupAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - RemoveUserFromGroupAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ListGroupsAdminAction: 
condition.NewKeySet(condition.AllSupportedAdminKeys...), - EnableGroupAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - DisableGroupAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - CreatePolicyAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - DeletePolicyAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - GetPolicyAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - AttachPolicyAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - ListUserPoliciesAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - SetBucketQuotaAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), - GetBucketQuotaAdminAction: condition.NewKeySet(condition.AllSupportedAdminKeys...), -} diff --git a/pkg/iam/policy/constants.go b/pkg/iam/policy/constants.go deleted file mode 100644 index 846b6622..00000000 --- a/pkg/iam/policy/constants.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "github.com/minio/minio/pkg/bucket/policy" -) - -// Policy claim constants -const ( - PolicyName = "policy" - SessionPolicyName = "sessionPolicy" -) - -// ReadWrite - provides full access to all buckets and all objects -var ReadWrite = Policy{ - Version: DefaultVersion, - Statements: []Statement{ - { - SID: policy.ID(""), - Effect: policy.Allow, - Actions: NewActionSet(AllActions), - Resources: NewResourceSet(NewResource("*", "")), - }, - }, -} - -// ReadOnly - read only. -var ReadOnly = Policy{ - Version: DefaultVersion, - Statements: []Statement{ - { - SID: policy.ID(""), - Effect: policy.Allow, - Actions: NewActionSet(GetBucketLocationAction, GetObjectAction), - Resources: NewResourceSet(NewResource("*", "")), - }, - }, -} - -// WriteOnly - provides write access. -var WriteOnly = Policy{ - Version: DefaultVersion, - Statements: []Statement{ - { - SID: policy.ID(""), - Effect: policy.Allow, - Actions: NewActionSet(PutObjectAction), - Resources: NewResourceSet(NewResource("*", "")), - }, - }, -} - -// AdminDiagnostics - provides admin diagnostics access. -var AdminDiagnostics = Policy{ - Version: DefaultVersion, - Statements: []Statement{ - { - SID: policy.ID(""), - Effect: policy.Allow, - Actions: NewActionSet(ProfilingAdminAction, - TraceAdminAction, ConsoleLogAdminAction, - ServerInfoAdminAction, TopLocksAdminAction, - OBDInfoAdminAction), - Resources: NewResourceSet(NewResource("*", "")), - }, - }, -} diff --git a/pkg/iam/policy/error.go b/pkg/iam/policy/error.go deleted file mode 100644 index 726bdd72..00000000 --- a/pkg/iam/policy/error.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "fmt" -) - -// Error is the generic type for any error happening during policy -// parsing. -type Error struct { - err error -} - -// Errorf - formats according to a format specifier and returns -// the string as a value that satisfies error of type policy.Error -func Errorf(format string, a ...interface{}) error { - return Error{err: fmt.Errorf(format, a...)} -} - -// Unwrap the internal error. -func (e Error) Unwrap() error { return e.err } - -// Error 'error' compatible method. -func (e Error) Error() string { - if e.err == nil { - return "iam: cause " - } - return e.err.Error() -} diff --git a/pkg/iam/policy/policy.go b/pkg/iam/policy/policy.go deleted file mode 100644 index 3d1138c9..00000000 --- a/pkg/iam/policy/policy.go +++ /dev/null @@ -1,180 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "encoding/json" - "io" - "strings" - - "github.com/minio/minio-go/v6/pkg/set" - "github.com/minio/minio/pkg/bucket/policy" -) - -// DefaultVersion - default policy version as per AWS S3 specification. -const DefaultVersion = "2012-10-17" - -// Args - arguments to policy to check whether it is allowed -type Args struct { - AccountName string `json:"account"` - Action Action `json:"action"` - BucketName string `json:"bucket"` - ConditionValues map[string][]string `json:"conditions"` - IsOwner bool `json:"owner"` - ObjectName string `json:"object"` - Claims map[string]interface{} `json:"claims"` -} - -// GetPolicies get policies -func (a Args) GetPolicies(policyClaimName string) (set.StringSet, bool) { - s := set.NewStringSet() - pname, ok := a.Claims[policyClaimName] - if !ok { - return s, false - } - pnames, ok := pname.([]string) - if !ok { - pnameStr, ok := pname.(string) - if ok { - pnames = strings.Split(pnameStr, ",") - } else { - return s, false - } - } - for _, pname := range pnames { - pname = strings.TrimSpace(pname) - if pname == "" { - // ignore any empty strings, considerate - // towards some user errors. - continue - } - s.Add(pname) - } - return s, true -} - -// Policy - iam bucket iamp. -type Policy struct { - ID policy.ID `json:"ID,omitempty"` - Version string - Statements []Statement `json:"Statement"` -} - -// IsAllowed - checks given policy args is allowed to continue the Rest API. -func (iamp Policy) IsAllowed(args Args) bool { - // Check all deny statements. If any one statement denies, return false. 
- for _, statement := range iamp.Statements { - if statement.Effect == policy.Deny { - if !statement.IsAllowed(args) { - return false - } - } - } - - // For owner, its allowed by default. - if args.IsOwner { - return true - } - - // Check all allow statements. If any one statement allows, return true. - for _, statement := range iamp.Statements { - if statement.Effect == policy.Allow { - if statement.IsAllowed(args) { - return true - } - } - } - - return false -} - -// IsEmpty - returns whether policy is empty or not. -func (iamp Policy) IsEmpty() bool { - return len(iamp.Statements) == 0 -} - -// isValid - checks if Policy is valid or not. -func (iamp Policy) isValid() error { - if iamp.Version != DefaultVersion && iamp.Version != "" { - return Errorf("invalid version '%v'", iamp.Version) - } - - for _, statement := range iamp.Statements { - if err := statement.isValid(); err != nil { - return err - } - } - return nil -} - -func (iamp *Policy) dropDuplicateStatements() { -redo: - for i := range iamp.Statements { - for j, statement := range iamp.Statements[i+1:] { - if iamp.Statements[i].Effect != statement.Effect { - continue - } - - if !iamp.Statements[i].Actions.Equals(statement.Actions) { - continue - } - - if !iamp.Statements[i].Resources.Equals(statement.Resources) { - continue - } - - if iamp.Statements[i].Conditions.String() != statement.Conditions.String() { - continue - } - iamp.Statements = append(iamp.Statements[:j], iamp.Statements[j+1:]...) - goto redo - } - } -} - -// UnmarshalJSON - decodes JSON data to Iamp. -func (iamp *Policy) UnmarshalJSON(data []byte) error { - // subtype to avoid recursive call to UnmarshalJSON() - type subPolicy Policy - var sp subPolicy - if err := json.Unmarshal(data, &sp); err != nil { - return err - } - - p := Policy(sp) - p.dropDuplicateStatements() - *iamp = p - return nil -} - -// Validate - validates all statements are for given bucket or not. -func (iamp Policy) Validate() error { - return iamp.isValid() -} - -// ParseConfig - parses data in given reader to Iamp. -func ParseConfig(reader io.Reader) (*Policy, error) { - var iamp Policy - - decoder := json.NewDecoder(reader) - decoder.DisallowUnknownFields() - if err := decoder.Decode(&iamp); err != nil { - return nil, Errorf("%w", err) - } - - return &iamp, iamp.Validate() -} diff --git a/pkg/iam/policy/policy_test.go b/pkg/iam/policy/policy_test.go deleted file mode 100644 index b082f097..00000000 --- a/pkg/iam/policy/policy_test.go +++ /dev/null @@ -1,872 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package iampolicy - -import ( - "encoding/json" - "net" - "reflect" - "testing" - - "github.com/minio/minio/pkg/bucket/policy" - "github.com/minio/minio/pkg/bucket/policy/condition" -) - -func TestPolicyIsAllowed(t *testing.T) { - case1Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - )}, - } - - case2Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - )}, - } - - _, IPNet, err := net.ParseCIDR("192.168.1.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func1, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - )}, - } - - case4Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Deny, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - )}, - } - - anonGetBucketLocationArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetBucketLocationAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - } - - anonPutObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: PutObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - }, - ObjectName: "myobject", - } - - anonGetObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - ObjectName: "myobject", - } - - getBucketLocationArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetBucketLocationAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - } - - putObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: PutObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - }, - ObjectName: "myobject", - } - - getObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - ObjectName: "myobject", - } - - testCases := []struct { - policy Policy - args Args - expectedResult bool - }{ - {case1Policy, anonGetBucketLocationArgs, true}, - {case1Policy, anonPutObjectActionArgs, true}, - {case1Policy, anonGetObjectActionArgs, false}, - {case1Policy, getBucketLocationArgs, true}, - {case1Policy, putObjectActionArgs, true}, - {case1Policy, getObjectActionArgs, false}, - - {case2Policy, anonGetBucketLocationArgs, false}, - {case2Policy, anonPutObjectActionArgs, true}, - {case2Policy, anonGetObjectActionArgs, true}, - {case2Policy, getBucketLocationArgs, false}, - {case2Policy, putObjectActionArgs, true}, - {case2Policy, getObjectActionArgs, true}, - - {case3Policy, anonGetBucketLocationArgs, 
false}, - {case3Policy, anonPutObjectActionArgs, true}, - {case3Policy, anonGetObjectActionArgs, false}, - {case3Policy, getBucketLocationArgs, false}, - {case3Policy, putObjectActionArgs, true}, - {case3Policy, getObjectActionArgs, false}, - - {case4Policy, anonGetBucketLocationArgs, false}, - {case4Policy, anonPutObjectActionArgs, false}, - {case4Policy, anonGetObjectActionArgs, false}, - {case4Policy, getBucketLocationArgs, false}, - {case4Policy, putObjectActionArgs, false}, - {case4Policy, getObjectActionArgs, false}, - } - - for i, testCase := range testCases { - result := testCase.policy.IsAllowed(testCase.args) - - if result != testCase.expectedResult { - t.Errorf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestPolicyIsEmpty(t *testing.T) { - case1Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case2Policy := Policy{ - ID: "MyPolicyForMyBucket", - Version: DefaultVersion, - } - - testCases := []struct { - policy Policy - expectedResult bool - }{ - {case1Policy, false}, - {case2Policy, true}, - } - - for i, testCase := range testCases { - result := testCase.policy.IsEmpty() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestPolicyIsValid(t *testing.T) { - case1Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case2Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - policy.Deny, - NewActionSet(GetObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case3Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - policy.Deny, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/yourobject*")), - condition.NewFunctions(), - ), - }, - } - - func1, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func2, err := condition.NewNullFunc( - condition.S3XAmzServerSideEncryption, - false, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - - case4Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ), - NewStatement( - policy.Deny, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func2), - ), - }, - } - - case5Policy := Policy{ - Version: "17-10-2012", - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case6Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2), - ), - }, - } - - case7Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - policy.Deny, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case8Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - testCases := []struct { - policy Policy - expectErr bool - }{ - {case1Policy, false}, - // allowed duplicate principal. - {case2Policy, false}, - // allowed duplicate principal and action. - {case3Policy, false}, - // allowed duplicate principal, action and resource. - {case4Policy, false}, - // Invalid version error. - {case5Policy, true}, - // Invalid statement error. - {case6Policy, true}, - // Duplicate statement different Effects. - {case7Policy, false}, - // Duplicate statement same Effects, duplicate effect will be removed. 
- {case8Policy, false}, - } - - for i, testCase := range testCases { - err := testCase.policy.isValid() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestPolicyUnmarshalJSONAndValidate(t *testing.T) { - case1Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Sid": "SomeId1", - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - case1Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - case1Policy.Statements[0].SID = "SomeId1" - - case2Data := []byte(`{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Deny", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::mybucket/yourobject*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.1.0/24" - } - } - } - ] -}`) - _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func1, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet1, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case2Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - policy.Deny, - NewActionSet(GetObjectAction), - NewResourceSet(NewResource("mybucket", "/yourobject*")), - condition.NewFunctions(func1), - ), - }, - } - - case3Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - case3Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(GetObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case4Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Allow", - "Action": "s3:GetObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - case4Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - policy.Allow, - NewActionSet(GetObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - }, - } - - case5Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - 
"Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/yourobject*" - } - ] -}`) - case5Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ), - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/yourobject*")), - condition.NewFunctions(), - ), - }, - } - - case6Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.1.0/24" - } - } - }, - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "IpAddress": { - "aws:SourceIp": "192.168.2.0/24" - } - } - } - ] -}`) - _, IPNet2, err := net.ParseCIDR("192.168.2.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func2, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet2, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case6Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ), - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func2), - ), - }, - } - - case7Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetBucketLocation", - "Resource": "arn:aws:s3:::mybucket" - } - ] -}`) - - case7Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(GetBucketLocationAction), - NewResourceSet(NewResource("mybucket", "")), - condition.NewFunctions(), - ), - }, - } - - case8Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:GetBucketLocation", - "Resource": "arn:aws:s3:::*" - } - ] -}`) - - case8Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(GetBucketLocationAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ), - }, - } - - case9Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "17-10-2012", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - - case10Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - case10Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - 
condition.NewFunctions(), - ), - }, - } - - case11Data := []byte(`{ - "ID": "MyPolicyForMyBucket1", - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - }, - { - "Effect": "Deny", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" - } - ] -}`) - - case11Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(), - ), - NewStatement( - policy.Deny, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(), - ), - }, - } - - testCases := []struct { - data []byte - expectedResult Policy - expectUnmarshalErr bool - expectValidationErr bool - }{ - {case1Data, case1Policy, false, false}, - {case2Data, case2Policy, false, false}, - {case3Data, case3Policy, false, false}, - {case4Data, case4Policy, false, false}, - {case5Data, case5Policy, false, false}, - {case6Data, case6Policy, false, false}, - {case7Data, case7Policy, false, false}, - {case8Data, case8Policy, false, false}, - // Invalid version error. - {case9Data, Policy{}, false, true}, - // Duplicate statement success, duplicate statement is removed. - {case10Data, case10Policy, false, false}, - // Duplicate statement success (Effect differs). - {case11Data, case11Policy, false, false}, - } - - for i, testCase := range testCases { - var result Policy - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectUnmarshalErr { - t.Errorf("case %v: error during unmarshal: expected: %v, got: %v", i+1, testCase.expectUnmarshalErr, expectErr) - } - - err = result.Validate() - expectErr = (err != nil) - - if expectErr != testCase.expectValidationErr { - t.Errorf("case %v: error during validation: expected: %v, got: %v", i+1, testCase.expectValidationErr, expectErr) - } - - if !testCase.expectUnmarshalErr && !testCase.expectValidationErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Errorf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestPolicyValidate(t *testing.T) { - case1Policy := Policy{ - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("", "")), - condition.NewFunctions(), - ), - }, - } - - func1, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func2, err := condition.NewNullFunc( - condition.S3XAmzServerSideEncryption, - false, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - case2Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2), - ), - }, - } - - case3Policy := Policy{ - ID: "MyPolicyForMyBucket1", - Version: DefaultVersion, - Statements: []Statement{ - NewStatement( - policy.Allow, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(), - ), - }, - } - - testCases := []struct { - policy Policy - expectErr bool - }{ - {case1Policy, true}, - {case2Policy, true}, - {case3Policy, false}, - } - - for i, testCase := range testCases { - err := testCase.policy.Validate() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} diff --git a/pkg/iam/policy/resource.go b/pkg/iam/policy/resource.go deleted file mode 100644 index efccfd8b..00000000 --- a/pkg/iam/policy/resource.go +++ /dev/null @@ -1,137 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "encoding/json" - "path" - "strings" - - "github.com/minio/minio/pkg/bucket/policy/condition" - "github.com/minio/minio/pkg/wildcard" -) - -// ResourceARNPrefix - resource ARN prefix as per AWS S3 specification. -const ResourceARNPrefix = "arn:aws:s3:::" - -// Resource - resource in policy statement. -type Resource struct { - BucketName string - Pattern string -} - -func (r Resource) isBucketPattern() bool { - return !strings.Contains(r.Pattern, "/") || r.Pattern == "*" -} - -func (r Resource) isObjectPattern() bool { - return strings.Contains(r.Pattern, "/") || strings.Contains(r.BucketName, "*") || r.Pattern == "*/*" -} - -// IsValid - checks whether Resource is valid or not. -func (r Resource) IsValid() bool { - return r.Pattern != "" -} - -// Match - matches object name with resource pattern. -func (r Resource) Match(resource string, conditionValues map[string][]string) bool { - pattern := r.Pattern - for _, key := range condition.CommonKeys { - // Empty values are not supported for policy variables. - if rvalues, ok := conditionValues[key.Name()]; ok && rvalues[0] != "" { - pattern = strings.Replace(pattern, key.VarName(), rvalues[0], -1) - } - } - if path.Clean(resource) == pattern { - return true - } - return wildcard.Match(pattern, resource) -} - -// MarshalJSON - encodes Resource to JSON data. -func (r Resource) MarshalJSON() ([]byte, error) { - if !r.IsValid() { - return nil, Errorf("invalid resource %v", r) - } - - return json.Marshal(r.String()) -} - -func (r Resource) String() string { - return ResourceARNPrefix + r.Pattern -} - -// UnmarshalJSON - decodes JSON data to Resource. 
-func (r *Resource) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - - parsedResource, err := parseResource(s) - if err != nil { - return err - } - - *r = parsedResource - - return nil -} - -// Validate - validates Resource is for given bucket or not. -func (r Resource) Validate() error { - if !r.IsValid() { - return Errorf("invalid resource") - } - return nil -} - -// parseResource - parses string to Resource. -func parseResource(s string) (Resource, error) { - if !strings.HasPrefix(s, ResourceARNPrefix) { - return Resource{}, Errorf("invalid resource '%v'", s) - } - - pattern := strings.TrimPrefix(s, ResourceARNPrefix) - tokens := strings.SplitN(pattern, "/", 2) - bucketName := tokens[0] - if bucketName == "" { - return Resource{}, Errorf("invalid resource format '%v'", s) - } - - return Resource{ - BucketName: bucketName, - Pattern: pattern, - }, nil -} - -// NewResource - creates new resource. -func NewResource(bucketName, keyName string) Resource { - pattern := bucketName - if keyName != "" { - if !strings.HasPrefix(keyName, "/") { - pattern += "/" - } - - pattern += keyName - } - - return Resource{ - BucketName: bucketName, - Pattern: pattern, - } -} diff --git a/pkg/iam/policy/resource_test.go b/pkg/iam/policy/resource_test.go deleted file mode 100644 index a6c7e6ef..00000000 --- a/pkg/iam/policy/resource_test.go +++ /dev/null @@ -1,223 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package iampolicy - -import ( - "encoding/json" - "fmt" - "reflect" - "testing" -) - -func TestResourceIsBucketPattern(t *testing.T) { - testCases := []struct { - resource Resource - expectedResult bool - }{ - {NewResource("*", ""), true}, - {NewResource("mybucket", ""), true}, - {NewResource("mybucket*", ""), true}, - {NewResource("mybucket?0", ""), true}, - {NewResource("", "*"), false}, - {NewResource("*", "*"), false}, - {NewResource("mybucket", "*"), false}, - {NewResource("mybucket*", "/myobject"), false}, - {NewResource("mybucket?0", "/2010/photos/*"), false}, - } - - for i, testCase := range testCases { - result := testCase.resource.isBucketPattern() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceIsObjectPattern(t *testing.T) { - testCases := []struct { - resource Resource - expectedResult bool - }{ - {NewResource("*", ""), true}, - {NewResource("mybucket*", ""), true}, - {NewResource("", "*"), true}, - {NewResource("*", "*"), true}, - {NewResource("mybucket", "*"), true}, - {NewResource("mybucket*", "/myobject"), true}, - {NewResource("mybucket?0", "/2010/photos/*"), true}, - {NewResource("mybucket", ""), false}, - {NewResource("mybucket?0", ""), false}, - } - - for i, testCase := range testCases { - result := testCase.resource.isObjectPattern() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceIsValid(t *testing.T) { - testCases := []struct { - resource Resource - expectedResult bool - }{ - {NewResource("*", ""), true}, - {NewResource("mybucket*", ""), true}, - {NewResource("*", "*"), true}, - {NewResource("mybucket", "*"), true}, - {NewResource("mybucket*", "/myobject"), true}, - {NewResource("mybucket?0", "/2010/photos/*"), true}, - {NewResource("mybucket", ""), true}, - {NewResource("mybucket?0", ""), true}, - {NewResource("", "*"), true}, - {NewResource("", ""), false}, - } - - for i, testCase := range testCases { - result := testCase.resource.IsValid() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceMatch(t *testing.T) { - testCases := []struct { - resource Resource - objectName string - expectedResult bool - }{ - {NewResource("*", ""), "mybucket", true}, - {NewResource("*", ""), "mybucket/myobject", true}, - {NewResource("mybucket*", ""), "mybucket", true}, - {NewResource("mybucket*", ""), "mybucket/myobject", true}, - {NewResource("", "*"), "/myobject", true}, - {NewResource("*", "*"), "mybucket/myobject", true}, - {NewResource("mybucket", "*"), "mybucket/myobject", true}, - {NewResource("mybucket*", "/myobject"), "mybucket/myobject", true}, - {NewResource("mybucket*", "/myobject"), "mybucket100/myobject", true}, - {NewResource("mybucket?0", "/2010/photos/*"), "mybucket20/2010/photos/1.jpg", true}, - {NewResource("mybucket", ""), "mybucket", true}, - {NewResource("mybucket?0", ""), "mybucket30", true}, - {NewResource("", "*"), "mybucket/myobject", false}, - {NewResource("*", "*"), "mybucket", false}, - {NewResource("mybucket", "*"), "mybucket10/myobject", false}, - {NewResource("mybucket?0", "/2010/photos/*"), "mybucket0/2010/photos/1.jpg", false}, - {NewResource("mybucket", ""), "mybucket/myobject", false}, - } - - for i, testCase := range testCases { - testCase := testCase - t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) { - result := 
testCase.resource.Match(testCase.objectName, nil) - if result != testCase.expectedResult { - t.Errorf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - }) - } -} - -func TestResourceMarshalJSON(t *testing.T) { - testCases := []struct { - resource Resource - expectedResult []byte - expectErr bool - }{ - {NewResource("*", ""), []byte(`"arn:aws:s3:::*"`), false}, - {NewResource("mybucket*", ""), []byte(`"arn:aws:s3:::mybucket*"`), false}, - {NewResource("mybucket", ""), []byte(`"arn:aws:s3:::mybucket"`), false}, - {NewResource("*", "*"), []byte(`"arn:aws:s3:::*/*"`), false}, - {NewResource("", "*"), []byte(`"arn:aws:s3:::/*"`), false}, - {NewResource("mybucket", "*"), []byte(`"arn:aws:s3:::mybucket/*"`), false}, - {NewResource("mybucket*", "myobject"), []byte(`"arn:aws:s3:::mybucket*/myobject"`), false}, - {NewResource("mybucket?0", "/2010/photos/*"), []byte(`"arn:aws:s3:::mybucket?0/2010/photos/*"`), false}, - {Resource{}, nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.resource) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestResourceUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult Resource - expectErr bool - }{ - {[]byte(`"arn:aws:s3:::*"`), NewResource("*", ""), false}, - {[]byte(`"arn:aws:s3:::mybucket*"`), NewResource("mybucket*", ""), false}, - {[]byte(`"arn:aws:s3:::mybucket"`), NewResource("mybucket", ""), false}, - {[]byte(`"arn:aws:s3:::*/*"`), NewResource("*", "*"), false}, - {[]byte(`"arn:aws:s3:::mybucket/*"`), NewResource("mybucket", "*"), false}, - {[]byte(`"arn:aws:s3:::mybucket*/myobject"`), NewResource("mybucket*", "myobject"), false}, - {[]byte(`"arn:aws:s3:::mybucket?0/2010/photos/*"`), NewResource("mybucket?0", "/2010/photos/*"), false}, - {[]byte(`"mybucket/myobject*"`), Resource{}, true}, - {[]byte(`"arn:aws:s3:::/*"`), Resource{}, true}, - } - - for i, testCase := range testCases { - var result Resource - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestResourceValidate(t *testing.T) { - testCases := []struct { - resource Resource - expectErr bool - }{ - {NewResource("mybucket", "/myobject*"), false}, - {NewResource("", "/myobject*"), false}, - {NewResource("", ""), true}, - } - - for i, testCase := range testCases { - err := testCase.resource.Validate() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} diff --git a/pkg/iam/policy/resourceset.go b/pkg/iam/policy/resourceset.go deleted file mode 100644 index b7eca66f..00000000 --- a/pkg/iam/policy/resourceset.go +++ /dev/null @@ -1,165 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "encoding/json" - "fmt" - "sort" - - "github.com/minio/minio-go/v6/pkg/set" -) - -// ResourceSet - set of resources in policy statement. -type ResourceSet map[Resource]struct{} - -// bucketResourceExists - checks if at least one bucket resource exists in the set. -func (resourceSet ResourceSet) bucketResourceExists() bool { - for resource := range resourceSet { - if resource.isBucketPattern() { - return true - } - } - - return false -} - -// objectResourceExists - checks if at least one object resource exists in the set. -func (resourceSet ResourceSet) objectResourceExists() bool { - for resource := range resourceSet { - if resource.isObjectPattern() { - return true - } - } - - return false -} - -// Add - adds resource to resource set. -func (resourceSet ResourceSet) Add(resource Resource) { - resourceSet[resource] = struct{}{} -} - -// Equals - checks whether given resource set is equal to current resource set or not. -func (resourceSet ResourceSet) Equals(sresourceSet ResourceSet) bool { - // If length of set is not equal to length of given set, the - // set is not equal to given set. - if len(resourceSet) != len(sresourceSet) { - return false - } - - // As both sets are equal in length, check each elements are equal. - for k := range resourceSet { - if _, ok := sresourceSet[k]; !ok { - return false - } - } - - return true -} - -// Intersection - returns resources available in both ResourceSet. -func (resourceSet ResourceSet) Intersection(sset ResourceSet) ResourceSet { - nset := NewResourceSet() - for k := range resourceSet { - if _, ok := sset[k]; ok { - nset.Add(k) - } - } - - return nset -} - -// MarshalJSON - encodes ResourceSet to JSON data. -func (resourceSet ResourceSet) MarshalJSON() ([]byte, error) { - if len(resourceSet) == 0 { - return nil, Errorf("empty resource set") - } - - resources := []Resource{} - for resource := range resourceSet { - resources = append(resources, resource) - } - - return json.Marshal(resources) -} - -// Match - matches object name with anyone of resource pattern in resource set. -func (resourceSet ResourceSet) Match(resource string, conditionValues map[string][]string) bool { - for r := range resourceSet { - if r.Match(resource, conditionValues) { - return true - } - } - - return false -} - -func (resourceSet ResourceSet) String() string { - resources := []string{} - for resource := range resourceSet { - resources = append(resources, resource.String()) - } - sort.Strings(resources) - - return fmt.Sprintf("%v", resources) -} - -// UnmarshalJSON - decodes JSON data to ResourceSet. 
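// Illustrative sketch (not part of the deleted files above): how the ResourceSet
// helpers in this file are typically driven. It assumes the pre-removal import
// path github.com/minio/minio/pkg/iam/policy; bucket and object names are made up.
// A policy "Resource" value goes through minio-go's set.StringSet, so a JSON array
// of ARNs (as used here) is accepted alongside a single ARN string.
package main

import (
	"encoding/json"
	"fmt"
	"log"

	iampolicy "github.com/minio/minio/pkg/iam/policy"
)

func main() {
	var rs iampolicy.ResourceSet
	if err := json.Unmarshal([]byte(`["arn:aws:s3:::mybucket/photos/*"]`), &rs); err != nil {
		log.Fatal(err)
	}

	// Match tries every pattern in the set and returns true on the first hit.
	fmt.Println(rs.Match("mybucket/photos/2020/1.jpg", nil)) // true
	fmt.Println(rs.Match("otherbucket/readme.txt", nil))     // false

	// Programmatic construction mirrors what UnmarshalJSON produces.
	built := iampolicy.NewResourceSet(iampolicy.NewResource("mybucket", "/photos/*"))
	fmt.Println(rs.Equals(built)) // true
}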
-func (resourceSet *ResourceSet) UnmarshalJSON(data []byte) error { - var sset set.StringSet - if err := json.Unmarshal(data, &sset); err != nil { - return err - } - - *resourceSet = make(ResourceSet) - for _, s := range sset.ToSlice() { - resource, err := parseResource(s) - if err != nil { - return err - } - - if _, found := (*resourceSet)[resource]; found { - return Errorf("duplicate resource '%v' found", s) - } - - resourceSet.Add(resource) - } - - return nil -} - -// Validate - validates ResourceSet. -func (resourceSet ResourceSet) Validate() error { - for resource := range resourceSet { - if err := resource.Validate(); err != nil { - return err - } - } - - return nil -} - -// NewResourceSet - creates new resource set. -func NewResourceSet(resources ...Resource) ResourceSet { - resourceSet := make(ResourceSet) - for _, resource := range resources { - resourceSet.Add(resource) - } - - return resourceSet -} diff --git a/pkg/iam/policy/resourceset_test.go b/pkg/iam/policy/resourceset_test.go deleted file mode 100644 index 5395bdd6..00000000 --- a/pkg/iam/policy/resourceset_test.go +++ /dev/null @@ -1,242 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "encoding/json" - "fmt" - "reflect" - "testing" -) - -func TestResourceSetBucketResourceExists(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - expectedResult bool - }{ - {NewResourceSet(NewResource("*", "")), true}, - {NewResourceSet(NewResource("mybucket", "")), true}, - {NewResourceSet(NewResource("mybucket*", "")), true}, - {NewResourceSet(NewResource("mybucket?0", "")), true}, - {NewResourceSet(NewResource("mybucket", "/2010/photos/*"), NewResource("mybucket", "")), true}, - {NewResourceSet(NewResource("", "*")), false}, - {NewResourceSet(NewResource("*", "*")), false}, - {NewResourceSet(NewResource("mybucket", "*")), false}, - {NewResourceSet(NewResource("mybucket*", "/myobject")), false}, - {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), false}, - } - - for i, testCase := range testCases { - result := testCase.resourceSet.bucketResourceExists() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceSetObjectResourceExists(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - expectedResult bool - }{ - {NewResourceSet(NewResource("*", "")), true}, - {NewResourceSet(NewResource("mybucket*", "")), true}, - {NewResourceSet(NewResource("", "*")), true}, - {NewResourceSet(NewResource("*", "*")), true}, - {NewResourceSet(NewResource("mybucket", "*")), true}, - {NewResourceSet(NewResource("mybucket*", "/myobject")), true}, - {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), true}, - {NewResourceSet(NewResource("mybucket", ""), NewResource("mybucket", "/2910/photos/*")), true}, - {NewResourceSet(NewResource("mybucket", "")), false}, - {NewResourceSet(NewResource("mybucket?0", 
"")), false}, - } - - for i, testCase := range testCases { - result := testCase.resourceSet.objectResourceExists() - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestResourceSetAdd(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - resource Resource - expectedResult ResourceSet - }{ - {NewResourceSet(), NewResource("mybucket", "/myobject*"), - NewResourceSet(NewResource("mybucket", "/myobject*"))}, - {NewResourceSet(NewResource("mybucket", "/myobject*")), - NewResource("mybucket", "/yourobject*"), - NewResourceSet(NewResource("mybucket", "/myobject*"), - NewResource("mybucket", "/yourobject*"))}, - {NewResourceSet(NewResource("mybucket", "/myobject*")), - NewResource("mybucket", "/myobject*"), - NewResourceSet(NewResource("mybucket", "/myobject*"))}, - } - - for i, testCase := range testCases { - testCase.resourceSet.Add(testCase.resource) - - if !reflect.DeepEqual(testCase.resourceSet, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.resourceSet) - } - } -} - -func TestResourceSetIntersection(t *testing.T) { - testCases := []struct { - set ResourceSet - setToIntersect ResourceSet - expectedResult ResourceSet - }{ - {NewResourceSet(), NewResourceSet(NewResource("mybucket", "/myobject*")), NewResourceSet()}, - {NewResourceSet(NewResource("mybucket", "/myobject*")), NewResourceSet(), NewResourceSet()}, - {NewResourceSet(NewResource("mybucket", "/myobject*")), - NewResourceSet(NewResource("mybucket", "/myobject*"), NewResource("mybucket", "/yourobject*")), - NewResourceSet(NewResource("mybucket", "/myobject*"))}, - } - - for i, testCase := range testCases { - result := testCase.set.Intersection(testCase.setToIntersect) - - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set) - } - } -} - -func TestResourceSetMarshalJSON(t *testing.T) { - testCases := []struct { - resoruceSet ResourceSet - expectedResult []byte - expectErr bool - }{ - {NewResourceSet(NewResource("mybucket", "/myobject*")), - []byte(`["arn:aws:s3:::mybucket/myobject*"]`), false}, - {NewResourceSet(NewResource("mybucket", "/photos/myobject*")), - []byte(`["arn:aws:s3:::mybucket/photos/myobject*"]`), false}, - {NewResourceSet(), nil, true}, - } - - for i, testCase := range testCases { - result, err := json.Marshal(testCase.resoruceSet) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result)) - } - } - } -} - -func TestResourceSetMatch(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - resource string - expectedResult bool - }{ - {NewResourceSet(NewResource("*", "")), "mybucket", true}, - {NewResourceSet(NewResource("*", "")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("mybucket*", "")), "mybucket", true}, - {NewResourceSet(NewResource("mybucket*", "")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("", "*")), "/myobject", true}, - {NewResourceSet(NewResource("*", "*")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("mybucket", "*")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("mybucket*", 
"/myobject")), "mybucket/myobject", true}, - {NewResourceSet(NewResource("mybucket*", "/myobject")), "mybucket100/myobject", true}, - {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), "mybucket20/2010/photos/1.jpg", true}, - {NewResourceSet(NewResource("mybucket", "")), "mybucket", true}, - {NewResourceSet(NewResource("mybucket?0", "")), "mybucket30", true}, - {NewResourceSet(NewResource("mybucket?0", "/2010/photos/*"), - NewResource("mybucket", "/2010/photos/*")), "mybucket/2010/photos/1.jpg", true}, - {NewResourceSet(NewResource("", "*")), "mybucket/myobject", false}, - {NewResourceSet(NewResource("*", "*")), "mybucket", false}, - {NewResourceSet(NewResource("mybucket", "*")), "mybucket10/myobject", false}, - {NewResourceSet(NewResource("mybucket", "")), "mybucket/myobject", false}, - {NewResourceSet(), "mybucket/myobject", false}, - } - - for i, testCase := range testCases { - testCase := testCase - t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) { - result := testCase.resourceSet.Match(testCase.resource, nil) - if result != testCase.expectedResult { - t.Errorf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - }) - } -} - -func TestResourceSetUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedResult ResourceSet - expectErr bool - }{ - {[]byte(`"arn:aws:s3:::mybucket/myobject*"`), - NewResourceSet(NewResource("mybucket", "/myobject*")), false}, - {[]byte(`"arn:aws:s3:::mybucket/photos/myobject*"`), - NewResourceSet(NewResource("mybucket", "/photos/myobject*")), false}, - {[]byte(`"arn:aws:s3:::mybucket"`), NewResourceSet(NewResource("mybucket", "")), false}, - {[]byte(`"mybucket/myobject*"`), nil, true}, - } - - for i, testCase := range testCases { - var result ResourceSet - err := json.Unmarshal(testCase.data, &result) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestResourceSetValidate(t *testing.T) { - testCases := []struct { - resourceSet ResourceSet - expectErr bool - }{ - {NewResourceSet(NewResource("mybucket", "/myobject*")), false}, - {NewResourceSet(NewResource("", "/myobject*")), false}, - {NewResourceSet(NewResource("", "")), true}, - } - - for i, testCase := range testCases { - err := testCase.resourceSet.Validate() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} diff --git a/pkg/iam/policy/statement.go b/pkg/iam/policy/statement.go deleted file mode 100644 index 74f8874f..00000000 --- a/pkg/iam/policy/statement.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package iampolicy - -import ( - "strings" - - "github.com/minio/minio/pkg/bucket/policy" - "github.com/minio/minio/pkg/bucket/policy/condition" -) - -// Statement - iam policy statement. -type Statement struct { - SID policy.ID `json:"Sid,omitempty"` - Effect policy.Effect `json:"Effect"` - Actions ActionSet `json:"Action"` - Resources ResourceSet `json:"Resource,omitempty"` - Conditions condition.Functions `json:"Condition,omitempty"` -} - -// IsAllowed - checks given policy args is allowed to continue the Rest API. -func (statement Statement) IsAllowed(args Args) bool { - check := func() bool { - if !statement.Actions.Match(args.Action) { - return false - } - - resource := args.BucketName - if args.ObjectName != "" { - if !strings.HasPrefix(args.ObjectName, "/") { - resource += "/" - } - - resource += args.ObjectName - } else { - resource += "/" - } - - // For admin statements, resource match can be ignored. - if !statement.Resources.Match(resource, args.ConditionValues) && !statement.isAdmin() { - return false - } - - return statement.Conditions.Evaluate(args.ConditionValues) - } - - return statement.Effect.IsAllowed(check()) -} -func (statement Statement) isAdmin() bool { - for action := range statement.Actions { - if AdminAction(action).IsValid() { - return true - } - } - return false -} - -// isValid - checks whether statement is valid or not. -func (statement Statement) isValid() error { - if !statement.Effect.IsValid() { - return Errorf("invalid Effect %v", statement.Effect) - } - - if len(statement.Actions) == 0 { - return Errorf("Action must not be empty") - } - - if statement.isAdmin() { - if err := statement.Actions.ValidateAdmin(); err != nil { - return err - } - for action := range statement.Actions { - keys := statement.Conditions.Keys() - keyDiff := keys.Difference(adminActionConditionKeyMap[action]) - if !keyDiff.IsEmpty() { - return Errorf("unsupported condition keys '%v' used for action '%v'", keyDiff, action) - } - } - return nil - } - - if !statement.SID.IsValid() { - return Errorf("invalid SID %v", statement.SID) - } - - if len(statement.Resources) == 0 { - return Errorf("Resource must not be empty") - } - - if err := statement.Resources.Validate(); err != nil { - return err - } - - if err := statement.Actions.Validate(); err != nil { - return err - } - - for action := range statement.Actions { - if !statement.Resources.objectResourceExists() && !statement.Resources.bucketResourceExists() { - return Errorf("unsupported Resource found %v for action %v", statement.Resources, action) - } - - keys := statement.Conditions.Keys() - keyDiff := keys.Difference(actionConditionKeyMap[action]) - if !keyDiff.IsEmpty() { - return Errorf("unsupported condition keys '%v' used for action '%v'", keyDiff, action) - } - } - - return nil -} - -// Validate - validates Statement is for given bucket or not. -func (statement Statement) Validate() error { - return statement.isValid() -} - -// NewStatement - creates new statement. -func NewStatement(effect policy.Effect, actionSet ActionSet, resourceSet ResourceSet, conditions condition.Functions) Statement { - return Statement{ - Effect: effect, - Actions: actionSet, - Resources: resourceSet, - Conditions: conditions, - } -} diff --git a/pkg/iam/policy/statement_test.go b/pkg/iam/policy/statement_test.go deleted file mode 100644 index 3b290ff7..00000000 --- a/pkg/iam/policy/statement_test.go +++ /dev/null @@ -1,466 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. 
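// Illustrative sketch (not part of the deleted files): how Statement.IsAllowed from
// statement.go above evaluates effect, action, resource and conditions together.
// Only identifiers visible in this package and its tests are used; the bucket,
// object and account names are made up.
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/bucket/policy"
	"github.com/minio/minio/pkg/bucket/policy/condition"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
)

func main() {
	// Allow s3:GetObject on arn:aws:s3:::mybucket/readonly/*.
	stmt := iampolicy.NewStatement(
		policy.Allow,
		iampolicy.NewActionSet(iampolicy.GetObjectAction),
		iampolicy.NewResourceSet(iampolicy.NewResource("mybucket", "/readonly/*")),
		condition.NewFunctions(),
	)

	allowed := stmt.IsAllowed(iampolicy.Args{
		AccountName:     "testuser",
		Action:          iampolicy.GetObjectAction,
		BucketName:      "mybucket",
		ObjectName:      "readonly/doc.txt",
		ConditionValues: map[string][]string{},
	})
	denied := stmt.IsAllowed(iampolicy.Args{
		AccountName:     "testuser",
		Action:          iampolicy.GetObjectAction,
		BucketName:      "mybucket",
		ObjectName:      "private/doc.txt",
		ConditionValues: map[string][]string{},
	})
	fmt.Println(allowed, denied) // true false
}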
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package iampolicy - -import ( - "encoding/json" - "net" - "reflect" - "testing" - - "github.com/minio/minio/pkg/bucket/policy" - "github.com/minio/minio/pkg/bucket/policy/condition" -) - -func TestStatementIsAllowed(t *testing.T) { - case1Statement := NewStatement( - policy.Allow, - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ) - - case2Statement := NewStatement( - policy.Allow, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ) - - _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func1, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet1, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - case3Statement := NewStatement( - policy.Allow, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ) - - case4Statement := NewStatement( - policy.Deny, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ) - - anonGetBucketLocationArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetBucketLocationAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - } - - anonPutObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: PutObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - }, - ObjectName: "myobject", - } - - anonGetObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - ObjectName: "myobject", - } - - getBucketLocationArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetBucketLocationAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - } - - putObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: PutObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{ - "x-amz-copy-source": {"mybucket/myobject"}, - "SourceIp": {"192.168.1.10"}, - }, - ObjectName: "myobject", - } - - getObjectActionArgs := Args{ - AccountName: "Q3AM3UQ867SPQQA43P2F", - Action: GetObjectAction, - BucketName: "mybucket", - ConditionValues: map[string][]string{}, - ObjectName: "myobject", - } - - testCases := []struct { - statement Statement - args Args - expectedResult bool - }{ - {case1Statement, anonGetBucketLocationArgs, true}, - {case1Statement, anonPutObjectActionArgs, true}, - {case1Statement, anonGetObjectActionArgs, false}, - {case1Statement, getBucketLocationArgs, true}, - {case1Statement, putObjectActionArgs, true}, - {case1Statement, getObjectActionArgs, false}, - - 
{case2Statement, anonGetBucketLocationArgs, false}, - {case2Statement, anonPutObjectActionArgs, true}, - {case2Statement, anonGetObjectActionArgs, true}, - {case2Statement, getBucketLocationArgs, false}, - {case2Statement, putObjectActionArgs, true}, - {case2Statement, getObjectActionArgs, true}, - - {case3Statement, anonGetBucketLocationArgs, false}, - {case3Statement, anonPutObjectActionArgs, true}, - {case3Statement, anonGetObjectActionArgs, false}, - {case3Statement, getBucketLocationArgs, false}, - {case3Statement, putObjectActionArgs, true}, - {case3Statement, getObjectActionArgs, false}, - - {case4Statement, anonGetBucketLocationArgs, true}, - {case4Statement, anonPutObjectActionArgs, false}, - {case4Statement, anonGetObjectActionArgs, true}, - {case4Statement, getBucketLocationArgs, true}, - {case4Statement, putObjectActionArgs, false}, - {case4Statement, getObjectActionArgs, true}, - } - - for i, testCase := range testCases { - result := testCase.statement.IsAllowed(testCase.args) - - if result != testCase.expectedResult { - t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result) - } - } -} - -func TestStatementIsValid(t *testing.T) { - _, IPNet1, err := net.ParseCIDR("192.168.1.0/24") - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func1, err := condition.NewIPAddressFunc( - condition.AWSSourceIP, - IPNet1, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func2, err := condition.NewStringEqualsFunc( - condition.S3XAmzCopySource, - "mybucket/myobject", - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - func3, err := condition.NewStringEqualsFunc( - condition.AWSUserAgent, - "NSPlayer", - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - - testCases := []struct { - statement Statement - expectErr bool - }{ - // Invalid effect error. - {NewStatement( - policy.Effect("foo"), - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ), true}, - // Empty actions error. - {NewStatement( - policy.Allow, - NewActionSet(), - NewResourceSet(NewResource("*", "")), - condition.NewFunctions(), - ), true}, - // Empty resources error. 
- {NewStatement( - policy.Allow, - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(), - condition.NewFunctions(), - ), true}, - // Unsupported conditions for GetObject - {NewStatement( - policy.Allow, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2), - ), true}, - {NewStatement( - policy.Allow, - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(), - ), false}, - {NewStatement( - policy.Allow, - NewActionSet(GetBucketLocationAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "")), - condition.NewFunctions(), - ), false}, - {NewStatement( - policy.Deny, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1), - ), false}, - {NewStatement( - policy.Allow, - NewActionSet(CreateUserAdminAction, DeleteUserAdminAction), - nil, - condition.NewFunctions(func2, func3), - ), true}, - {NewStatement( - policy.Allow, - NewActionSet(CreateUserAdminAction, DeleteUserAdminAction), - nil, - condition.NewFunctions(), - ), false}, - } - - for i, testCase := range testCases { - err := testCase.statement.isValid() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} - -func TestStatementUnmarshalJSONAndValidate(t *testing.T) { - case1Data := []byte(`{ - "Sid": "SomeId1", - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" -}`) - case1Statement := NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ) - case1Statement.SID = "SomeId1" - - case2Data := []byte(`{ - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "Null": { - "s3:x-amz-copy-source": true - } - } -}`) - func1, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - case2Statement := NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func1), - ) - - case3Data := []byte(`{ - "Effect": "Deny", - "Action": [ - "s3:PutObject", - "s3:GetObject" - ], - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "Null": { - "s3:x-amz-server-side-encryption": "false" - } - } -}`) - func2, err := condition.NewNullFunc( - condition.S3XAmzServerSideEncryption, - false, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - case3Statement := NewStatement( - policy.Deny, - NewActionSet(PutObjectAction, GetObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(func2), - ) - - case4Data := []byte(`{ - "Effect": "Allow", - "Action": "s3:PutObjec, - "Resource": "arn:aws:s3:::mybucket/myobject*" -}`) - - case5Data := []byte(`{ - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*" -}`) - - case7Data := []byte(`{ - "Effect": "Allow", - "Resource": "arn:aws:s3:::mybucket/myobject*" -}`) - - case8Data := []byte(`{ - "Effect": "Allow", - "Action": "s3:PutObject" -}`) - - case9Data := []byte(`{ - "Effect": "Allow", - "Action": "s3:PutObject", - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - } -}`) - - case10Data := []byte(`{ - "Effect": "Deny", - "Action": [ - "s3:PutObject", - "s3:GetObject" - ], - "Resource": "arn:aws:s3:::mybucket/myobject*", - "Condition": { - "StringEquals": { - "s3:x-amz-copy-source": "yourbucket/myobject*" - } - } -}`) - - testCases := []struct { - data []byte - expectedResult Statement - expectUnmarshalErr bool - expectValidationErr bool - }{ - {case1Data, case1Statement, false, false}, - {case2Data, case2Statement, false, false}, - {case3Data, case3Statement, false, false}, - // JSON unmarshaling error. - {case4Data, Statement{}, true, true}, - // Invalid effect error. - {case5Data, Statement{}, false, true}, - // Empty action error. - {case7Data, Statement{}, false, true}, - // Empty resource error. - {case8Data, Statement{}, false, true}, - // Empty condition error. - {case9Data, Statement{}, true, false}, - // Unsupported condition key error. - {case10Data, Statement{}, false, true}, - } - - for i, testCase := range testCases { - var result Statement - expectErr := (json.Unmarshal(testCase.data, &result) != nil) - - if expectErr != testCase.expectUnmarshalErr { - t.Fatalf("case %v: error during unmarshal: expected: %v, got: %v", i+1, testCase.expectUnmarshalErr, expectErr) - } - - expectErr = (result.Validate() != nil) - if expectErr != testCase.expectValidationErr { - t.Fatalf("case %v: error during validation: expected: %v, got: %v", i+1, testCase.expectValidationErr, expectErr) - } - - if !testCase.expectUnmarshalErr && !testCase.expectValidationErr { - if !reflect.DeepEqual(result, testCase.expectedResult) { - t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } - } -} - -func TestStatementValidate(t *testing.T) { - case1Statement := NewStatement( - policy.Allow, - NewActionSet(PutObjectAction), - NewResourceSet(NewResource("mybucket", "/myobject*")), - condition.NewFunctions(), - ) - - func1, err := condition.NewNullFunc( - condition.S3XAmzCopySource, - true, - ) - if err != nil { - t.Fatalf("unexpected error. %v\n", err) - } - func2, err := condition.NewNullFunc( - condition.S3XAmzServerSideEncryption, - false, - ) - if err != nil { - t.Fatalf("unexpected error. 
%v\n", err) - } - case2Statement := NewStatement( - policy.Allow, - NewActionSet(GetObjectAction, PutObjectAction), - NewResourceSet(NewResource("mybucket", "myobject*")), - condition.NewFunctions(func1, func2), - ) - - testCases := []struct { - statement Statement - expectErr bool - }{ - {case1Statement, false}, - {case2Statement, true}, - } - - for i, testCase := range testCases { - err := testCase.statement.Validate() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - } -} diff --git a/pkg/ioutil/append-file_nix.go b/pkg/ioutil/append-file_nix.go deleted file mode 100644 index 468c185d..00000000 --- a/pkg/ioutil/append-file_nix.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !windows - -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ioutil - -import ( - "io" - "os" -) - -// AppendFile - appends the file "src" to the file "dst" -func AppendFile(dst string, src string, osync bool) error { - flags := os.O_WRONLY | os.O_APPEND | os.O_CREATE - if osync { - flags = flags | os.O_SYNC - } - appendFile, err := os.OpenFile(dst, flags, 0666) - if err != nil { - return err - } - defer appendFile.Close() - - srcFile, err := os.Open(src) - if err != nil { - return err - } - defer srcFile.Close() - // Allocate staging buffer. - var buf = make([]byte, defaultAppendBufferSize) - _, err = io.CopyBuffer(appendFile, srcFile, buf) - return err -} diff --git a/pkg/ioutil/append-file_windows.go b/pkg/ioutil/append-file_windows.go deleted file mode 100644 index ee23bb03..00000000 --- a/pkg/ioutil/append-file_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ioutil - -import ( - "io" - "os" - - "github.com/minio/minio/pkg/lock" -) - -// AppendFile - appends the file "src" to the file "dst" -func AppendFile(dst string, src string, osync bool) error { - appendFile, err := lock.Open(dst, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0644) - if err != nil { - return err - } - defer appendFile.Close() - - srcFile, err := lock.Open(src, os.O_RDONLY, 0644) - if err != nil { - return err - } - defer srcFile.Close() - // Allocate staging buffer. 
- var buf = make([]byte, defaultAppendBufferSize) - _, err = io.CopyBuffer(appendFile, srcFile, buf) - return err -} diff --git a/pkg/ioutil/ioutil.go b/pkg/ioutil/ioutil.go deleted file mode 100644 index 6c66c385..00000000 --- a/pkg/ioutil/ioutil.go +++ /dev/null @@ -1,266 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package ioutil implements some I/O utility functions which are not covered -// by the standard library. -package ioutil - -import ( - "io" - "os" - - humanize "github.com/dustin/go-humanize" - "github.com/minio/minio/pkg/disk" -) - -// defaultAppendBufferSize - Default buffer size for the AppendFile -const defaultAppendBufferSize = humanize.MiByte - -// WriteOnCloser implements io.WriteCloser and always -// executes at least one write operation if it is closed. -// -// This can be useful within the context of HTTP. At least -// one write operation must happen to send the HTTP headers -// to the peer. -type WriteOnCloser struct { - io.Writer - hasWritten bool -} - -func (w *WriteOnCloser) Write(p []byte) (int, error) { - w.hasWritten = true - return w.Writer.Write(p) -} - -// Close closes the WriteOnCloser. It behaves like io.Closer. -func (w *WriteOnCloser) Close() error { - if !w.hasWritten { - _, err := w.Write(nil) - if err != nil { - return err - } - } - if closer, ok := w.Writer.(io.Closer); ok { - return closer.Close() - } - return nil -} - -// HasWritten returns true if at least one write operation was performed. -func (w *WriteOnCloser) HasWritten() bool { return w.hasWritten } - -// WriteOnClose takes an io.Writer and returns an ioutil.WriteOnCloser. -func WriteOnClose(w io.Writer) *WriteOnCloser { - return &WriteOnCloser{w, false} -} - -// LimitWriter implements io.WriteCloser. -// -// This is implemented such that we want to restrict -// an enscapsulated writer upto a certain length -// and skip a certain number of bytes. -type LimitWriter struct { - io.Writer - skipBytes int64 - wLimit int64 -} - -// Write implements the io.Writer interface limiting upto -// configured length, also skips the first N bytes. -func (w *LimitWriter) Write(p []byte) (n int, err error) { - n = len(p) - var n1 int - if w.skipBytes > 0 { - if w.skipBytes >= int64(len(p)) { - w.skipBytes = w.skipBytes - int64(len(p)) - return n, nil - } - p = p[w.skipBytes:] - w.skipBytes = 0 - } - if w.wLimit == 0 { - return n, nil - } - if w.wLimit < int64(len(p)) { - n1, err = w.Writer.Write(p[:w.wLimit]) - w.wLimit = w.wLimit - int64(n1) - return n, err - } - n1, err = w.Writer.Write(p) - w.wLimit = w.wLimit - int64(n1) - return n, err -} - -// Close closes the LimitWriter. It behaves like io.Closer. -func (w *LimitWriter) Close() error { - if closer, ok := w.Writer.(io.Closer); ok { - return closer.Close() - } - return nil -} - -// LimitedWriter takes an io.Writer and returns an ioutil.LimitWriter. 
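// Illustrative sketch (not part of the deleted file): LimitedWriter, declared just
// below, skips the first skipBytes of input, forwards at most wLimit bytes to the
// wrapped writer, and still reports the full input length to the caller.
// The buffer and sizes here are made up for demonstration.
package main

import (
	"bytes"
	"fmt"

	xioutil "github.com/minio/minio/pkg/ioutil"
)

func main() {
	var out bytes.Buffer
	w := xioutil.LimitedWriter(&out, 2, 5) // skip 2 bytes, then pass through at most 5

	n, err := w.Write([]byte("0123456789"))
	fmt.Println(n, err, out.String()) // 10 <nil> 23456
}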
-func LimitedWriter(w io.Writer, skipBytes int64, limit int64) *LimitWriter { - return &LimitWriter{w, skipBytes, limit} -} - -type nopCloser struct { - io.Writer -} - -func (nopCloser) Close() error { return nil } - -// NopCloser returns a WriteCloser with a no-op Close method wrapping -// the provided Writer w. -func NopCloser(w io.Writer) io.WriteCloser { - return nopCloser{w} -} - -// SkipReader skips a given number of bytes and then returns all -// remaining data. -type SkipReader struct { - io.Reader - - skipCount int64 -} - -func (s *SkipReader) Read(p []byte) (int, error) { - l := int64(len(p)) - if l == 0 { - return 0, nil - } - for s.skipCount > 0 { - if l > s.skipCount { - l = s.skipCount - } - n, err := s.Reader.Read(p[:l]) - if err != nil { - return 0, err - } - s.skipCount -= int64(n) - } - return s.Reader.Read(p) -} - -// NewSkipReader - creates a SkipReader -func NewSkipReader(r io.Reader, n int64) io.Reader { - return &SkipReader{r, n} -} - -// SameFile returns if the files are same. -func SameFile(fi1, fi2 os.FileInfo) bool { - if !os.SameFile(fi1, fi2) { - return false - } - if !fi1.ModTime().Equal(fi2.ModTime()) { - return false - } - if fi1.Mode() != fi2.Mode() { - return false - } - if fi1.Size() != fi2.Size() { - return false - } - return true -} - -// DirectIO alignment needs to be 4K. Defined here as -// directio.AlignSize is defined as 0 in MacOS causing divide by 0 error. -const directioAlignSize = 4096 - -// CopyAligned - copies from reader to writer using the aligned input -// buffer, it is expected that input buffer is page aligned to -// 4K page boundaries. Without passing aligned buffer may cause -// this function to return error. -// -// This code is similar in spirit to io.CopyBuffer but it is only to be -// used with DIRECT I/O based file descriptor and it is expected that -// input writer *os.File not a generic io.Writer. Make sure to have -// the file opened for writes with syscall.O_DIRECT flag. -func CopyAligned(w *os.File, r io.Reader, alignedBuf []byte, totalSize int64) (int64, error) { - // Writes remaining bytes in the buffer. - writeUnaligned := func(w *os.File, buf []byte) (remainingWritten int, err error) { - var n int - remaining := len(buf) - // The following logic writes the remainging data such that it writes whatever best is possible (aligned buffer) - // in O_DIRECT mode and remaining (unaligned buffer) in non-O_DIRECT mode. - remainingAligned := (remaining / directioAlignSize) * directioAlignSize - remainingAlignedBuf := buf[:remainingAligned] - remainingUnalignedBuf := buf[remainingAligned:] - if len(remainingAlignedBuf) > 0 { - n, err = w.Write(remainingAlignedBuf) - if err != nil { - return remainingWritten, err - } - remainingWritten += n - } - if len(remainingUnalignedBuf) > 0 { - // Write on O_DIRECT fds fail if buffer is not 4K aligned, hence disable O_DIRECT. 
- if err = disk.DisableDirectIO(w); err != nil { - return remainingWritten, err - } - n, err = w.Write(remainingUnalignedBuf) - if err != nil { - return remainingWritten, err - } - remainingWritten += n - } - return remainingWritten, nil - } - - var written int64 - for { - buf := alignedBuf - if totalSize != -1 { - remaining := totalSize - written - if remaining < int64(len(buf)) { - buf = buf[:remaining] - } - } - nr, err := io.ReadFull(r, buf) - eof := err == io.EOF || err == io.ErrUnexpectedEOF - if err != nil && !eof { - return written, err - } - buf = buf[:nr] - var nw int - if len(buf)%directioAlignSize == 0 { - // buf is aligned for directio write() - nw, err = w.Write(buf) - } else { - // buf is not aligned, hence use writeUnaligned() - nw, err = writeUnaligned(w, buf) - } - if nw > 0 { - written += int64(nw) - } - if err != nil { - return written, err - } - if nw != len(buf) { - return written, io.ErrShortWrite - } - - if totalSize != -1 { - if written == totalSize { - return written, nil - } - } - if eof { - return written, nil - } - } -} diff --git a/pkg/ioutil/ioutil_test.go b/pkg/ioutil/ioutil_test.go deleted file mode 100644 index c2aaee7c..00000000 --- a/pkg/ioutil/ioutil_test.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package ioutil - -import ( - "bytes" - "io" - goioutil "io/ioutil" - "os" - "testing" -) - -func TestCloseOnWriter(t *testing.T) { - writer := WriteOnClose(goioutil.Discard) - if writer.HasWritten() { - t.Error("WriteOnCloser must not be marked as HasWritten") - } - writer.Write(nil) - if !writer.HasWritten() { - t.Error("WriteOnCloser must be marked as HasWritten") - } - - writer = WriteOnClose(goioutil.Discard) - writer.Close() - if !writer.HasWritten() { - t.Error("WriteOnCloser must be marked as HasWritten") - } -} - -// Test for AppendFile. 
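// Illustrative sketch (not part of the deleted files): driving CopyAligned, defined
// above, with a page-aligned staging buffer. It assumes the github.com/ncw/directio
// helpers MinIO uses elsewhere for O_DIRECT files and aligned allocations; the path
// and payload size are made up.
package main

import (
	"bytes"
	"log"
	"os"

	xioutil "github.com/minio/minio/pkg/ioutil"
	"github.com/ncw/directio"
)

func main() {
	// Open the destination for O_DIRECT writes (platform specifics handled by directio).
	dst, err := directio.OpenFile("/tmp/copyaligned.out", os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// 10 KiB payload: two full 4 KiB aligned writes plus a 2 KiB tail, which
	// CopyAligned flushes only after disabling O_DIRECT on the descriptor.
	payload := make([]byte, 10*1024)
	buf := directio.AlignedBlock(4096) // page-aligned staging buffer

	n, err := xioutil.CopyAligned(dst, bytes.NewReader(payload), buf, int64(len(payload)))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("copied %d bytes", n)
}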
-func TestAppendFile(t *testing.T) { - f, err := goioutil.TempFile("", "") - if err != nil { - t.Fatal(err) - } - name1 := f.Name() - defer os.Remove(name1) - f.WriteString("aaaaaaaaaa") - f.Close() - - f, err = goioutil.TempFile("", "") - if err != nil { - t.Fatal(err) - } - name2 := f.Name() - defer os.Remove(name2) - f.WriteString("bbbbbbbbbb") - f.Close() - - if err = AppendFile(name1, name2, false); err != nil { - t.Error(err) - } - - b, err := goioutil.ReadFile(name1) - if err != nil { - t.Error(err) - } - - expected := "aaaaaaaaaabbbbbbbbbb" - if string(b) != expected { - t.Errorf("AppendFile() failed, expected: %s, got %s", expected, string(b)) - } -} - -func TestSkipReader(t *testing.T) { - testCases := []struct { - src io.Reader - skipLen int64 - expected string - }{ - {bytes.NewBuffer([]byte("")), 0, ""}, - {bytes.NewBuffer([]byte("")), 1, ""}, - {bytes.NewBuffer([]byte("abc")), 0, "abc"}, - {bytes.NewBuffer([]byte("abc")), 1, "bc"}, - {bytes.NewBuffer([]byte("abc")), 2, "c"}, - {bytes.NewBuffer([]byte("abc")), 3, ""}, - {bytes.NewBuffer([]byte("abc")), 4, ""}, - } - for i, testCase := range testCases { - r := NewSkipReader(testCase.src, testCase.skipLen) - b, err := goioutil.ReadAll(r) - if err != nil { - t.Errorf("Case %d: Unexpected err %v", i, err) - } - if string(b) != testCase.expected { - t.Errorf("Case %d: Got wrong result: %v", i, string(b)) - } - } -} - -func TestSameFile(t *testing.T) { - f, err := goioutil.TempFile("", "") - if err != nil { - t.Errorf("Error creating tmp file: %v", err) - } - tmpFile := f.Name() - f.Close() - defer os.Remove(f.Name()) - fi1, err := os.Stat(tmpFile) - if err != nil { - t.Fatalf("Error Stat(): %v", err) - } - fi2, err := os.Stat(tmpFile) - if err != nil { - t.Fatalf("Error Stat(): %v", err) - } - if !SameFile(fi1, fi2) { - t.Fatal("Expected the files to be same") - } - if err = goioutil.WriteFile(tmpFile, []byte("aaa"), 0644); err != nil { - t.Fatal(err) - } - fi2, err = os.Stat(tmpFile) - if err != nil { - t.Fatalf("Error Stat(): %v", err) - } - if SameFile(fi1, fi2) { - t.Fatal("Expected the files not to be same") - } -} diff --git a/pkg/lock/lock.go b/pkg/lock/lock.go deleted file mode 100644 index ed8d622c..00000000 --- a/pkg/lock/lock.go +++ /dev/null @@ -1,101 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package lock - implements filesystem locking wrappers around an -// open file descriptor. -package lock - -import ( - "errors" - "os" - "sync" -) - -var ( - // ErrAlreadyLocked is returned if the underlying fd is already locked. - ErrAlreadyLocked = errors.New("file already locked") -) - -// RLockedFile represents a read locked file, implements a special -// closer which only closes the associated *os.File when the ref count. -// has reached zero, i.e when all the readers have given up their locks. -type RLockedFile struct { - *LockedFile - mutex sync.Mutex - refs int // Holds read lock refs. 
-} - -// IsClosed - Check if the rlocked file is already closed. -func (r *RLockedFile) IsClosed() bool { - r.mutex.Lock() - defer r.mutex.Unlock() - return r.refs == 0 -} - -// IncLockRef - is used by called to indicate lock refs. -func (r *RLockedFile) IncLockRef() { - r.mutex.Lock() - r.refs++ - r.mutex.Unlock() -} - -// Close - this closer implements a special closer -// closes the underlying fd only when the refs -// reach zero. -func (r *RLockedFile) Close() (err error) { - r.mutex.Lock() - defer r.mutex.Unlock() - - if r.refs == 0 { - return os.ErrInvalid - } - - r.refs-- - if r.refs == 0 { - err = r.File.Close() - } - - return err -} - -// Provides a new initialized read locked struct from *os.File -func newRLockedFile(lkFile *LockedFile) (*RLockedFile, error) { - if lkFile == nil { - return nil, os.ErrInvalid - } - - return &RLockedFile{ - LockedFile: lkFile, - refs: 1, - }, nil -} - -// RLockedOpenFile - returns a wrapped read locked file, if the file -// doesn't exist at path returns an error. -func RLockedOpenFile(path string) (*RLockedFile, error) { - lkFile, err := LockedOpenFile(path, os.O_RDONLY, 0666) - if err != nil { - return nil, err - } - - return newRLockedFile(lkFile) - -} - -// LockedFile represents a locked file -type LockedFile struct { - *os.File -} diff --git a/pkg/lock/lock_nix.go b/pkg/lock/lock_nix.go deleted file mode 100644 index c7c3a1a3..00000000 --- a/pkg/lock/lock_nix.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build !windows,!plan9,!solaris - -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lock - -import ( - "os" - "syscall" -) - -// Internal function implements support for both -// blocking and non blocking lock type. -func lockedOpenFile(path string, flag int, perm os.FileMode, lockType int) (*LockedFile, error) { - switch flag { - case syscall.O_RDONLY: - lockType |= syscall.LOCK_SH - case syscall.O_WRONLY: - fallthrough - case syscall.O_RDWR: - fallthrough - case syscall.O_WRONLY | syscall.O_CREAT: - fallthrough - case syscall.O_RDWR | syscall.O_CREAT: - lockType |= syscall.LOCK_EX - default: - return nil, &os.PathError{ - Op: "open", - Path: path, - Err: syscall.EINVAL, - } - } - - f, err := os.OpenFile(path, flag|syscall.O_SYNC, perm) - if err != nil { - return nil, err - } - - if err = syscall.Flock(int(f.Fd()), lockType); err != nil { - f.Close() - if err == syscall.EWOULDBLOCK { - err = ErrAlreadyLocked - } - return nil, err - } - - st, err := os.Stat(path) - if err != nil { - f.Close() - return nil, err - } - - if st.IsDir() { - f.Close() - return nil, &os.PathError{ - Op: "open", - Path: path, - Err: syscall.EISDIR, - } - } - - return &LockedFile{File: f}, nil -} - -// TryLockedOpenFile - tries a new write lock, functionality -// it is similar to LockedOpenFile with with syscall.LOCK_EX -// mode but along with syscall.LOCK_NB such that the function -// doesn't wait forever but instead returns if it cannot -// acquire a write lock. 
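// Illustrative sketch (not part of the deleted files): the usual caller pattern for
// the non-blocking write lock documented above — treat ErrAlreadyLocked as "some
// other process owns the file". The path is made up; the flags map onto the
// LOCK_EX cases handled by lockedOpenFile.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/minio/minio/pkg/lock"
)

func main() {
	lf, err := lock.TryLockedOpenFile("/tmp/minio-format.lock", os.O_RDWR|os.O_CREATE, 0644)
	if err == lock.ErrAlreadyLocked {
		fmt.Println("lock is held elsewhere; back off and retry later")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	// Closing the file releases the advisory lock along with the descriptor.
	defer lf.Close()

	// ... exclusive section: safe to inspect or rewrite the locked file here ...
}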
-func TryLockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - return lockedOpenFile(path, flag, perm, syscall.LOCK_NB) -} - -// LockedOpenFile - initializes a new lock and protects -// the file from concurrent access across mount points. -// This implementation doesn't support all the open -// flags and shouldn't be considered as replacement -// for os.OpenFile(). -func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - return lockedOpenFile(path, flag, perm, 0) -} - -// Open - Call os.OpenFile -func Open(path string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(path, flag, perm) -} diff --git a/pkg/lock/lock_solaris.go b/pkg/lock/lock_solaris.go deleted file mode 100644 index e20c6fe8..00000000 --- a/pkg/lock/lock_solaris.go +++ /dev/null @@ -1,108 +0,0 @@ -// +build solaris - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lock - -import ( - "os" - "syscall" -) - -// lockedOpenFile is an internal function. -func lockedOpenFile(path string, flag int, perm os.FileMode, rlockType int) (*LockedFile, error) { - var lockType int16 - switch flag { - case syscall.O_RDONLY: - lockType = syscall.F_RDLCK - case syscall.O_WRONLY: - fallthrough - case syscall.O_RDWR: - fallthrough - case syscall.O_WRONLY | syscall.O_CREAT: - fallthrough - case syscall.O_RDWR | syscall.O_CREAT: - lockType = syscall.F_WRLCK - default: - return nil, &os.PathError{ - Op: "open", - Path: path, - Err: syscall.EINVAL, - } - } - - var lock = syscall.Flock_t{ - Start: 0, - Len: 0, - Pid: 0, - Type: lockType, - Whence: 0, - } - - f, err := os.OpenFile(path, flag, perm) - if err != nil { - return nil, err - } - - if err = syscall.FcntlFlock(f.Fd(), rlockType, &lock); err != nil { - f.Close() - if err == syscall.EAGAIN { - err = ErrAlreadyLocked - } - return nil, err - } - - st, err := os.Stat(path) - if err != nil { - f.Close() - return nil, err - } - - if st.IsDir() { - f.Close() - return nil, &os.PathError{ - Op: "open", - Path: path, - Err: syscall.EISDIR, - } - } - - return &LockedFile{f}, nil -} - -// TryLockedOpenFile - tries a new write lock, functionality -// it is similar to LockedOpenFile with with syscall.LOCK_EX -// mode but along with syscall.LOCK_NB such that the function -// doesn't wait forever but instead returns if it cannot -// acquire a write lock. -func TryLockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - return lockedOpenFile(path, flag, perm, syscall.F_SETLK) -} - -// LockedOpenFile - initializes a new lock and protects -// the file from concurrent access across mount points. -// This implementation doesn't support all the open -// flags and shouldn't be considered as replacement -// for os.OpenFile(). 
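// Illustrative sketch (not part of the deleted files): the reference-counted shared
// read lock from lock.go earlier in this change. Each IncLockRef must be paired with
// a Close; the underlying descriptor is released only when the count reaches zero.
// The path is made up and must already exist for RLockedOpenFile to succeed.
package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/pkg/lock"
)

func main() {
	rlk, err := lock.RLockedOpenFile("/tmp/minio-format.lock")
	if err != nil {
		log.Fatal(err)
	}

	rlk.IncLockRef() // a second logical reader shares the same fd

	_ = rlk.Close()             // first reader done; fd still open
	fmt.Println(rlk.IsClosed()) // false
	_ = rlk.Close()             // last reader done; fd closed now
	fmt.Println(rlk.IsClosed()) // true
}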
-func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - return lockedOpenFile(path, flag, perm, syscall.F_SETLKW) -} - -// Open - Call os.OpenFile -func Open(path string, flag int, perm os.FileMode) (*os.File, error) { - return os.OpenFile(path, flag, perm) -} diff --git a/pkg/lock/lock_test.go b/pkg/lock/lock_test.go deleted file mode 100644 index e00574a4..00000000 --- a/pkg/lock/lock_test.go +++ /dev/null @@ -1,191 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lock - -import ( - "io/ioutil" - "os" - "testing" - "time" -) - -// Test lock fails. -func TestLockFail(t *testing.T) { - f, err := ioutil.TempFile("", "lock") - if err != nil { - t.Fatal(err) - } - f.Close() - defer func() { - err = os.Remove(f.Name()) - if err != nil { - t.Fatal(err) - } - }() - - _, err = LockedOpenFile(f.Name(), os.O_APPEND, 0600) - if err == nil { - t.Fatal("Should fail here") - } -} - -// Tests lock directory fail. -func TestLockDirFail(t *testing.T) { - d, err := ioutil.TempDir("", "lockDir") - if err != nil { - t.Fatal(err) - } - defer func() { - err = os.Remove(d) - if err != nil { - t.Fatal(err) - } - }() - - _, err = LockedOpenFile(d, os.O_APPEND, 0600) - if err == nil { - t.Fatal("Should fail here") - } -} - -// Tests rwlock methods. -func TestRWLockedFile(t *testing.T) { - f, err := ioutil.TempFile("", "lock") - if err != nil { - t.Fatal(err) - } - f.Close() - defer func() { - err = os.Remove(f.Name()) - if err != nil { - t.Fatal(err) - } - }() - - rlk, err := RLockedOpenFile(f.Name()) - if err != nil { - t.Fatal(err) - } - isClosed := rlk.IsClosed() - if isClosed { - t.Fatal("File ref count shouldn't be zero") - } - - // Increase reference count to 2. - rlk.IncLockRef() - - isClosed = rlk.IsClosed() - if isClosed { - t.Fatal("File ref count shouldn't be zero") - } - - // Decrease reference count by 1. - if err = rlk.Close(); err != nil { - t.Fatal(err) - } - - isClosed = rlk.IsClosed() - if isClosed { - t.Fatal("File ref count shouldn't be zero") - } - - // Decrease reference count by 1. - if err = rlk.Close(); err != nil { - t.Fatal(err) - } - - // Now file should be closed. - isClosed = rlk.IsClosed() - if !isClosed { - t.Fatal("File ref count should be zero") - } - - // Closing a file again should result in invalid argument. - if err = rlk.Close(); err != os.ErrInvalid { - t.Fatal(err) - } - - _, err = newRLockedFile(nil) - if err != os.ErrInvalid { - t.Fatal("Unexpected error", err) - } -} - -// Tests lock and unlock semantics. 
-func TestLockAndUnlock(t *testing.T) { - f, err := ioutil.TempFile("", "lock") - if err != nil { - t.Fatal(err) - } - f.Close() - defer func() { - err = os.Remove(f.Name()) - if err != nil { - t.Fatal(err) - } - }() - - // lock the file - l, err := LockedOpenFile(f.Name(), os.O_WRONLY, 0600) - if err != nil { - t.Fatal(err) - } - - // unlock the file - if err = l.Close(); err != nil { - t.Fatal(err) - } - - // try lock the unlocked file - dupl, err := LockedOpenFile(f.Name(), os.O_WRONLY|os.O_CREATE, 0600) - if err != nil { - t.Errorf("err = %v, want %v", err, nil) - } - - // blocking on locked file - locked := make(chan struct{}, 1) - go func() { - bl, blerr := LockedOpenFile(f.Name(), os.O_WRONLY, 0600) - if blerr != nil { - t.Error(blerr) - return - } - locked <- struct{}{} - if blerr = bl.Close(); blerr != nil { - t.Error(blerr) - return - } - }() - - select { - case <-locked: - t.Error("unexpected unblocking") - case <-time.After(100 * time.Millisecond): - } - - // unlock - if err = dupl.Close(); err != nil { - t.Fatal(err) - } - - // the previously blocked routine should be unblocked - select { - case <-locked: - case <-time.After(1 * time.Second): - t.Error("unexpected blocking") - } -} diff --git a/pkg/lock/lock_windows.go b/pkg/lock/lock_windows.go deleted file mode 100644 index bd7a6c3c..00000000 --- a/pkg/lock/lock_windows.go +++ /dev/null @@ -1,261 +0,0 @@ -// +build windows - -/* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lock - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") -) - -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - lockFileExclusiveLock = 2 - lockFileFailImmediately = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) - -// lockedOpenFile is an internal function. -func lockedOpenFile(path string, flag int, perm os.FileMode, lockType uint32) (*LockedFile, error) { - f, err := Open(path, flag, perm) - if err != nil { - return nil, err - } - - if err = lockFile(syscall.Handle(f.Fd()), lockType); err != nil { - f.Close() - return nil, err - } - - st, err := os.Stat(path) - if err != nil { - f.Close() - return nil, err - } - - if st.IsDir() { - f.Close() - return nil, &os.PathError{ - Op: "open", - Path: path, - Err: syscall.EISDIR, - } - } - - return &LockedFile{File: f}, nil -} - -// TryLockedOpenFile - tries a new write lock, functionality -// it is similar to LockedOpenFile with with syscall.LOCK_EX -// mode but along with syscall.LOCK_NB such that the function -// doesn't wait forever but instead returns if it cannot -// acquire a write lock. 
-func TryLockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - var lockType uint32 = lockFileFailImmediately | lockFileExclusiveLock - switch flag { - case syscall.O_RDONLY: - // https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-lockfileex - //lint:ignore SA4016 Reasons - lockType = lockFileFailImmediately | 0 // Set this to enable shared lock and fail immediately. - } - return lockedOpenFile(path, flag, perm, lockType) -} - -// LockedOpenFile - initializes a new lock and protects -// the file from concurrent access. -func LockedOpenFile(path string, flag int, perm os.FileMode) (*LockedFile, error) { - var lockType uint32 = lockFileExclusiveLock - switch flag { - case syscall.O_RDONLY: - // https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-lockfileex - lockType = 0 // Set this to enable shared lock. - } - return lockedOpenFile(path, flag, perm, lockType) -} - -// fixLongPath returns the extended-length (\\?\-prefixed) form of -// path when needed, in order to avoid the default 260 character file -// path limit imposed by Windows. If path is not easily converted to -// the extended-length form (for example, if path is a relative path -// or contains .. elements), or is short enough, fixLongPath returns -// path unmodified. -// -// See https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx#maxpath -func fixLongPath(path string) string { - // Do nothing (and don't allocate) if the path is "short". - // Empirically (at least on the Windows Server 2013 builder), - // the kernel is arbitrarily okay with < 248 bytes. That - // matches what the docs above say: - // "When using an API to create a directory, the specified - // path cannot be so long that you cannot append an 8.3 file - // name (that is, the directory name cannot exceed MAX_PATH - // minus 12)." Since MAX_PATH is 260, 260 - 12 = 248. - // - // The MSDN docs appear to say that a normal path that is 248 bytes long - // will work; empirically the path must be less then 248 bytes long. - if len(path) < 248 { - // Don't fix. (This is how Go 1.7 and earlier worked, - // not automatically generating the \\?\ form) - return path - } - - // The extended form begins with \\?\, as in - // \\?\c:\windows\foo.txt or \\?\UNC\server\share\foo.txt. - // The extended form disables evaluation of . and .. path - // elements and disables the interpretation of / as equivalent - // to \. The conversion here rewrites / to \ and elides - // . elements as well as trailing or duplicate separators. For - // simplicity it avoids the conversion entirely for relative - // paths or paths containing .. elements. For now, - // \\server\share paths are not converted to - // \\?\UNC\server\share paths because the rules for doing so - // are less well-specified. - if len(path) >= 2 && path[:2] == `\\` { - // Don't canonicalize UNC paths. - return path - } - if !filepath.IsAbs(path) { - // Relative path - return path - } - - const prefix = `\\?` - - pathbuf := make([]byte, len(prefix)+len(path)+len(`\`)) - copy(pathbuf, prefix) - n := len(path) - r, w := 0, len(prefix) - for r < n { - switch { - case os.IsPathSeparator(path[r]): - // empty block - r++ - case path[r] == '.' && (r+1 == n || os.IsPathSeparator(path[r+1])): - // /./ - r++ - case r+1 < n && path[r] == '.' && path[r+1] == '.' 
&& (r+2 == n || os.IsPathSeparator(path[r+2])): - // /../ is currently unhandled - return path - default: - pathbuf[w] = '\\' - w++ - for ; r < n && !os.IsPathSeparator(path[r]); r++ { - pathbuf[w] = path[r] - w++ - } - } - } - // A drive's root directory needs a trailing \ - if w == len(`\\?\c:`) { - pathbuf[w] = '\\' - w++ - } - return string(pathbuf[:w]) -} - -// Open - perm param is ignored, on windows file perms/NT acls -// are not octet combinations. Providing access to NT -// acls is out of scope here. -func Open(path string, flag int, perm os.FileMode) (*os.File, error) { - if path == "" { - return nil, syscall.ERROR_FILE_NOT_FOUND - } - - pathp, err := syscall.UTF16PtrFromString(fixLongPath(path)) - if err != nil { - return nil, err - } - - var access uint32 - switch flag { - case syscall.O_RDONLY: - access = syscall.GENERIC_READ - case syscall.O_WRONLY: - access = syscall.GENERIC_WRITE - case syscall.O_RDWR: - fallthrough - case syscall.O_RDWR | syscall.O_CREAT: - fallthrough - case syscall.O_WRONLY | syscall.O_CREAT: - access = syscall.GENERIC_READ | syscall.GENERIC_WRITE - case syscall.O_WRONLY | syscall.O_CREAT | syscall.O_APPEND: - access = syscall.FILE_APPEND_DATA - default: - return nil, fmt.Errorf("Unsupported flag (%d)", flag) - } - - var createflag uint32 - switch { - case flag&syscall.O_CREAT == syscall.O_CREAT: - createflag = syscall.OPEN_ALWAYS - default: - createflag = syscall.OPEN_EXISTING - } - - shareflag := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE) - accessAttr := uint32(syscall.FILE_ATTRIBUTE_NORMAL | 0x80000000) - - fd, err := syscall.CreateFile(pathp, access, shareflag, nil, createflag, accessAttr, 0) - if err != nil { - return nil, err - } - - return os.NewFile(uintptr(fd), path), nil -} - -func lockFile(fd syscall.Handle, flags uint32) error { - // https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - if fd == syscall.InvalidHandle { - return nil - } - - err := lockFileEx(fd, flags, 1, 0, &syscall.Overlapped{}) - if err == nil { - return nil - } else if err.Error() == "The process cannot access the file because another process has locked a portion of the file." { - return ErrAlreadyLocked - } else if err != errLockViolation { - return err - } - - return nil -} - -func lockFileEx(h syscall.Handle, flags, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - var reserved = uint32(0) - r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(h), uintptr(flags), - uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} diff --git a/pkg/lock/lock_windows_test.go b/pkg/lock/lock_windows_test.go deleted file mode 100644 index ae8bb8b1..00000000 --- a/pkg/lock/lock_windows_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build windows - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package lock - -import ( - "strings" - "testing" -) - -func TestFixLongPath(t *testing.T) { - // 248 is long enough to trigger the longer-than-248 checks in - // fixLongPath, but short enough not to make a path component - // longer than 255, which is illegal on Windows. (which - // doesn't really matter anyway, since this is purely a string - // function we're testing, and it's not actually being used to - // do a system call) - veryLong := "l" + strings.Repeat("o", 248) + "ng" - for _, test := range []struct{ in, want string }{ - // Short; unchanged: - {`C:\short.txt`, `C:\short.txt`}, - {`C:\`, `C:\`}, - {`C:`, `C:`}, - // The "long" substring is replaced by a looooooong - // string which triggers the rewriting. Except in the - // cases below where it doesn't. - {`C:\long\foo.txt`, `\\?\C:\long\foo.txt`}, - {`C:/long/foo.txt`, `\\?\C:\long\foo.txt`}, - {`C:\long\foo\\bar\.\baz\\`, `\\?\C:\long\foo\bar\baz`}, - {`\\unc\path`, `\\unc\path`}, - {`long.txt`, `long.txt`}, - {`C:long.txt`, `C:long.txt`}, - {`c:\long\..\bar\baz`, `c:\long\..\bar\baz`}, - {`\\?\c:\long\foo.txt`, `\\?\c:\long\foo.txt`}, - {`\\?\c:\long/foo.txt`, `\\?\c:\long/foo.txt`}, - } { - in := strings.Replace(test.in, "long", veryLong, -1) - want := strings.Replace(test.want, "long", veryLong, -1) - if got := fixLongPath(in); got != want { - got = strings.Replace(got, veryLong, "long", -1) - t.Errorf("fixLongPath(%q) = %q; want %q", test.in, got, test.want) - } - } -} diff --git a/pkg/lsync/lrwmutex.go b/pkg/lsync/lrwmutex.go deleted file mode 100644 index 1235e18d..00000000 --- a/pkg/lsync/lrwmutex.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package lsync - -import ( - "context" - "math" - "sync" - "time" - - "github.com/minio/minio/pkg/retry" -) - -// A LRWMutex is a mutual exclusion lock with timeouts. -type LRWMutex struct { - id string - source string - isWriteLock bool - ref int - m sync.Mutex // Mutex to prevent multiple simultaneous locks -} - -// NewLRWMutex - initializes a new lsync RW mutex. -func NewLRWMutex() *LRWMutex { - return &LRWMutex{} -} - -// Lock holds a write lock on lm. -// -// If the lock is already in use, the calling go routine -// blocks until the mutex is available. -func (lm *LRWMutex) Lock() { - - const isWriteLock = true - lm.lockLoop(context.Background(), lm.id, lm.source, time.Duration(math.MaxInt64), isWriteLock) -} - -// GetLock tries to get a write lock on lm before the timeout occurs. -func (lm *LRWMutex) GetLock(ctx context.Context, id string, source string, timeout time.Duration) (locked bool) { - - const isWriteLock = true - return lm.lockLoop(ctx, id, source, timeout, isWriteLock) -} - -// RLock holds a read lock on lm. -// -// If one or more read lock are already in use, it will grant another lock. -// Otherwise the calling go routine blocks until the mutex is available. 
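
A minimal sketch of the LRWMutex timeout API defined above; the `id`/`source` strings are opaque bookkeeping values and the durations are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/minio/minio/pkg/lsync"
)

func main() {
	lrw := lsync.NewLRWMutex()

	// Read locks are shared: this one is granted immediately.
	if !lrw.GetRLock(context.Background(), "req-1", "reader", time.Second) {
		fmt.Println("timed out waiting for read lock")
		return
	}

	// Release the read lock shortly, so the writer below can proceed.
	go func() {
		time.Sleep(500 * time.Millisecond)
		lrw.RUnlock()
	}()

	// A write lock is exclusive; GetLock retries with back-off until it
	// is granted or the timeout expires, returning false on timeout.
	if lrw.GetLock(context.Background(), "req-2", "writer", 2*time.Second) {
		fmt.Println("write lock acquired")
		lrw.Unlock()
	} else {
		fmt.Println("write lock timed out")
	}
}
```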
-func (lm *LRWMutex) RLock() { - - const isWriteLock = false - lm.lockLoop(context.Background(), lm.id, lm.source, time.Duration(1<<63-1), isWriteLock) -} - -// GetRLock tries to get a read lock on lm before the timeout occurs. -func (lm *LRWMutex) GetRLock(ctx context.Context, id string, source string, timeout time.Duration) (locked bool) { - - const isWriteLock = false - return lm.lockLoop(ctx, id, source, timeout, isWriteLock) -} - -func (lm *LRWMutex) lock(id, source string, isWriteLock bool) (locked bool) { - lm.m.Lock() - lm.id = id - lm.source = source - if isWriteLock { - if lm.ref == 0 && !lm.isWriteLock { - lm.ref = 1 - lm.isWriteLock = true - locked = true - } - } else { - if !lm.isWriteLock { - lm.ref++ - locked = true - } - } - lm.m.Unlock() - - return locked -} - -// lockLoop will acquire either a read or a write lock -// -// The call will block until the lock is granted using a built-in -// timing randomized back-off algorithm to try again until successful -func (lm *LRWMutex) lockLoop(ctx context.Context, id, source string, timeout time.Duration, isWriteLock bool) (locked bool) { - retryCtx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - // We timed out on the previous lock, incrementally wait - // for a longer back-off time and try again afterwards. - for range retry.NewTimer(retryCtx) { - if lm.lock(id, source, isWriteLock) { - return true - } - } - - // We timed out on the previous lock, incrementally wait - // for a longer back-off time and try again afterwards. - return false -} - -// Unlock unlocks the write lock. -// -// It is a run-time error if lm is not locked on entry to Unlock. -func (lm *LRWMutex) Unlock() { - - isWriteLock := true - success := lm.unlock(isWriteLock) - if !success { - panic("Trying to Unlock() while no Lock() is active") - } -} - -// RUnlock releases a read lock held on lm. -// -// It is a run-time error if lm is not locked on entry to RUnlock. -func (lm *LRWMutex) RUnlock() { - - isWriteLock := false - success := lm.unlock(isWriteLock) - if !success { - panic("Trying to RUnlock() while no RLock() is active") - } -} - -func (lm *LRWMutex) unlock(isWriteLock bool) (unlocked bool) { - lm.m.Lock() - - // Try to release lock. - if isWriteLock { - if lm.isWriteLock && lm.ref == 1 { - lm.ref = 0 - lm.isWriteLock = false - unlocked = true - } - } else { - if !lm.isWriteLock { - if lm.ref > 0 { - lm.ref-- - unlocked = true - } - } - } - - lm.m.Unlock() - return unlocked -} - -// ForceUnlock will forcefully clear a write or read lock. -func (lm *LRWMutex) ForceUnlock() { - lm.m.Lock() - lm.ref = 0 - lm.isWriteLock = false - lm.m.Unlock() -} - -// DRLocker returns a sync.Locker interface that implements -// the Lock and Unlock methods by calling drw.RLock and drw.RUnlock. -func (lm *LRWMutex) DRLocker() sync.Locker { - return (*drlocker)(lm) -} - -type drlocker LRWMutex - -func (dr *drlocker) Lock() { (*LRWMutex)(dr).RLock() } -func (dr *drlocker) Unlock() { (*LRWMutex)(dr).RUnlock() } diff --git a/pkg/lsync/lrwmutex_test.go b/pkg/lsync/lrwmutex_test.go deleted file mode 100644 index fdc4fc6a..00000000 --- a/pkg/lsync/lrwmutex_test.go +++ /dev/null @@ -1,340 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// GOMAXPROCS=10 go test - -package lsync_test - -import ( - "context" - "fmt" - "sync" - "sync/atomic" - "testing" - "time" - - "runtime" - - . "github.com/minio/minio/pkg/lsync" -) - -func testSimpleWriteLock(t *testing.T, duration time.Duration) (locked bool) { - - ctx := context.Background() - lrwm := NewLRWMutex() - - if !lrwm.GetRLock(ctx, "", "object1", time.Second) { - panic("Failed to acquire read lock") - } - // fmt.Println("1st read lock acquired, waiting...") - - if !lrwm.GetRLock(ctx, "", "object1", time.Second) { - panic("Failed to acquire read lock") - } - // fmt.Println("2nd read lock acquired, waiting...") - - go func() { - time.Sleep(2 * time.Second) - lrwm.RUnlock() - // fmt.Println("1st read lock released, waiting...") - }() - - go func() { - time.Sleep(3 * time.Second) - lrwm.RUnlock() - // fmt.Println("2nd read lock released, waiting...") - }() - - // fmt.Println("Trying to acquire write lock, waiting...") - locked = lrwm.GetLock(ctx, "", "", duration) - if locked { - // fmt.Println("Write lock acquired, waiting...") - time.Sleep(1 * time.Second) - - lrwm.Unlock() - } else { - // fmt.Println("Write lock failed due to timeout") - } - return -} - -func TestSimpleWriteLockAcquired(t *testing.T) { - locked := testSimpleWriteLock(t, 5*time.Second) - - expected := true - if locked != expected { - t.Errorf("TestSimpleWriteLockAcquired(): \nexpected %#v\ngot %#v", expected, locked) - } -} - -func TestSimpleWriteLockTimedOut(t *testing.T) { - locked := testSimpleWriteLock(t, time.Second) - - expected := false - if locked != expected { - t.Errorf("TestSimpleWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked) - } -} - -func testDualWriteLock(t *testing.T, duration time.Duration) (locked bool) { - - ctx := context.Background() - lrwm := NewLRWMutex() - - // fmt.Println("Getting initial write lock") - if !lrwm.GetLock(ctx, "", "", time.Second) { - panic("Failed to acquire initial write lock") - } - - go func() { - time.Sleep(2 * time.Second) - lrwm.Unlock() - // fmt.Println("Initial write lock released, waiting...") - }() - - // fmt.Println("Trying to acquire 2nd write lock, waiting...") - locked = lrwm.GetLock(ctx, "", "", duration) - if locked { - // fmt.Println("2nd write lock acquired, waiting...") - time.Sleep(time.Second) - - lrwm.Unlock() - } else { - // fmt.Println("2nd write lock failed due to timeout") - } - return -} - -func TestDualWriteLockAcquired(t *testing.T) { - locked := testDualWriteLock(t, 3*time.Second) - - expected := true - if locked != expected { - t.Errorf("TestDualWriteLockAcquired(): \nexpected %#v\ngot %#v", expected, locked) - } - -} - -func TestDualWriteLockTimedOut(t *testing.T) { - locked := testDualWriteLock(t, time.Second) - - expected := false - if locked != expected { - t.Errorf("TestDualWriteLockTimedOut(): \nexpected %#v\ngot %#v", expected, locked) - } - -} - -// Test cases below are copied 1 to 1 from sync/rwmutex_test.go (adapted to use LRWMutex) - -// Borrowed from rwmutex_test.go -func parallelReader(ctx context.Context, m *LRWMutex, clocked, cunlock, cdone chan bool) { - if m.GetRLock(ctx, "", "", 
time.Second) { - clocked <- true - <-cunlock - m.RUnlock() - cdone <- true - } -} - -// Borrowed from rwmutex_test.go -func doTestParallelReaders(numReaders, gomaxprocs int) { - runtime.GOMAXPROCS(gomaxprocs) - m := NewLRWMutex() - - clocked := make(chan bool) - cunlock := make(chan bool) - cdone := make(chan bool) - for i := 0; i < numReaders; i++ { - go parallelReader(context.Background(), m, clocked, cunlock, cdone) - } - // Wait for all parallel RLock()s to succeed. - for i := 0; i < numReaders; i++ { - <-clocked - } - for i := 0; i < numReaders; i++ { - cunlock <- true - } - // Wait for the goroutines to finish. - for i := 0; i < numReaders; i++ { - <-cdone - } -} - -// Borrowed from rwmutex_test.go -func TestParallelReaders(t *testing.T) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) - doTestParallelReaders(1, 4) - doTestParallelReaders(3, 4) - doTestParallelReaders(4, 2) -} - -// Borrowed from rwmutex_test.go -func reader(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) { - for i := 0; i < numIterations; i++ { - if rwm.GetRLock(context.Background(), "", "", time.Second) { - n := atomic.AddInt32(activity, 1) - if n < 1 || n >= 10000 { - panic(fmt.Sprintf("wlock(%d)\n", n)) - } - for i := 0; i < 100; i++ { - } - atomic.AddInt32(activity, -1) - rwm.RUnlock() - } - } - cdone <- true -} - -// Borrowed from rwmutex_test.go -func writer(rwm *LRWMutex, numIterations int, activity *int32, cdone chan bool) { - for i := 0; i < numIterations; i++ { - if rwm.GetLock(context.Background(), "", "", time.Second) { - n := atomic.AddInt32(activity, 10000) - if n != 10000 { - panic(fmt.Sprintf("wlock(%d)\n", n)) - } - for i := 0; i < 100; i++ { - } - atomic.AddInt32(activity, -10000) - rwm.Unlock() - } - } - cdone <- true -} - -// Borrowed from rwmutex_test.go -func HammerRWMutex(gomaxprocs, numReaders, numIterations int) { - runtime.GOMAXPROCS(gomaxprocs) - // Number of active readers + 10000 * number of active writers. - var activity int32 - rwm := NewLRWMutex() - cdone := make(chan bool) - go writer(rwm, numIterations, &activity, cdone) - var i int - for i = 0; i < numReaders/2; i++ { - go reader(rwm, numIterations, &activity, cdone) - } - go writer(rwm, numIterations, &activity, cdone) - for ; i < numReaders; i++ { - go reader(rwm, numIterations, &activity, cdone) - } - // Wait for the 2 writers and all readers to finish. 
- for i := 0; i < 2+numReaders; i++ { - <-cdone - } -} - -// Borrowed from rwmutex_test.go -func TestRWMutex(t *testing.T) { - defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1)) - n := 1000 - if testing.Short() { - n = 5 - } - HammerRWMutex(1, 1, n) - HammerRWMutex(1, 3, n) - HammerRWMutex(1, 10, n) - HammerRWMutex(4, 1, n) - HammerRWMutex(4, 3, n) - HammerRWMutex(4, 10, n) - HammerRWMutex(10, 1, n) - HammerRWMutex(10, 3, n) - HammerRWMutex(10, 10, n) - HammerRWMutex(10, 5, n) -} - -// Borrowed from rwmutex_test.go -func TestDRLocker(t *testing.T) { - wl := NewLRWMutex() - var rl sync.Locker - wlocked := make(chan bool, 1) - rlocked := make(chan bool, 1) - rl = wl.DRLocker() - n := 10 - go func() { - for i := 0; i < n; i++ { - rl.Lock() - rl.Lock() - rlocked <- true - wl.Lock() - wlocked <- true - } - }() - for i := 0; i < n; i++ { - <-rlocked - rl.Unlock() - select { - case <-wlocked: - t.Fatal("RLocker() didn't read-lock it") - default: - } - rl.Unlock() - <-wlocked - select { - case <-rlocked: - t.Fatal("RLocker() didn't respect the write lock") - default: - } - wl.Unlock() - } -} - -// Borrowed from rwmutex_test.go -func TestUnlockPanic(t *testing.T) { - defer func() { - if recover() == nil { - t.Fatalf("unlock of unlocked RWMutex did not panic") - } - }() - mu := NewLRWMutex() - mu.Unlock() -} - -// Borrowed from rwmutex_test.go -func TestUnlockPanic2(t *testing.T) { - defer func() { - if recover() == nil { - t.Fatalf("unlock of unlocked RWMutex did not panic") - } - }() - mu := NewLRWMutex() - mu.RLock() - mu.Unlock() -} - -// Borrowed from rwmutex_test.go -func TestRUnlockPanic(t *testing.T) { - defer func() { - if recover() == nil { - t.Fatalf("read unlock of unlocked RWMutex did not panic") - } - }() - mu := NewLRWMutex() - mu.RUnlock() -} - -// Borrowed from rwmutex_test.go -func TestRUnlockPanic2(t *testing.T) { - defer func() { - if recover() == nil { - t.Fatalf("read unlock of unlocked RWMutex did not panic") - } - }() - mu := NewLRWMutex() - mu.Lock() - mu.RUnlock() -} diff --git a/pkg/madmin/README.md b/pkg/madmin/README.md deleted file mode 100644 index b45cf07c..00000000 --- a/pkg/madmin/README.md +++ /dev/null @@ -1,578 +0,0 @@ -# Golang Admin Client API Reference [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) -The MinIO Admin Golang Client SDK provides APIs to manage MinIO services. - -This quickstart guide will show you how to install the MinIO Admin client SDK, connect to MinIO admin service, and provide a walkthrough of a simple file uploader. - -This document assumes that you have a working [Golang setup](https://golang.org/doc/install). - -## Initialize MinIO Admin Client object. - -## MinIO - -```go - -package main - -import ( - "fmt" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Use a secure connection. - ssl := true - - // Initialize minio client object. - mdmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETKEY", ssl) - if err != nil { - fmt.Println(err) - return - } - - // Fetch service status. 
- st, err := mdmClnt.ServerInfo() - if err != nil { - fmt.Println(err) - return - } - for _, peerInfo := range serversInfo { - log.Printf("Node: %s, Info: %v\n", peerInfo.Addr, peerInfo.Data) - } -} - -``` - -| Service operations | Info operations | Healing operations | Config operations | -|:------------------------------------|:-----------------------------------------|:-------------------|:--------------------------| -| [`ServiceTrace`](#ServiceTrace) | [`ServerInfo`](#ServerInfo) | [`Heal`](#Heal) | [`GetConfig`](#GetConfig) | -| [`ServiceStop`](#ServiceStop) | [`StorageInfo`](#StorageInfo) | | [`SetConfig`](#SetConfig) | -| [`ServiceRestart`](#ServiceRestart) | [`AccountUsageInfo`](#AccountUsageInfo) | | | - - - -| Top operations | IAM operations | Misc | KMS | -|:------------------------|:--------------------------------------|:--------------------------------------------------|:--------------------------------| -| [`TopLocks`](#TopLocks) | [`AddUser`](#AddUser) | [`StartProfiling`](#StartProfiling) | [`GetKeyStatus`](#GetKeyStatus) | -| | [`SetUserPolicy`](#SetUserPolicy) | [`DownloadProfilingData`](#DownloadProfilingData) | | -| | [`ListUsers`](#ListUsers) | [`ServerUpdate`](#ServerUpdate) | | -| | [`AddCannedPolicy`](#AddCannedPolicy) | | | - -## 1. Constructor - - -### New(endpoint string, accessKeyID string, secretAccessKey string, ssl bool) (*AdminClient, error) -Initializes a new admin client object. - -__Parameters__ - -| Param | Type | Description | -|:------------------|:---------|:----------------------------------------------------------| -| `endpoint` | _string_ | MinIO endpoint. | -| `accessKeyID` | _string_ | Access key for the object storage endpoint. | -| `secretAccessKey` | _string_ | Secret key for the object storage endpoint. | -| `ssl` | _bool_ | Set this value to 'true' to enable secure (HTTPS) access. | - -## 2. Service operations - - -### ServiceStatus(ctx context.Context) (ServiceStatusMetadata, error) -Fetch service status, replies disk space used, backend type and total disks offline/online (applicable in distributed mode). - -| Param | Type | Description | -|-----------------|-------------------------|------------------------------------------------------------| -| `serviceStatus` | _ServiceStatusMetadata_ | Represents current server status info in following format: | - - -| Param | Type | Description | -|-----------------------------|-----------------|------------------------------------| -| `st.ServerVersion.Version` | _string_ | Server version. | -| `st.ServerVersion.CommitID` | _string_ | Server commit id. | -| `st.Uptime` | _time.Duration_ | Server uptime duration in seconds. | - - __Example__ - - ```go - - st, err := madmClnt.ServiceStatus(context.Background()) - if err != nil { - log.Fatalln(err) - } - log.Printf("%#v\n", st) - - ``` - - -### ServiceRestart(ctx context.Context) error -Sends a service action restart command to MinIO server. - - __Example__ - -```go - // To restart the service, restarts all servers in the cluster. - err := madmClnt.ServiceRestart(context.Background()) - if err != nil { - log.Fatalln(err) - } - log.Println("Success") -``` - - -### ServiceStop(ctx context.Context) error -Sends a service action stop command to MinIO server. - - __Example__ - -```go - // To stop the service, stops all servers in the cluster. 
- err := madmClnt.ServiceStop(context.Background()) - if err != nil { - log.Fatalln(err) - } - log.Println("Success") -``` - - -### ServiceTrace(ctx context.Context, allTrace bool, doneCh <-chan struct{}) <-chan TraceInfo -Enable HTTP request tracing on all nodes in a MinIO cluster - -__Example__ - -``` go - doneCh := make(chan struct{}) - defer close(doneCh) - // listen to all trace including internal API calls - allTrace := true - // Start listening on all trace activity. - traceCh := madmClnt.ServiceTrace(context.Background(), allTrace, doneCh) - for traceInfo := range traceCh { - fmt.Println(traceInfo.String()) - } -``` - -## 3. Info operations - - -### ServerInfo(ctx context.Context) ([]ServerInfo, error) -Fetches information for all cluster nodes, such as server properties, storage information, network statistics, etc. - -| Param | Type | Description | -|----------------------------------|--------------------|--------------------------------------------------------------------| -| `si.Addr` | _string_ | Address of the server the following information is retrieved from. | -| `si.ConnStats` | _ServerConnStats_ | Connection statistics from the given server. | -| `si.HTTPStats` | _ServerHTTPStats_ | HTTP connection statistics from the given server. | -| `si.Properties` | _ServerProperties_ | Server properties such as region, notification targets. | - -| Param | Type | Description | -|-----------------------------|-----------------|----------------------------------------------------| -| `ServerProperties.Uptime` | _time.Duration_ | Total duration in seconds since server is running. | -| `ServerProperties.Version` | _string_ | Current server version. | -| `ServerProperties.CommitID` | _string_ | Current server commitID. | -| `ServerProperties.Region` | _string_ | Configured server region. | -| `ServerProperties.SQSARN` | _[]string_ | List of notification target ARNs. | - -| Param | Type | Description | -|------------------------------------|----------|-------------------------------------| -| `ServerConnStats.TotalInputBytes` | _uint64_ | Total bytes received by the server. | -| `ServerConnStats.TotalOutputBytes` | _uint64_ | Total bytes sent by the server. 
| - -| Param | Type | Description | -|--------------------------------------|-------------------------|---------------------------------------------------------| -| `ServerHTTPStats.TotalHEADStats` | _ServerHTTPMethodStats_ | Total statistics regarding HEAD operations | -| `ServerHTTPStats.SuccessHEADStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful HEAD operations | -| `ServerHTTPStats.TotalGETStats` | _ServerHTTPMethodStats_ | Total statistics regarding GET operations | -| `ServerHTTPStats.SuccessGETStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful GET operations | -| `ServerHTTPStats.TotalPUTStats` | _ServerHTTPMethodStats_ | Total statistics regarding PUT operations | -| `ServerHTTPStats.SuccessPUTStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful PUT operations | -| `ServerHTTPStats.TotalPOSTStats` | _ServerHTTPMethodStats_ | Total statistics regarding POST operations | -| `ServerHTTPStats.SuccessPOSTStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful POST operations | -| `ServerHTTPStats.TotalDELETEStats` | _ServerHTTPMethodStats_ | Total statistics regarding DELETE operations | -| `ServerHTTPStats.SuccessDELETEStats` | _ServerHTTPMethodStats_ | Total statistics regarding successful DELETE operations | - -| Param | Type | Description | -|-------------------------------------|----------|-------------------------------------------------| -| `ServerHTTPMethodStats.Count` | _uint64_ | Total number of operations. | -| `ServerHTTPMethodStats.AvgDuration` | _string_ | Average duration of Count number of operations. | - -| Param | Type | Description | -|----------------------|----------|-------------------------------------------------------| -| `DriveInfo.UUID` | _string_ | Unique ID for each disk provisioned by server format. | -| `DriveInfo.Endpoint` | _string_ | Endpoint location of the remote/local disk. | -| `DriveInfo.State` | _string_ | Current state of the disk at endpoint. | - - __Example__ - - ```go - - serversInfo, err := madmClnt.ServerInfo(context.Background()) - if err != nil { - log.Fatalln(err) - } - - for _, peerInfo := range serversInfo { - log.Printf("Node: %s, Info: %v\n", peerInfo.Addr, peerInfo.Data) - } - - ``` - - -### StorageInfo(ctx context.Context) (StorageInfo, error) - -Fetches Storage information for all cluster nodes. - -| Param | Type | Description | -|-------------------------|------------|---------------------------------------------| -| `storageInfo.Used` | _[]int64_ | Used disk spaces. | -| `storageInfo.Total` | _[]int64_ | Total disk spaces. | -| `storageInfo.Available` | _[]int64_ | Available disk spaces. | -| `StorageInfo.Backend` | _struct{}_ | Represents backend type embedded structure. | - -| Param | Type | Description | -|----------------------------|-----------------|--------------------------------------------------------------------------------------------------------------------------| -| `Backend.Type` | _BackendType_ | Type of backend used by the server currently only FS or Erasure. | -| `Backend.OnlineDisks` | _BackendDisks_ | Total number of disks online per node (only applies to Erasure backend) represented in map[string]int, is empty for FS. | -| `Backend.OfflineDisks` | _BackendDisks_ | Total number of disks offline per node (only applies to Erasure backend) represented in map[string]int, is empty for FS. | -| `Backend.StandardSCData` | _int_ | Data disks set for standard storage class, is empty for FS. 
| -| `Backend.StandardSCParity` | _int_ | Parity disks set for standard storage class, is empty for FS. | -| `Backend.RRSCData` | _int_ | Data disks set for reduced redundancy storage class, is empty for FS. | -| `Backend.RRSCParity` | _int_ | Parity disks set for reduced redundancy storage class, is empty for FS. | -| `Backend.Sets` | _[][]DriveInfo_ | Represents topology of drives in erasure coded sets. | - -__Example__ - - ```go - - storageInfo, err := madmClnt.StorageInfo(context.Background()) - if err != nil { - log.Fatalln(err) - } - - log.Println(storageInfo) - - ``` - - - -### AccountUsageInfo(ctx context.Context) (AccountUsageInfo, error) - -Fetches accounting usage information for the current authenticated user - -| Param | Type | Description | -|--------------------------------|----------------------|-------------------------| -| `AccountUsageInfo.AccountName` | _string_ | Account name. | -| `AccountUsageInfo.Buckets` | _[]BucketUsageInfo_ | Bucket usage info. | - - -| Param | Type | Description | -|----------------------------|-----------------|-----------------------------------------| -| `BucketUsageInfo.Name` | _string_ | The name of the current bucket -| `BucketUsageInfo.Size` | _uint64_ | The total size of the current bucket -| `BucketUsageInfo.Created` | _time.Time_ | Bucket creation time -| `BucketUsageInfo.Access` | _AccountAccess_ | Type of access of the current account - - -| Param | Type | Description | -|------------------------|---------|------------------------------------------------------------------| -| `AccountAccess.Read` | _bool_ | Indicate if the bucket is readable by the current account name. | -| `AccountAccess.Write` | _bool_ | Indocate if the bucket is writable by the current account name. | - - -__Example__ - -```go - - accountUsageInfo, err := madmClnt.AccountUsageInfo(context.Background()) - if err != nil { - log.Fatalln(err) - } - - log.Println(accountUsageInfo) - -``` - - - -## 5. Heal operations - - -### Heal(ctx context.Context, bucket, prefix string, healOpts HealOpts, clientToken string, forceStart bool, forceStop bool) (start HealStartSuccess, status HealTaskStatus, err error) - -Start a heal sequence that scans data under given (possible empty) -`bucket` and `prefix`. The `recursive` bool turns on recursive -traversal under the given path. `dryRun` does not mutate on-disk data, -but performs data validation. - -Two heal sequences on overlapping paths may not be initiated. - -The progress of a heal should be followed using the same API `Heal` -by providing the `clientToken` previously obtained from a `Heal` -API. The server accumulates results of the heal traversal and waits -for the client to receive and acknowledge them using the status -request by providing `clientToken`. - -__Example__ - -``` go - - opts := madmin.HealOpts{ - Recursive: true, - DryRun: false, - } - forceStart := false - forceStop := false - healPath, err := madmClnt.Heal(context.Background(), "", "", opts, "", forceStart, forceStop) - if err != nil { - log.Fatalln(err) - } - log.Printf("Heal sequence started at %s", healPath) - -``` - -#### HealStartSuccess structure - -| Param | Type | Description | -|-------------------|-------------|----------------------------------------------------------------------------------------------------------------------------------| -| `s.ClientToken` | _string_ | A unique token for a successfully started heal operation, this token is used to request realtime progress of the heal operation. 
| -| `s.ClientAddress` | _string_ | Address of the client which initiated the heal operation, the client address has the form "host:port". | -| `s.StartTime` | _time.Time_ | Time when heal was initially started. | - -#### HealTaskStatus structure - -| Param | Type | Description | -|-------------------|--------------------|---------------------------------------------------| -| `s.Summary` | _string_ | Short status of heal sequence | -| `s.FailureDetail` | _string_ | Error message in case of heal sequence failure | -| `s.HealSettings` | _HealOpts_ | Contains the booleans set in the `HealStart` call | -| `s.Items` | _[]HealResultItem_ | Heal records for actions performed by server | - -#### HealResultItem structure - -| Param | Type | Description | -|------------------------|----------------|-----------------------------------------------------------------| -| `ResultIndex` | _int64_ | Index of the heal-result record | -| `Type` | _HealItemType_ | Represents kind of heal operation in the heal record | -| `Bucket` | _string_ | Bucket name | -| `Object` | _string_ | Object name | -| `Detail` | _string_ | Details about heal operation | -| `DiskInfo.AvailableOn` | _[]int_ | List of disks on which the healed entity is present and healthy | -| `DiskInfo.HealedOn` | _[]int_ | List of disks on which the healed entity was restored | - -## 6. Config operations - - -### GetConfig(ctx context.Context) ([]byte, error) -Get current `config.json` of a MinIO server. - -__Example__ - -``` go - configBytes, err := madmClnt.GetConfig(context.Background()) - if err != nil { - log.Fatalf("failed due to: %v", err) - } - - // Pretty-print config received as json. - var buf bytes.Buffer - err = json.Indent(buf, configBytes, "", "\t") - if err != nil { - log.Fatalf("failed due to: %v", err) - } - - log.Println("config received successfully: ", string(buf.Bytes())) -``` - - - -### SetConfig(ctx context.Context, config io.Reader) error -Set a new `config.json` for a MinIO server. - -__Example__ - -``` go - config := bytes.NewReader([]byte(`config.json contents go here`)) - if err := madmClnt.SetConfig(context.Background(), config); err != nil { - log.Fatalf("failed due to: %v", err) - } - log.Println("SetConfig was successful") -``` - -## 7. Top operations - - -### TopLocks(ctx context.Context) (LockEntries, error) -Get the oldest locks from MinIO server. - -__Example__ - -``` go - locks, err := madmClnt.TopLocks(context.Background()) - if err != nil { - log.Fatalf("failed due to: %v", err) - } - - out, err := json.Marshal(locks) - if err != nil { - log.Fatalf("Marshal failed due to: %v", err) - } - - log.Println("TopLocks received successfully: ", string(out)) -``` - -## 8. IAM operations - - -### AddCannedPolicy(ctx context.Context, policyName string, policy *iampolicy.Policy) error -Create a new canned policy on MinIO server. - -__Example__ - -``` - policy, err := iampolicy.ParseConfig(strings.NewReader(`{"Version": "2012-10-17","Statement": [{"Action": ["s3:GetObject"],"Effect": "Allow","Resource": ["arn:aws:s3:::my-bucketname/*"],"Sid": ""}]}`)) - if err != nil { - log.Fatalln(err) - } - - if err = madmClnt.AddCannedPolicy(context.Background(), "get-only", policy); err != nil { - log.Fatalln(err) - } -``` - - -### AddUser(ctx context.Context, user string, secret string) error -Add a new user on a MinIO server. 
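
The examples below cover each IAM call in isolation; as a combined, self-contained sketch (the endpoint and credentials are placeholders), creating a user and attaching the `get-only` policy from the previous step looks roughly like this:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	// Placeholder endpoint and credentials.
	madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETKEY", true)
	if err != nil {
		log.Fatalln(err)
	}
	ctx := context.Background()

	// Create the user, then attach the canned policy created above.
	if err = madmClnt.AddUser(ctx, "newuser", "newstrongpassword"); err != nil {
		log.Fatalln(err)
	}
	if err = madmClnt.SetUserPolicy(ctx, "newuser", "get-only"); err != nil {
		log.Fatalln(err)
	}

	// Confirm the user is visible.
	users, err := madmClnt.ListUsers(ctx)
	if err != nil {
		log.Fatalln(err)
	}
	for name, info := range users {
		log.Printf("User %s Status %s\n", name, info.Status)
	}
}
```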
- -__Example__ - -``` go - if err = madmClnt.AddUser(context.Background(), "newuser", "newstrongpassword"); err != nil { - log.Fatalln(err) - } -``` - - -### SetUserPolicy(ctx context.Context, user string, policyName string) error -Enable a canned policy `get-only` for a given user on MinIO server. - -__Example__ - -``` go - if err = madmClnt.SetUserPolicy(context.Background(), "newuser", "get-only"); err != nil { - log.Fatalln(err) - } -``` - - -### ListUsers(ctx context.Context) (map[string]UserInfo, error) -Lists all users on MinIO server. - -__Example__ - -``` go - users, err := madmClnt.ListUsers(context.Background()); - if err != nil { - log.Fatalln(err) - } - for k, v := range users { - fmt.Printf("User %s Status %s\n", k, v.Status) - } -``` - -## 9. Misc operations - - -### ServerUpdate(ctx context.Context, updateURL string) (ServerUpdateStatus, error) -Sends a update command to MinIO server, to update MinIO server to latest release. In distributed setup it updates all servers atomically. - - __Example__ - -```go - // Updates all servers and restarts all the servers in the cluster. - // optionally takes an updateURL, which is used to update the binary. - us, err := madmClnt.ServerUpdate(context.Background(), updateURL) - if err != nil { - log.Fatalln(err) - } - if us.CurrentVersion != us.UpdatedVersion { - log.Printf("Updated server version from %s to %s successfully", us.CurrentVersion, us.UpdatedVersion) - } -``` - - -### StartProfiling(ctx context.Context, profiler string) error -Ask all nodes to start profiling using the specified profiler mode - -__Example__ - -``` go - startProfilingResults, err = madmClnt.StartProfiling(context.Background(), "cpu") - if err != nil { - log.Fatalln(err) - } - for _, result := range startProfilingResults { - if !result.Success { - log.Printf("Unable to start profiling on node `%s`, reason = `%s`\n", result.NodeName, result.Error) - } else { - log.Printf("Profiling successfully started on node `%s`\n", result.NodeName) - } - } - -``` - - -### DownloadProfilingData(ctx context.Context) ([]byte, error) -Download profiling data of all nodes in a zip format. - -__Example__ - -``` go - profilingData, err := madmClnt.DownloadProfilingData(context.Background()) - if err != nil { - log.Fatalln(err) - } - - profilingFile, err := os.Create("/tmp/profiling-data.zip") - if err != nil { - log.Fatal(err) - } - - if _, err := io.Copy(profilingFile, profilingData); err != nil { - log.Fatal(err) - } - - if err := profilingFile.Close(); err != nil { - log.Fatal(err) - } - - if err := profilingData.Close(); err != nil { - log.Fatal(err) - } - - log.Println("Profiling data successfully downloaded.") -``` - -## 11. KMS - - -### GetKeyStatus(ctx context.Context, keyID string) (*KMSKeyStatus, error) -Requests status information about one particular KMS master key -from a MinIO server. The keyID is optional and the server will -use the default master key (configured via `MINIO_KMS_VAULT_KEY_NAME` -or `MINIO_KMS_MASTER_KEY`) if the keyID is empty. 
- -__Example__ - -``` go - keyInfo, err := madmClnt.GetKeyStatus(context.Background(), "my-minio-key") - if err != nil { - log.Fatalln(err) - } - if keyInfo.EncryptionErr != "" { - log.Fatalf("Failed to perform encryption operation using '%s': %v\n", keyInfo.KeyID, keyInfo.EncryptionErr) - } - if keyInfo.UpdateErr != "" { - log.Fatalf("Failed to perform key re-wrap operation using '%s': %v\n", keyInfo.KeyID, keyInfo.UpdateErr) - } - if keyInfo.DecryptionErr != "" { - log.Fatalf("Failed to perform decryption operation using '%s': %v\n", keyInfo.KeyID, keyInfo.DecryptionErr) - } -``` diff --git a/pkg/madmin/api-error-response.go b/pkg/madmin/api-error-response.go deleted file mode 100644 index 48cb779f..00000000 --- a/pkg/madmin/api-error-response.go +++ /dev/null @@ -1,110 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package madmin - -import ( - "encoding/xml" - "fmt" - "net/http" -) - -/* **** SAMPLE ERROR RESPONSE **** - - - AccessDenied - Access Denied - bucketName - objectName - F19772218238A85A - GuWkjyviSiGHizehqpmsD1ndz5NClSP19DOT+s2mv7gXGQ8/X1lhbDGiIJEXpGFD - -*/ - -// ErrorResponse - Is the typed error returned by all API operations. -type ErrorResponse struct { - XMLName xml.Name `xml:"Error" json:"-"` - Code string - Message string - BucketName string - Key string - RequestID string `xml:"RequestId"` - HostID string `xml:"HostId"` - - // Region where the bucket is located. This header is returned - // only in HEAD bucket and ListObjects response. - Region string -} - -// Error - Returns HTTP error string -func (e ErrorResponse) Error() string { - return e.Message -} - -const ( - reportIssue = "Please report this issue at https://github.com/minio/minio/issues." -) - -// httpRespToErrorResponse returns a new encoded ErrorResponse -// structure as error. -func httpRespToErrorResponse(resp *http.Response) error { - if resp == nil { - msg := "Response is empty. " + reportIssue - return ErrInvalidArgument(msg) - } - var errResp ErrorResponse - // Decode the json error - err := jsonDecoder(resp.Body, &errResp) - if err != nil { - return ErrorResponse{ - Code: resp.Status, - Message: fmt.Sprintf("Failed to parse server response: %s.", err), - } - } - closeResponse(resp) - return errResp -} - -// ToErrorResponse - Returns parsed ErrorResponse struct from body and -// http headers. -// -// For example: -// -// import admin "github.com/minio/minio/pkg/madmin" -// ... -// ... -// ss, err := adm.ServiceStatus(...) -// if err != nil { -// resp := admin.ToErrorResponse(err) -// } -// ... -func ToErrorResponse(err error) ErrorResponse { - switch err := err.(type) { - case ErrorResponse: - return err - default: - return ErrorResponse{} - } -} - -// ErrInvalidArgument - Invalid argument response. 
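
A short, self-contained sketch of the error-handling pattern described in the `ToErrorResponse` comment above; the endpoint and credentials are placeholders, and the check on `Code` simply distinguishes a decoded server error from a plain transport error.

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	// Placeholder endpoint and credentials.
	madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	if _, err = madmClnt.ServerInfo(context.Background()); err != nil {
		// ToErrorResponse recovers the typed ErrorResponse when the
		// server replied with a structured error; otherwise it returns
		// the zero value.
		resp := madmin.ToErrorResponse(err)
		if resp.Code != "" {
			log.Printf("admin API error: code=%s message=%s requestID=%s", resp.Code, resp.Message, resp.RequestID)
		} else {
			log.Printf("transport error: %v", err)
		}
		return
	}
	log.Println("server reachable")
}
```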
-func ErrInvalidArgument(message string) error { - return ErrorResponse{ - Code: "InvalidArgument", - Message: message, - RequestID: "minio", - } -} diff --git a/pkg/madmin/api-log-entry.go b/pkg/madmin/api-log-entry.go deleted file mode 100644 index 85bd3cb3..00000000 --- a/pkg/madmin/api-log-entry.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package madmin - -// Args - defines the arguments for the API. -type logArgs struct { - Bucket string `json:"bucket,omitempty"` - Object string `json:"object,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -// Trace - defines the trace. -type logTrace struct { - Message string `json:"message,omitempty"` - Source []string `json:"source,omitempty"` - Variables map[string]string `json:"variables,omitempty"` -} - -// API - defines the api type and its args. -type logAPI struct { - Name string `json:"name,omitempty"` - Args *logArgs `json:"args,omitempty"` -} - -// Entry - defines fields and values of each log entry. -type logEntry struct { - DeploymentID string `json:"deploymentid,omitempty"` - Level string `json:"level"` - LogKind string `json:"errKind"` - Time string `json:"time"` - API *logAPI `json:"api,omitempty"` - RemoteHost string `json:"remotehost,omitempty"` - Host string `json:"host,omitempty"` - RequestID string `json:"requestID,omitempty"` - UserAgent string `json:"userAgent,omitempty"` - Message string `json:"message,omitempty"` - Trace *logTrace `json:"error,omitempty"` -} diff --git a/pkg/madmin/api-log.go b/pkg/madmin/api-log.go deleted file mode 100644 index 0d1448b5..00000000 --- a/pkg/madmin/api-log.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package madmin - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - "strconv" -) - -// LogInfo holds console log messages -type LogInfo struct { - logEntry - ConsoleMsg string - NodeName string `json:"node"` - Err error `json:"-"` -} - -// GetLogs - listen on console log messages. -func (adm AdminClient) GetLogs(ctx context.Context, node string, lineCnt int, logKind string) <-chan LogInfo { - logCh := make(chan LogInfo, 1) - - // Only success, start a routine to start reading line by line. 
- go func(logCh chan<- LogInfo) { - defer close(logCh) - urlValues := make(url.Values) - urlValues.Set("node", node) - urlValues.Set("limit", strconv.Itoa(lineCnt)) - urlValues.Set("logType", logKind) - for { - reqData := requestData{ - relPath: adminAPIPrefix + "/log", - queryValues: urlValues, - } - // Execute GET to call log handler - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - if err != nil { - closeResponse(resp) - return - } - - if resp.StatusCode != http.StatusOK { - logCh <- LogInfo{Err: httpRespToErrorResponse(resp)} - return - } - dec := json.NewDecoder(resp.Body) - for { - var info LogInfo - if err = dec.Decode(&info); err != nil { - break - } - select { - case <-ctx.Done(): - return - case logCh <- info: - } - } - - } - }(logCh) - - // Returns the log info channel, for caller to start reading from. - return logCh -} diff --git a/pkg/madmin/api.go b/pkg/madmin/api.go deleted file mode 100644 index c2fd5f42..00000000 --- a/pkg/madmin/api.go +++ /dev/null @@ -1,506 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package madmin - -import ( - "bytes" - "context" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "math/rand" - "net/http" - "net/http/cookiejar" - "net/http/httputil" - "net/url" - "os" - "regexp" - "runtime" - "strings" - "time" - - "github.com/minio/minio-go/v6/pkg/credentials" - "github.com/minio/minio-go/v6/pkg/s3utils" - "github.com/minio/minio-go/v6/pkg/signer" - "golang.org/x/net/publicsuffix" -) - -// AdminClient implements Amazon S3 compatible methods. -type AdminClient struct { - /// Standard options. - - // Parsed endpoint url provided by the user. - endpointURL *url.URL - - // Holds various credential providers. - credsProvider *credentials.Credentials - - // User supplied. - appInfo struct { - appName string - appVersion string - } - - // Indicate whether we are using https or not - secure bool - - // Needs allocation. - httpClient *http.Client - - random *rand.Rand - - // Advanced functionality. - isTraceEnabled bool - traceOutput io.Writer -} - -// Global constants. -const ( - libraryName = "madmin-go" - libraryVersion = "0.0.1" - - libraryAdminURLPrefix = "/minio/admin" -) - -// User Agent should always following the below style. -// Please open an issue to discuss any new changes here. 
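
A minimal consumer sketch for `GetLogs`; the endpoint, credentials, node, and log-kind values are placeholders, and cancelling the passed context is what ends the streaming goroutine and closes the returned channel.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	// Placeholder endpoint and credentials.
	madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Stop streaming after 30 seconds; cancellation closes the channel.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// Stream the last 10 console log lines (and any new ones) from all
	// nodes; "minio" as the log kind is a placeholder value.
	for info := range madmClnt.GetLogs(ctx, "", 10, "minio") {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		fmt.Printf("%s: %s\n", info.NodeName, info.ConsoleMsg)
	}
}
```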
-// -// MinIO (OS; ARCH) LIB/VER APP/VER -const ( - libraryUserAgentPrefix = "MinIO (" + runtime.GOOS + "; " + runtime.GOARCH + ") " - libraryUserAgent = libraryUserAgentPrefix + libraryName + "/" + libraryVersion -) - -// Options for New method -type Options struct { - Creds *credentials.Credentials - Secure bool - // Add future fields here -} - -// New - instantiate minio admin client -func New(endpoint string, accessKeyID, secretAccessKey string, secure bool) (*AdminClient, error) { - creds := credentials.NewStaticV4(accessKeyID, secretAccessKey, "") - - clnt, err := privateNew(endpoint, creds, secure) - if err != nil { - return nil, err - } - return clnt, nil -} - -// NewWithOptions - instantiate minio admin client with options. -func NewWithOptions(endpoint string, opts *Options) (*AdminClient, error) { - clnt, err := privateNew(endpoint, opts.Creds, opts.Secure) - if err != nil { - return nil, err - } - return clnt, nil -} - -func privateNew(endpoint string, creds *credentials.Credentials, secure bool) (*AdminClient, error) { - // Initialize cookies to preserve server sent cookies if any and replay - // them upon each request. - jar, err := cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List}) - if err != nil { - return nil, err - } - - // construct endpoint. - endpointURL, err := getEndpointURL(endpoint, secure) - if err != nil { - return nil, err - } - - clnt := new(AdminClient) - - // Save the credentials. - clnt.credsProvider = creds - - // Remember whether we are using https or not - clnt.secure = secure - - // Save endpoint URL, user agent for future uses. - clnt.endpointURL = endpointURL - - // Instantiate http client and bucket location cache. - clnt.httpClient = &http.Client{ - Jar: jar, - Transport: DefaultTransport(secure), - } - - // Add locked pseudo-random number generator. - clnt.random = rand.New(&lockedRandSource{src: rand.NewSource(time.Now().UTC().UnixNano())}) - - // Return. - return clnt, nil -} - -// SetAppInfo - add application details to user agent. -func (adm *AdminClient) SetAppInfo(appName string, appVersion string) { - // if app name and version is not set, we do not a new user - // agent. - if appName != "" && appVersion != "" { - adm.appInfo.appName = appName - adm.appInfo.appVersion = appVersion - } -} - -// SetCustomTransport - set new custom transport. -func (adm *AdminClient) SetCustomTransport(customHTTPTransport http.RoundTripper) { - // Set this to override default transport - // ``http.DefaultTransport``. - // - // This transport is usually needed for debugging OR to add your - // own custom TLS certificates on the client transport, for custom - // CA's and certs which are not part of standard certificate - // authority follow this example :- - // - // tr := &http.Transport{ - // TLSClientConfig: &tls.Config{RootCAs: pool}, - // DisableCompression: true, - // } - // api.SetTransport(tr) - // - if adm.httpClient != nil { - adm.httpClient.Transport = customHTTPTransport - } -} - -// TraceOn - enable HTTP tracing. -func (adm *AdminClient) TraceOn(outputStream io.Writer) { - // if outputStream is nil then default to os.Stdout. - if outputStream == nil { - outputStream = os.Stdout - } - // Sets a new output stream. - adm.traceOutput = outputStream - - // Enable tracing. - adm.isTraceEnabled = true -} - -// TraceOff - disable HTTP tracing. -func (adm *AdminClient) TraceOff() { - // Disable tracing. - adm.isTraceEnabled = false -} - -// requestMetadata - is container for all the values to make a -// request. 
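
A small construction sketch combining `NewWithOptions`, `SetAppInfo`, and `TraceOn` as defined above; the endpoint and credentials are placeholders.

```go
package main

import (
	"log"
	"os"

	"github.com/minio/minio-go/v6/pkg/credentials"
	"github.com/minio/minio/pkg/madmin"
)

func main() {
	// Placeholder endpoint and credentials.
	opts := &madmin.Options{
		Creds:  credentials.NewStaticV4("YOUR-ACCESSKEYID", "YOUR-SECRETKEY", ""),
		Secure: true,
	}
	madmClnt, err := madmin.NewWithOptions("your-minio.example.com:9000", opts)
	if err != nil {
		log.Fatalln(err)
	}

	// Tag requests with an application name/version and dump each HTTP
	// exchange (with the signature redacted) to stdout.
	madmClnt.SetAppInfo("my-admin-tool", "0.1.0")
	madmClnt.TraceOn(os.Stdout)
	defer madmClnt.TraceOff()

	// Subsequent admin calls (ServerInfo, ServiceRestart, ...) go here.
}
```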
-type requestData struct { - customHeaders http.Header - queryValues url.Values - relPath string // URL path relative to admin API base endpoint - content []byte -} - -// Filter out signature value from Authorization header. -func (adm AdminClient) filterSignature(req *http.Request) { - /// Signature V4 authorization header. - - // Save the original auth. - origAuth := req.Header.Get("Authorization") - // Strip out accessKeyID from: - // Credential=////aws4_request - regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/") - newAuth := regCred.ReplaceAllString(origAuth, "Credential=**REDACTED**/") - - // Strip out 256-bit signature from: Signature=<256-bit signature> - regSign := regexp.MustCompile("Signature=([[0-9a-f]+)") - newAuth = regSign.ReplaceAllString(newAuth, "Signature=**REDACTED**") - - // Set a temporary redacted auth - req.Header.Set("Authorization", newAuth) -} - -// dumpHTTP - dump HTTP request and response. -func (adm AdminClient) dumpHTTP(req *http.Request, resp *http.Response) error { - // Starts http dump. - _, err := fmt.Fprintln(adm.traceOutput, "---------START-HTTP---------") - if err != nil { - return err - } - - // Filter out Signature field from Authorization header. - adm.filterSignature(req) - - // Only display request header. - reqTrace, err := httputil.DumpRequestOut(req, false) - if err != nil { - return err - } - - // Write request to trace output. - _, err = fmt.Fprint(adm.traceOutput, string(reqTrace)) - if err != nil { - return err - } - - // Only display response header. - var respTrace []byte - - // For errors we make sure to dump response body as well. - if resp.StatusCode != http.StatusOK && - resp.StatusCode != http.StatusPartialContent && - resp.StatusCode != http.StatusNoContent { - respTrace, err = httputil.DumpResponse(resp, true) - if err != nil { - return err - } - } else { - // WORKAROUND for https://github.com/golang/go/issues/13942. - // httputil.DumpResponse does not print response headers for - // all successful calls which have response ContentLength set - // to zero. Keep this workaround until the above bug is fixed. - if resp.ContentLength == 0 { - var buffer bytes.Buffer - if err = resp.Header.Write(&buffer); err != nil { - return err - } - respTrace = buffer.Bytes() - respTrace = append(respTrace, []byte("\r\n")...) - } else { - respTrace, err = httputil.DumpResponse(resp, false) - if err != nil { - return err - } - } - } - // Write response to trace output. - _, err = fmt.Fprint(adm.traceOutput, strings.TrimSuffix(string(respTrace), "\r\n")) - if err != nil { - return err - } - - // Ends the http dump. - _, err = fmt.Fprintln(adm.traceOutput, "---------END-HTTP---------") - return err -} - -// do - execute http request. -func (adm AdminClient) do(req *http.Request) (*http.Response, error) { - resp, err := adm.httpClient.Do(req) - if err != nil { - // Handle this specifically for now until future Golang versions fix this issue properly. - if urlErr, ok := err.(*url.Error); ok { - if strings.Contains(urlErr.Err.Error(), "EOF") { - return nil, &url.Error{ - Op: urlErr.Op, - URL: urlErr.URL, - Err: errors.New("Connection closed by foreign host " + urlErr.URL + ". Retry again."), - } - } - } - return nil, err - } - - // Response cannot be non-nil, report if its the case. - if resp == nil { - msg := "Response is empty. " // + reportIssue - return nil, ErrInvalidArgument(msg) - } - - // If trace is enabled, dump http request and response. 
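filterSignature only rewrites the Authorization header before the request is dumped. A standalone illustration of the two regular expressions (the sample header value is made up); note that the stray '[' in "([[0-9a-f]+)" is just a literal bracket inside the character class, so hex signatures still match:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	auth := "AWS4-HMAC-SHA256 Credential=AKIAIOSFODNN7EXAMPLE/20200101/us-east-1/s3/aws4_request, " +
		"SignedHeaders=host;x-amz-date, Signature=fe5f80f77d5fa3beca038a248ff027d0445342fe2855ddc963176630326f1024"

	regCred := regexp.MustCompile("Credential=([A-Z0-9]+)/")
	auth = regCred.ReplaceAllString(auth, "Credential=**REDACTED**/")

	regSign := regexp.MustCompile("Signature=([[0-9a-f]+)")
	auth = regSign.ReplaceAllString(auth, "Signature=**REDACTED**")

	fmt.Println(auth)
	// AWS4-HMAC-SHA256 Credential=**REDACTED**/20200101/us-east-1/s3/aws4_request, ... Signature=**REDACTED**
}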
- if adm.isTraceEnabled { - err = adm.dumpHTTP(req, resp) - if err != nil { - return nil, err - } - } - return resp, nil -} - -// List of success status. -var successStatus = []int{ - http.StatusOK, - http.StatusNoContent, - http.StatusPartialContent, -} - -// executeMethod - instantiates a given method, and retries the -// request upon any error up to maxRetries attempts in a binomially -// delayed manner using a standard back off algorithm. -func (adm AdminClient) executeMethod(ctx context.Context, method string, reqData requestData) (res *http.Response, err error) { - var reqRetry = MaxRetry // Indicates how many times we can retry the request - - defer func() { - if err != nil { - // close idle connections before returning, upon error. - adm.httpClient.CloseIdleConnections() - } - }() - - // Create cancel context to control 'newRetryTimer' go routine. - retryCtx, cancel := context.WithCancel(ctx) - - // Indicate to our routine to exit cleanly upon return. - defer cancel() - - for range adm.newRetryTimer(retryCtx, reqRetry, DefaultRetryUnit, DefaultRetryCap, MaxJitter) { - // Instantiate a new request. - var req *http.Request - req, err = adm.newRequest(method, reqData) - if err != nil { - return nil, err - } - - // Add context to request - req = req.WithContext(ctx) - - // Initiate the request. - res, err = adm.do(req) - if err != nil { - if err == context.Canceled || err == context.DeadlineExceeded { - return nil, err - } - // retry all network errors. - continue - } - - // For any known successful http status, return quickly. - for _, httpStatus := range successStatus { - if httpStatus == res.StatusCode { - return res, nil - } - } - - // Read the body to be saved later. - errBodyBytes, err := ioutil.ReadAll(res.Body) - // res.Body should be closed - closeResponse(res) - if err != nil { - return nil, err - } - - // Save the body. - errBodySeeker := bytes.NewReader(errBodyBytes) - res.Body = ioutil.NopCloser(errBodySeeker) - - // For errors verify if its retryable otherwise fail quickly. - errResponse := ToErrorResponse(httpRespToErrorResponse(res)) - - // Save the body back again. - errBodySeeker.Seek(0, 0) // Seek back to starting point. - res.Body = ioutil.NopCloser(errBodySeeker) - - // Verify if error response code is retryable. - if isS3CodeRetryable(errResponse.Code) { - continue // Retry. - } - - // Verify if http status code is retryable. - if isHTTPStatusRetryable(res.StatusCode) { - continue // Retry. - } - - break - } - - // Return an error when retry is canceled or deadlined - if e := retryCtx.Err(); e != nil { - return nil, e - } - - return res, err -} - -// set User agent. -func (adm AdminClient) setUserAgent(req *http.Request) { - req.Header.Set("User-Agent", libraryUserAgent) - if adm.appInfo.appName != "" && adm.appInfo.appVersion != "" { - req.Header.Set("User-Agent", libraryUserAgent+" "+adm.appInfo.appName+"/"+adm.appInfo.appVersion) - } -} - -func (adm AdminClient) getSecretKey() string { - value, err := adm.credsProvider.Get() - if err != nil { - // Return empty, call will fail. - return "" - } - - return value.SecretAccessKey -} - -// newRequest - instantiate a new HTTP request for a given method. -func (adm AdminClient) newRequest(method string, reqData requestData) (req *http.Request, err error) { - // If no method is supplied default to 'POST'. - if method == "" { - method = "POST" - } - - // Default all requests to "" - location := "" - - // Construct a new target URL. 
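newRetryTimer, MaxRetry and the DefaultRetryUnit/DefaultRetryCap/MaxJitter constants used by executeMethod live in a retry helper that is outside this hunk. As a rough stand-in only (the names and constants below are assumptions, not the deleted implementation), the timer can be pictured as a channel that yields one attempt per tick with capped, jittered exponential backoff:

package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

// retryTimer yields attempt numbers until maxRetry is reached or ctx is done,
// sleeping an exponentially growing, jittered, capped duration between ticks.
func retryTimer(ctx context.Context, maxRetry int, unit, maxWait time.Duration) <-chan int {
	ch := make(chan int)
	go func() {
		defer close(ch)
		for i := 0; i < maxRetry; i++ {
			select {
			case ch <- i:
			case <-ctx.Done():
				return
			}
			backoff := unit << uint(i) // exponential growth
			if backoff > maxWait {
				backoff = maxWait
			}
			// Up to +50% random jitter so concurrent clients do not retry in lockstep.
			backoff += time.Duration(rand.Int63n(int64(backoff)/2 + 1))
			select {
			case <-time.After(backoff):
			case <-ctx.Done():
				return
			}
		}
	}()
	return ch
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	for attempt := range retryTimer(ctx, 10, 50*time.Millisecond, time.Second) {
		fmt.Println("attempt", attempt)
	}
}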
- targetURL, err := adm.makeTargetURL(reqData) - if err != nil { - return nil, err - } - - // Initialize a new HTTP request for the method. - req, err = http.NewRequest(method, targetURL.String(), nil) - if err != nil { - return nil, err - } - - value, err := adm.credsProvider.Get() - if err != nil { - return nil, err - } - - var ( - accessKeyID = value.AccessKeyID - secretAccessKey = value.SecretAccessKey - sessionToken = value.SessionToken - ) - - adm.setUserAgent(req) - for k, v := range reqData.customHeaders { - req.Header.Set(k, v[0]) - } - if length := len(reqData.content); length > 0 { - req.ContentLength = int64(length) - } - req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum256(reqData.content))) - req.Body = ioutil.NopCloser(bytes.NewReader(reqData.content)) - - req = signer.SignV4(*req, accessKeyID, secretAccessKey, sessionToken, location) - return req, nil -} - -// makeTargetURL make a new target url. -func (adm AdminClient) makeTargetURL(r requestData) (*url.URL, error) { - - host := adm.endpointURL.Host - scheme := adm.endpointURL.Scheme - - urlStr := scheme + "://" + host + libraryAdminURLPrefix + r.relPath - - // If there are any query values, add them to the end. - if len(r.queryValues) > 0 { - urlStr = urlStr + "?" + s3utils.QueryEncode(r.queryValues) - } - u, err := url.Parse(urlStr) - if err != nil { - return nil, err - } - return u, nil -} diff --git a/pkg/madmin/api_test.go b/pkg/madmin/api_test.go deleted file mode 100644 index b3881c89..00000000 --- a/pkg/madmin/api_test.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package madmin_test -package madmin_test - -import ( - "testing" - - "github.com/minio/minio/pkg/madmin" -) - -func TestMinioAdminClient(t *testing.T) { - _, err := madmin.New("localhost:9000", "food", "food123", true) - if err != nil { - t.Fatal(err) - } -} diff --git a/pkg/madmin/config-commands.go b/pkg/madmin/config-commands.go deleted file mode 100644 index 3cdf43ac..00000000 --- a/pkg/madmin/config-commands.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017-2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "bytes" - "context" - "io" - "net/http" -) - -// GetConfig - returns the config.json of a minio setup, incoming data is encrypted. 
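makeTargetURL above simply concatenates scheme://host, the fixed "/minio/admin" prefix, the request's relative path and an encoded query string. A tiny standalone sketch of the same construction, using net/url's standard encoder in place of s3utils.QueryEncode and assuming adminAPIPrefix is the versioned prefix (e.g. "/v3"):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	host := "your-minio.example.com:9000"
	scheme := "https"
	relPath := "/v3/get-config-kv" // i.e. adminAPIPrefix + "/get-config-kv"

	q := url.Values{}
	q.Set("key", "region")

	urlStr := scheme + "://" + host + "/minio/admin" + relPath
	if len(q) > 0 {
		urlStr += "?" + q.Encode()
	}

	u, err := url.Parse(urlStr)
	if err != nil {
		panic(err)
	}
	fmt.Println(u.String())
	// https://your-minio.example.com:9000/minio/admin/v3/get-config-kv?key=region
}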
-func (adm *AdminClient) GetConfig(ctx context.Context) ([]byte, error) { - // Execute GET on /minio/admin/v3/config to get config of a setup. - resp, err := adm.executeMethod(ctx, - http.MethodGet, - requestData{relPath: adminAPIPrefix + "/config"}) - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - return DecryptData(adm.getSecretKey(), resp.Body) -} - -// SetConfig - set config supplied as config.json for the setup. -func (adm *AdminClient) SetConfig(ctx context.Context, config io.Reader) (err error) { - const maxConfigJSONSize = 256 * 1024 // 256KiB - - // Read configuration bytes - configBuf := make([]byte, maxConfigJSONSize+1) - n, err := io.ReadFull(config, configBuf) - if err == nil { - return bytes.ErrTooLarge - } - if err != io.ErrUnexpectedEOF { - return err - } - configBytes := configBuf[:n] - econfigBytes, err := EncryptData(adm.getSecretKey(), configBytes) - if err != nil { - return err - } - - reqData := requestData{ - relPath: adminAPIPrefix + "/config", - content: econfigBytes, - } - - // Execute PUT on /minio/admin/v3/config to set config. - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} diff --git a/pkg/madmin/config-help-commands.go b/pkg/madmin/config-help-commands.go deleted file mode 100644 index bfcc61ef..00000000 --- a/pkg/madmin/config-help-commands.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "net/http" - "net/url" -) - -// Help - return sub-system level help -type Help struct { - SubSys string `json:"subSys"` - Description string `json:"description"` - MultipleTargets bool `json:"multipleTargets"` - KeysHelp HelpKVS `json:"keysHelp"` -} - -// HelpKV - implements help messages for keys -// with value as description of the keys. -type HelpKV struct { - Key string `json:"key"` - Description string `json:"description"` - Optional bool `json:"optional"` - Type string `json:"type"` - MultipleTargets bool `json:"multipleTargets"` -} - -// HelpKVS - implement order of keys help messages. -type HelpKVS []HelpKV - -// Keys returns help keys -func (h Help) Keys() []string { - var keys []string - for _, kh := range h.KeysHelp { - keys = append(keys, kh.Key) - } - return keys -} - -// HelpConfigKV - return help for a given sub-system. 
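A caller-side round trip for the two config methods above (a sketch, not part of the deleted sources). GetConfig hands back already-decrypted bytes and SetConfig re-encrypts them with the admin secret key; SetConfig itself rejects payloads over the 256 KiB limit enforced above:

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	ctx := context.Background()

	cfg, err := madmClnt.GetConfig(ctx)
	if err != nil {
		log.Fatalln(err)
	}

	// ... inspect or edit cfg here ...

	if err := madmClnt.SetConfig(ctx, bytes.NewReader(cfg)); err != nil {
		log.Fatalln(err)
	}
	log.Println("config written back")
}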
-func (adm *AdminClient) HelpConfigKV(ctx context.Context, subSys, key string, envOnly bool) (Help, error) { - v := url.Values{} - v.Set("subSys", subSys) - v.Set("key", key) - if envOnly { - v.Set("env", "") - } - - reqData := requestData{ - relPath: adminAPIPrefix + "/help-config-kv", - queryValues: v, - } - - // Execute GET on /minio/admin/v3/help-config-kv - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - if err != nil { - return Help{}, err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return Help{}, httpRespToErrorResponse(resp) - } - - var help = Help{} - d := json.NewDecoder(resp.Body) - d.DisallowUnknownFields() - if err = d.Decode(&help); err != nil { - return help, err - } - - return help, nil -} diff --git a/pkg/madmin/config-history-commands.go b/pkg/madmin/config-history-commands.go deleted file mode 100644 index a1838ac8..00000000 --- a/pkg/madmin/config-history-commands.go +++ /dev/null @@ -1,127 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - "strconv" - "time" -) - -// ClearConfigHistoryKV - clears the config entry represented by restoreID. -// optionally allows setting `all` as a special keyword to automatically -// erase all config set history entires. -func (adm *AdminClient) ClearConfigHistoryKV(ctx context.Context, restoreID string) (err error) { - v := url.Values{} - v.Set("restoreId", restoreID) - reqData := requestData{ - relPath: adminAPIPrefix + "/clear-config-history-kv", - queryValues: v, - } - - // Execute DELETE on /minio/admin/v3/clear-config-history-kv - resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// RestoreConfigHistoryKV - Restore a previous config set history. -// Input is a unique id which represents the previous setting. -func (adm *AdminClient) RestoreConfigHistoryKV(ctx context.Context, restoreID string) (err error) { - v := url.Values{} - v.Set("restoreId", restoreID) - reqData := requestData{ - relPath: adminAPIPrefix + "/restore-config-history-kv", - queryValues: v, - } - - // Execute PUT on /minio/admin/v3/set-config-kv to set config key/value. - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// ConfigHistoryEntry - captures config set history with a unique -// restore ID and createTime -type ConfigHistoryEntry struct { - RestoreID string `json:"restoreId"` - CreateTime time.Time `json:"createTime"` - Data string `json:"data"` -} - -// CreateTimeFormatted is used to print formatted time for CreateTime. 
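Example use of HelpConfigKV defined above (a sketch; the "region" sub-system name is only an illustrative value): fetch the help metadata and print each key with its description:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	help, err := madmClnt.HelpConfigKV(context.Background(), "region", "", false)
	if err != nil {
		log.Fatalln(err)
	}

	fmt.Println(help.SubSys + ": " + help.Description)
	for _, kv := range help.KeysHelp {
		fmt.Printf("  %-16s %s (optional=%v)\n", kv.Key, kv.Description, kv.Optional)
	}
}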
-func (ch ConfigHistoryEntry) CreateTimeFormatted() string { - return ch.CreateTime.Format(http.TimeFormat) -} - -// ListConfigHistoryKV - lists a slice of ConfigHistoryEntries sorted by createTime. -func (adm *AdminClient) ListConfigHistoryKV(ctx context.Context, count int) ([]ConfigHistoryEntry, error) { - if count == 0 { - count = 10 - } - v := url.Values{} - v.Set("count", strconv.Itoa(count)) - - // Execute GET on /minio/admin/v3/list-config-history-kv - resp, err := adm.executeMethod(ctx, - http.MethodGet, - requestData{ - relPath: adminAPIPrefix + "/list-config-history-kv", - queryValues: v, - }) - defer closeResponse(resp) - if err != nil { - return nil, err - } - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - data, err := DecryptData(adm.getSecretKey(), resp.Body) - if err != nil { - return nil, err - } - - var chEntries []ConfigHistoryEntry - if err = json.Unmarshal(data, &chEntries); err != nil { - return chEntries, err - } - - return chEntries, nil -} diff --git a/pkg/madmin/config-kv-commands.go b/pkg/madmin/config-kv-commands.go deleted file mode 100644 index 202b148d..00000000 --- a/pkg/madmin/config-kv-commands.go +++ /dev/null @@ -1,104 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "net/http" - "net/url" -) - -// DelConfigKV - delete key from server config. -func (adm *AdminClient) DelConfigKV(ctx context.Context, k string) (err error) { - econfigBytes, err := EncryptData(adm.getSecretKey(), []byte(k)) - if err != nil { - return err - } - - reqData := requestData{ - relPath: adminAPIPrefix + "/del-config-kv", - content: econfigBytes, - } - - // Execute DELETE on /minio/admin/v3/del-config-kv to delete config key. - resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// SetConfigKV - set key value config to server. -func (adm *AdminClient) SetConfigKV(ctx context.Context, kv string) (err error) { - econfigBytes, err := EncryptData(adm.getSecretKey(), []byte(kv)) - if err != nil { - return err - } - - reqData := requestData{ - relPath: adminAPIPrefix + "/set-config-kv", - content: econfigBytes, - } - - // Execute PUT on /minio/admin/v3/set-config-kv to set config key/value. - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// GetConfigKV - returns the key, value of the requested key, incoming data is encrypted. -func (adm *AdminClient) GetConfigKV(ctx context.Context, key string) ([]byte, error) { - v := url.Values{} - v.Set("key", key) - - // Execute GET on /minio/admin/v3/get-config-kv?key={key} to get value of key. 
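Putting the history helpers together (a sketch, not part of the deleted sources): list recent config changes via ListConfigHistoryKV, print them with CreateTimeFormatted, and roll one back with RestoreConfigHistoryKV:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	ctx := context.Background()

	entries, err := madmClnt.ListConfigHistoryKV(ctx, 5)
	if err != nil {
		log.Fatalln(err)
	}
	if len(entries) == 0 {
		log.Fatalln("no config history to restore")
	}

	for _, e := range entries {
		fmt.Println(e.RestoreID, e.CreateTimeFormatted())
	}

	// Roll back to the first entry returned (the listing is sorted by createTime).
	if err := madmClnt.RestoreConfigHistoryKV(ctx, entries[0].RestoreID); err != nil {
		log.Fatalln(err)
	}
}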
- resp, err := adm.executeMethod(ctx, - http.MethodGet, - requestData{ - relPath: adminAPIPrefix + "/get-config-kv", - queryValues: v, - }) - defer closeResponse(resp) - if err != nil { - return nil, err - } - - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - return DecryptData(adm.getSecretKey(), resp.Body) -} diff --git a/pkg/madmin/encrypt.go b/pkg/madmin/encrypt.go deleted file mode 100644 index 278d0490..00000000 --- a/pkg/madmin/encrypt.go +++ /dev/null @@ -1,136 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - - "github.com/secure-io/sio-go" - "github.com/secure-io/sio-go/sioutil" - "golang.org/x/crypto/argon2" -) - -// EncryptData encrypts the data with an unique key -// derived from password using the Argon2id PBKDF. -// -// The returned ciphertext data consists of: -// salt | AEAD ID | nonce | encrypted data -// 32 1 8 ~ len(data) -func EncryptData(password string, data []byte) ([]byte, error) { - salt := sioutil.MustRandom(32) - - // Derive an unique 256 bit key from the password and the random salt. - key := argon2.IDKey([]byte(password), salt, 1, 64*1024, 4, 32) - - var ( - id byte - err error - stream *sio.Stream - ) - if sioutil.NativeAES() { // Only use AES-GCM if we can use an optimized implementation - id = aesGcm - stream, err = sio.AES_256_GCM.Stream(key) - } else { - id = c20p1305 - stream, err = sio.ChaCha20Poly1305.Stream(key) - } - if err != nil { - return nil, err - } - nonce := sioutil.MustRandom(stream.NonceSize()) - - // ciphertext = salt || AEAD ID | nonce | encrypted data - cLen := int64(len(salt)+1+len(nonce)+len(data)) + stream.Overhead(int64(len(data))) - ciphertext := bytes.NewBuffer(make([]byte, 0, cLen)) // pre-alloc correct length - - // Prefix the ciphertext with salt, AEAD ID and nonce - ciphertext.Write(salt) - ciphertext.WriteByte(id) - ciphertext.Write(nonce) - - w := stream.EncryptWriter(ciphertext, nonce, nil) - if _, err = w.Write(data); err != nil { - return nil, err - } - if err = w.Close(); err != nil { - return nil, err - } - return ciphertext.Bytes(), nil -} - -// ErrMaliciousData indicates that the stream cannot be -// decrypted by provided credentials. -var ErrMaliciousData = sio.NotAuthentic - -// DecryptData decrypts the data with the key derived -// from the salt (part of data) and the password using -// the PBKDF used in EncryptData. DecryptData returns -// the decrypted plaintext on success. -// -// The data must be a valid ciphertext produced by -// EncryptData. Otherwise, the decryption will fail. -func DecryptData(password string, data io.Reader) ([]byte, error) { - var ( - salt [32]byte - id [1]byte - nonce [8]byte // This depends on the AEAD but both used ciphers have the same nonce length. 
- ) - - if _, err := io.ReadFull(data, salt[:]); err != nil { - return nil, err - } - if _, err := io.ReadFull(data, id[:]); err != nil { - return nil, err - } - if _, err := io.ReadFull(data, nonce[:]); err != nil { - return nil, err - } - - key := argon2.IDKey([]byte(password), salt[:], 1, 64*1024, 4, 32) - var ( - err error - stream *sio.Stream - ) - switch id[0] { - case aesGcm: - stream, err = sio.AES_256_GCM.Stream(key) - case c20p1305: - stream, err = sio.ChaCha20Poly1305.Stream(key) - default: - err = errors.New("madmin: invalid AEAD algorithm ID") - } - if err != nil { - return nil, err - } - - enBytes, err := ioutil.ReadAll(stream.DecryptReader(data, nonce[:], nil)) - if err != nil { - if err == sio.NotAuthentic { - return enBytes, ErrMaliciousData - } - } - return enBytes, err -} - -const ( - aesGcm = 0x00 - c20p1305 = 0x01 -) diff --git a/pkg/madmin/encrypt_test.go b/pkg/madmin/encrypt_test.go deleted file mode 100644 index ed94a900..00000000 --- a/pkg/madmin/encrypt_test.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "bytes" - "encoding/hex" - "fmt" - "testing" -) - -var encryptDataTests = []struct { - Password string - Data []byte -}{ - {Password: "", Data: nil}, - {Password: "", Data: make([]byte, 256)}, - {Password: `xPl.8/rhR"Q_1xLt`, Data: make([]byte, 32)}, - {Password: "m69?yz4W-!k+7p0", Data: make([]byte, 1024*1024)}, - {Password: `7h5oU4$te{;K}fgqlI^]`, Data: make([]byte, 256)}, -} - -func TestEncryptData(t *testing.T) { - for i, test := range encryptDataTests { - i, test := i, test - t.Run(fmt.Sprintf("Test-%d", i), func(t *testing.T) { - ciphertext, err := EncryptData(test.Password, test.Data) - if err != nil { - t.Fatalf("Failed to encrypt data: %v", err) - } - plaintext, err := DecryptData(test.Password, bytes.NewReader(ciphertext)) - if err != nil { - t.Fatalf("Failed to decrypt data: %v", err) - } - if !bytes.Equal(plaintext, test.Data) { - t.Fatal("Decrypt plaintext does not match origin data") - } - }) - } -} - -var decryptDataTests = []struct { - Password string - Data string -}{ - {Password: "", Data: "828aa81599df0651c0461adb82283e8b89956baee9f6e719947ef9cddc849028001dc9d3ac0938f66b07bacc9751437e1985f8a9763c240e81"}, - - {Password: "", Data: "1793c71df6647860437134073c15688cbb15961dc0758c7ee1225e66e79c724c00d790dba9c671eae89da2c736d858286ac9bd027abacc6443" + - "0375cd41b63b67c070c7fba475a8dd66ae65ba905176c48cbe6f734fc74df87343d8ccff54bada4aeb0a04bd021633ebe6c4768e23f5dea142" + - "561d4fe3f90ed59d13dc5fb3a585dadec1742325291b9c81692bdd3420b2428127f8195e0ecd9a1c9237712ed67af7339fbbf7ff3ee1c516e1" + - "f81e69d933e057b30997e7274a2c9698e07c39f0e8d6818858f34c8191871b5a52bea9061806bd029024bfc1d9c1f230904968d6c9e10fddcb" + - "c006ba97356ff243570fd96df07dd6894e215a6b24c4ed730369519289ebd877aff6ccbd2265985e4ab1a2b7930bab9cfb767b97348a639ddf" + - "8db81bf5151da7e8f3d9638a1b86eb1dd78cc6a526f10a414c78638f"}, - - {Password: 
`xPl.8/rhR"Q_1xLt`, Data: "b5c016e93b84b473fc8a37af94936563630c36d6df1841d23a86ee51ca161f9e00ac19116b32f643ff6a56a212b265d8c56" + - "195bb0d12ce199e13dfdc5272f80c1564da2c6fc2fa18da91d8062de02af5cdafea491c6f3cae1f"}, - - {Password: `7h5oU4$te{;K}fgqlI^]`, Data: "c58edf7cfd557b6b655de6f48b1a3049d8d049dadb3a7bfa9ac9ccbb5baf37ec00f83086a26f43b7d6bc9075ad0" + - "38bf5741f118d502ebe94165e4072ba7f98535d6b1e3b6ae67a98115d146d9b4d90e4df4ae82df9cfa17ed7cd42" + - "465181559f7ddf09c98beec521bb4478e0cb73c4e0827af8688ff4e7a07327a10d5a180035e6ddb16d974a85257" + - "981cd9e0360a20f7b4d653190267dfb241148f018ae180568042e864b9e1b5bc05425a3abc2b0324f50c72d5679" + - "8f924405dfc0f8523f4bb564ed65af8e1b1c82a7a0640552ecf81985d95d0993d99172592ddc1393dfa63e8f0b3" + - "d744b2cc4b73384ca4693f0c1aec0e9b00e85f2937e891105d67da8f59c14ca96608e0425c42f9c1e7c2a8b3413" + - "e1381784f9cfe01de7c47cea1f8d7a7d88f5d4aca783cf55332b47f957a6b9a65269d7eb606b877b"}, -} - -func TestDecryptData(t *testing.T) { - for i, test := range decryptDataTests { - i, test := i, test - t.Run(fmt.Sprintf("Test-%d", i), func(t *testing.T) { - ciphertext, err := hex.DecodeString(test.Data) - if err != nil { - t.Fatalf("Failed to decode ciphertext data: %v", err) - } - _, err = DecryptData(test.Password, bytes.NewReader(ciphertext)) - if err != nil { - t.Fatalf("Failed to decrypt data: %v", err) - } - }) - } -} diff --git a/pkg/madmin/examples/accounting-usage-info.go b/pkg/madmin/examples/accounting-usage-info.go deleted file mode 100644 index 969f6eff..00000000 --- a/pkg/madmin/examples/accounting-usage-info.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTPS) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - accountUsageInfo, err := madmClnt.AccountUsageInfo(context.Background()) - if err != nil { - log.Fatalln(err) - } - - log.Println(accountUsageInfo) -} diff --git a/pkg/madmin/examples/add-user-and-policy.go b/pkg/madmin/examples/add-user-and-policy.go deleted file mode 100644 index ae078b68..00000000 --- a/pkg/madmin/examples/add-user-and-policy.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/bucket/policy" - "github.com/minio/minio/pkg/bucket/policy/condition" - iampolicy "github.com/minio/minio/pkg/iam/policy" - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - if err = madmClnt.AddUser(context.Background(), "newuser", "newstrongpassword"); err != nil { - log.Fatalln(err) - } - - // Create policy - p := iampolicy.Policy{ - Version: iampolicy.DefaultVersion, - Statements: []iampolicy.Statement{ - iampolicy.NewStatement( - policy.Allow, - iampolicy.NewActionSet(iampolicy.GetObjectAction), - iampolicy.NewResourceSet(iampolicy.NewResource("testbucket/*", "")), - condition.NewFunctions(), - )}, - } - - if err = madmClnt.AddCannedPolicy(context.Background(), "get-only", &p); err != nil { - log.Fatalln(err) - } - - if err = madmClnt.SetUserPolicy(context.Background(), "newuser", "get-only"); err != nil { - log.Fatalln(err) - } -} diff --git a/pkg/madmin/examples/bucket-quota.go b/pkg/madmin/examples/bucket-quota.go deleted file mode 100644 index 7b75c096..00000000 --- a/pkg/madmin/examples/bucket-quota.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "fmt" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. 
- madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - var kiB int64 = 1 << 10 - ctx := context.Background() - // set bucket quota config - if err := madmClnt.SetBucketQuota(ctx, "bucket-name", 64*kiB, HardQuota); err != nil { - log.Fatalln(err) - } - // gets bucket quota config - quotaCfg, err := madmClnt.GetBucketQuota(ctx, "bucket-name") - if err != nil { - log.Fatalln(err) - } - fmt.Println(quotaCfg) - // remove bucket quota config - if err := madmClnt.RemoveBucketQuota(ctx, "bucket-name"); err != nil { - log.Fatalln(err) - } -} diff --git a/pkg/madmin/examples/data-usage-info.go b/pkg/madmin/examples/data-usage-info.go deleted file mode 100644 index 19a7a001..00000000 --- a/pkg/madmin/examples/data-usage-info.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTPS) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - dataUsageInfo, err := madmClnt.DataUsageInfo(context.Background()) - if err != nil { - log.Fatalln(err) - } - log.Println(dataUsageInfo) -} diff --git a/pkg/madmin/examples/heal-bucket.go b/pkg/madmin/examples/heal-bucket.go deleted file mode 100644 index 24f7cdd0..00000000 --- a/pkg/madmin/examples/heal-bucket.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. 
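Note on the bucket-quota example above: inside package main the quota-type constant needs its package qualifier, so as written the file does not compile; the call should read

	if err := madmClnt.SetBucketQuota(ctx, "bucket-name", 64*kiB, madmin.HardQuota); err != nil {
		log.Fatalln(err)
	}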
- madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - // Heal bucket mybucket - dry run - isDryRun := true - err = madmClnt.HealBucket(context.Background(), "mybucket", isDryRun) - if err != nil { - log.Fatalln(err) - - } - - // Heal bucket mybucket - for real this time. - isDryRun := false - err = madmClnt.HealBucket(context.Background(), "mybucket", isDryRun) - if err != nil { - log.Fatalln(err) - } - - log.Println("successfully healed mybucket") -} diff --git a/pkg/madmin/examples/heal-buckets-list.go b/pkg/madmin/examples/heal-buckets-list.go deleted file mode 100644 index 8c05ac7b..00000000 --- a/pkg/madmin/examples/heal-buckets-list.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build ignore - -package main - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -import ( - "context" - "fmt" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - // List buckets that need healing - healBucketsList, err := madmClnt.ListBucketsHeal(context.Background()) - if err != nil { - log.Fatalln(err) - } - - for _, bucket := range healBucketsList { - if bucket.HealBucketInfo != nil { - switch healInfo := *bucket.HealBucketInfo; healInfo.Status { - case madmin.CanHeal: - fmt.Println(bucket.Name, " can be healed.") - case madmin.QuorumUnavailable: - fmt.Println(bucket.Name, " can't be healed until quorum is available.") - case madmin.Corrupted: - fmt.Println(bucket.Name, " can't be healed, not enough information.") - } - } - fmt.Println("bucket: ", bucket) - } -} diff --git a/pkg/madmin/examples/heal-format.go b/pkg/madmin/examples/heal-format.go deleted file mode 100644 index 2edfa189..00000000 --- a/pkg/madmin/examples/heal-format.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
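Note on the heal-bucket example above: the second `isDryRun := false` re-declares the variable in the same scope ("no new variables on left side of :="), so the file does not compile. Plain assignment is what is intended, matching the heal-format and heal-object examples:

	// Heal bucket mybucket - for real this time.
	isDryRun = false
	err = madmClnt.HealBucket(context.Background(), "mybucket", isDryRun)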
- * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - // Attempt healing format in dry-run mode. - isDryRun := true - err = madmClnt.HealFormat(context.Background(), isDryRun) - if err != nil { - log.Fatalln(err) - } - - // Perform actual healing of format. - isDryRun = false - err = madmClnt.HealFormat(context.Background(), isDryRun) - if err != nil { - log.Fatalln(err) - } - - log.Println("successfully healed storage format on available disks.") -} diff --git a/pkg/madmin/examples/heal-object.go b/pkg/madmin/examples/heal-object.go deleted file mode 100644 index 13f2e4e3..00000000 --- a/pkg/madmin/examples/heal-object.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - // Heal object mybucket/myobject - dry run. - isDryRun := true - _, err = madmClnt.HealObject(context.Background(), "mybucket", "myobject", isDryRun) - if err != nil { - log.Fatalln(err) - } - - // Heal object mybucket/myobject - this time for real. - isDryRun = false - healResult, err := madmClnt.HealObject(context.Background(), "mybucket", "myobject", isDryRun) - if err != nil { - log.Fatalln(err) - } - - log.Printf("heal result: %#v\n", healResult) -} diff --git a/pkg/madmin/examples/heal-status.go b/pkg/madmin/examples/heal-status.go deleted file mode 100644 index ad8e5900..00000000 --- a/pkg/madmin/examples/heal-status.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - healStatusResult, err := madmClnt.BackgroundHealStatus(context.Background()) - if err != nil { - log.Fatalln(err) - } - - log.Printf("Heal status result: %+v\n", healStatusResult) -} diff --git a/pkg/madmin/examples/kms-status.go b/pkg/madmin/examples/kms-status.go deleted file mode 100644 index a18bad58..00000000 --- a/pkg/madmin/examples/kms-status.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - status, err := madmClnt.GetKeyStatus(context.Background(), "") // empty string refers to the default master key - if err != nil { - log.Fatalln(err) - } - - log.Printf("Key: %s\n", status.KeyID) - if status.EncryptionErr == "" { - log.Println("\t • Encryption ✔") - } else { - log.Printf("\t • Encryption failed: %s\n", status.EncryptionErr) - } - if status.UpdateErr == "" { - log.Println("\t • Re-wrap ✔") - } else { - log.Printf("\t • Re-wrap failed: %s\n", status.UpdateErr) - } - if status.DecryptionErr == "" { - log.Println("\t • Decryption ✔") - } else { - log.Printf("\t • Decryption failed: %s\n", status.DecryptionErr) - } -} diff --git a/pkg/madmin/examples/lock-clear.go b/pkg/madmin/examples/lock-clear.go deleted file mode 100644 index 04ffaac9..00000000 --- a/pkg/madmin/examples/lock-clear.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - "time" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - // Clear locks held on mybucket/myprefix for longer than 30s. - olderThan := time.Duration(30 * time.Second) - locksCleared, err := madmClnt.ClearLocks(context.Background(), "mybucket", "myprefix", olderThan) - if err != nil { - log.Fatalln(err) - } - log.Println(locksCleared) -} diff --git a/pkg/madmin/examples/profiling.go b/pkg/madmin/examples/profiling.go deleted file mode 100644 index a04c25b2..00000000 --- a/pkg/madmin/examples/profiling.go +++ /dev/null @@ -1,90 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "io" - "log" - "os" - "time" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. 
- madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - profiler := madmin.ProfilerCPU - log.Println("Starting " + profiler + " profiling..") - - startResults, err := madmClnt.StartProfiling(context.Background(), profiler) - if err != nil { - log.Fatalln(err) - } - - for _, result := range startResults { - if !result.Success { - log.Printf("Unable to start profiling on node `%s`, reason = `%s`\n", result.NodeName, result.Error) - continue - } - log.Printf("Profiling successfully started on node `%s`\n", result.NodeName) - } - - sleep := time.Duration(10) - time.Sleep(time.Second * sleep) - - log.Println("Stopping profiling..") - - profilingData, err := madmClnt.DownloadProfilingData(context.Background()) - if err != nil { - log.Fatalln(err) - } - - profilingFile, err := os.Create("/tmp/profiling-" + string(profiler) + ".zip") - if err != nil { - log.Fatal(err) - } - - if _, err := io.Copy(profilingFile, profilingData); err != nil { - log.Fatal(err) - } - - if err := profilingFile.Close(); err != nil { - log.Fatal(err) - } - - if err := profilingData.Close(); err != nil { - log.Fatal(err) - } - - log.Println("Profiling files " + profilingFile.Name() + " successfully downloaded.") -} diff --git a/pkg/madmin/examples/server-info.go b/pkg/madmin/examples/server-info.go deleted file mode 100644 index 6e54f433..00000000 --- a/pkg/madmin/examples/server-info.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - st, err := madmClnt.ServerInfo(context.Background()) - if err != nil { - log.Fatalln(err) - } - log.Println(st) -} diff --git a/pkg/madmin/examples/service-accounts.go b/pkg/madmin/examples/service-accounts.go deleted file mode 100644 index 28315954..00000000 --- a/pkg/madmin/examples/service-accounts.go +++ /dev/null @@ -1,77 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "fmt" - "log" - - "github.com/minio/minio/pkg/bucket/policy" - "github.com/minio/minio/pkg/bucket/policy/condition" - iampolicy "github.com/minio/minio/pkg/iam/policy" - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - p := iampolicy.Policy{ - Version: iampolicy.DefaultVersion, - Statements: []iampolicy.Statement{ - iampolicy.NewStatement( - policy.Allow, - iampolicy.NewActionSet(iampolicy.GetObjectAction), - iampolicy.NewResourceSet(iampolicy.NewResource("testbucket/*", "")), - condition.NewFunctions(), - )}, - } - - // Create a new service account - creds, err := madmClnt.AddServiceAccount(context.Background(), &p) - if err != nil { - log.Fatalln(err) - } - fmt.Println(creds) - - // List all services accounts - list, err := madmClnt.ListServiceAccounts(context.Background()) - if err != nil { - log.Fatalln(err) - } - fmt.Println(list) - - // Delete a service account - err = madmClnt.DeleteServiceAccount(context.Background(), list.Accounts[0]) - if err != nil { - log.Fatalln(err) - } -} diff --git a/pkg/madmin/examples/service-restart.go b/pkg/madmin/examples/service-restart.go deleted file mode 100644 index 0586d3de..00000000 --- a/pkg/madmin/examples/service-restart.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - err = madmClnt.ServiceRestart(context.Background()) - if err != nil { - log.Fatalln(err) - } - log.Println("Success") -} diff --git a/pkg/madmin/examples/service-trace.go b/pkg/madmin/examples/service-trace.go deleted file mode 100644 index 99144a13..00000000 --- a/pkg/madmin/examples/service-trace.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "fmt" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - doneCh := make(chan struct{}) - defer close(doneCh) - - // Start listening on all http trace activity from all servers - // in the minio cluster. - allTrace := false - errTrace := false - traceCh := madmClnt.ServiceTrace(context.Background(), allTrace, errTrace, doneCh) - for traceInfo := range traceCh { - if traceInfo.Err != nil { - fmt.Println(traceInfo.Err) - } - fmt.Println(traceInfo) - } -} diff --git a/pkg/madmin/examples/storage-info.go b/pkg/madmin/examples/storage-info.go deleted file mode 100644 index 5441e278..00000000 --- a/pkg/madmin/examples/storage-info.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY and my-bucketname are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTPS) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - st, err := madmClnt.StorageInfo(context.Background()) - if err != nil { - log.Fatalln(err) - } - log.Println(st) -} diff --git a/pkg/madmin/examples/top-locks.go b/pkg/madmin/examples/top-locks.go deleted file mode 100644 index 93f6911c..00000000 --- a/pkg/madmin/examples/top-locks.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package main - -import ( - "context" - "encoding/json" - "log" - - "github.com/minio/minio/pkg/madmin" -) - -func main() { - // Note: YOUR-ACCESSKEYID, YOUR-SECRETACCESSKEY are - // dummy values, please replace them with original values. - - // API requests are secure (HTTPS) if secure=true and insecure (HTTP) otherwise. - // New returns an MinIO Admin client object. - madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true) - if err != nil { - log.Fatalln(err) - } - - locks, err := madmClnt.TopLocks(context.Background()) - if err != nil { - log.Fatalf("failed due to: %v", err) - } - - out, err := json.Marshal(locks) - if err != nil { - log.Fatalf("Marshal failed due to: %v", err) - } - log.Println("Top Locks received successfully: ", string(out)) -} diff --git a/pkg/madmin/group-commands.go b/pkg/madmin/group-commands.go deleted file mode 100644 index fa31116b..00000000 --- a/pkg/madmin/group-commands.go +++ /dev/null @@ -1,165 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" -) - -// GroupAddRemove is type for adding/removing members to/from a group. -type GroupAddRemove struct { - Group string `json:"group"` - Members []string `json:"members"` - IsRemove bool `json:"isRemove"` -} - -// UpdateGroupMembers - adds/removes users to/from a group. Server -// creates the group as needed. Group is removed if remove request is -// made on empty group. -func (adm *AdminClient) UpdateGroupMembers(ctx context.Context, g GroupAddRemove) error { - data, err := json.Marshal(g) - if err != nil { - return err - } - - reqData := requestData{ - relPath: adminAPIPrefix + "/update-group-members", - content: data, - } - - // Execute PUT on /minio/admin/v3/update-group-members - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// GroupDesc is a type that holds group info along with the policy -// attached to it. -type GroupDesc struct { - Name string `json:"name"` - Status string `json:"status"` - Members []string `json:"members"` - Policy string `json:"policy"` -} - -// GetGroupDescription - fetches information on a group. 
-func (adm *AdminClient) GetGroupDescription(ctx context.Context, group string) (*GroupDesc, error) { - v := url.Values{} - v.Set("group", group) - reqData := requestData{ - relPath: adminAPIPrefix + "/group", - queryValues: v, - } - - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - gd := GroupDesc{} - if err = json.Unmarshal(data, &gd); err != nil { - return nil, err - } - - return &gd, nil -} - -// ListGroups - lists all groups names present on the server. -func (adm *AdminClient) ListGroups(ctx context.Context) ([]string, error) { - reqData := requestData{ - relPath: adminAPIPrefix + "/groups", - } - - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - data, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - groups := []string{} - if err = json.Unmarshal(data, &groups); err != nil { - return nil, err - } - - return groups, nil -} - -// GroupStatus - group status. -type GroupStatus string - -// GroupStatus values. -const ( - GroupEnabled GroupStatus = "enabled" - GroupDisabled GroupStatus = "disabled" -) - -// SetGroupStatus - sets the status of a group. -func (adm *AdminClient) SetGroupStatus(ctx context.Context, group string, status GroupStatus) error { - v := url.Values{} - v.Set("group", group) - v.Set("status", string(status)) - - reqData := requestData{ - relPath: adminAPIPrefix + "/set-group-status", - queryValues: v, - } - - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} diff --git a/pkg/madmin/heal-commands.go b/pkg/madmin/heal-commands.go deleted file mode 100644 index 579f1fe9..00000000 --- a/pkg/madmin/heal-commands.go +++ /dev/null @@ -1,324 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/url" - "time" -) - -// HealScanMode represents the type of healing scan -type HealScanMode int - -const ( - // HealNormalScan checks if parts are present and not outdated - HealNormalScan HealScanMode = iota - // HealDeepScan checks for parts bitrot checksums - HealDeepScan -) - -// HealOpts - collection of options for a heal sequence -type HealOpts struct { - Recursive bool `json:"recursive"` - DryRun bool `json:"dryRun"` - Remove bool `json:"remove"` - ScanMode HealScanMode `json:"scanMode"` -} - -// Equal returns true if no is same as o. 
-func (o HealOpts) Equal(no HealOpts) bool { - if o.Recursive != no.Recursive { - return false - } - if o.DryRun != no.DryRun { - return false - } - if o.Remove != no.Remove { - return false - } - return o.ScanMode == no.ScanMode -} - -// HealStartSuccess - holds information about a successfully started -// heal operation -type HealStartSuccess struct { - ClientToken string `json:"clientToken"` - ClientAddress string `json:"clientAddress"` - StartTime time.Time `json:"startTime"` -} - -// HealStopSuccess - holds information about a successfully stopped -// heal operation. -type HealStopSuccess HealStartSuccess - -// HealTaskStatus - status struct for a heal task -type HealTaskStatus struct { - Summary string `json:"summary"` - FailureDetail string `json:"detail"` - StartTime time.Time `json:"startTime"` - HealSettings HealOpts `json:"settings"` - - Items []HealResultItem `json:"items,omitempty"` -} - -// HealItemType - specify the type of heal operation in a healing -// result -type HealItemType string - -// HealItemType constants -const ( - HealItemMetadata HealItemType = "metadata" - HealItemBucket = "bucket" - HealItemBucketMetadata = "bucket-metadata" - HealItemObject = "object" -) - -// Drive state constants -const ( - DriveStateOk string = "ok" - DriveStateOffline = "offline" - DriveStateCorrupt = "corrupt" - DriveStateMissing = "missing" - DriveStateUnformatted = "unformatted" // only returned by disk -) - -// HealDriveInfo - struct for an individual drive info item. -type HealDriveInfo struct { - UUID string `json:"uuid"` - Endpoint string `json:"endpoint"` - State string `json:"state"` -} - -// HealResultItem - struct for an individual heal result item -type HealResultItem struct { - ResultIndex int64 `json:"resultId"` - Type HealItemType `json:"type"` - Bucket string `json:"bucket"` - Object string `json:"object"` - Detail string `json:"detail"` - ParityBlocks int `json:"parityBlocks,omitempty"` - DataBlocks int `json:"dataBlocks,omitempty"` - DiskCount int `json:"diskCount"` - SetCount int `json:"setCount"` - // below slices are from drive info. 
- Before struct { - Drives []HealDriveInfo `json:"drives"` - } `json:"before"` - After struct { - Drives []HealDriveInfo `json:"drives"` - } `json:"after"` - ObjectSize int64 `json:"objectSize"` -} - -// GetMissingCounts - returns the number of missing disks before -// and after heal -func (hri *HealResultItem) GetMissingCounts() (b, a int) { - if hri == nil { - return - } - for _, v := range hri.Before.Drives { - if v.State == DriveStateMissing { - b++ - } - } - for _, v := range hri.After.Drives { - if v.State == DriveStateMissing { - a++ - } - } - return -} - -// GetOfflineCounts - returns the number of offline disks before -// and after heal -func (hri *HealResultItem) GetOfflineCounts() (b, a int) { - if hri == nil { - return - } - for _, v := range hri.Before.Drives { - if v.State == DriveStateOffline { - b++ - } - } - for _, v := range hri.After.Drives { - if v.State == DriveStateOffline { - a++ - } - } - return -} - -// GetCorruptedCounts - returns the number of corrupted disks before -// and after heal -func (hri *HealResultItem) GetCorruptedCounts() (b, a int) { - if hri == nil { - return - } - for _, v := range hri.Before.Drives { - if v.State == DriveStateCorrupt { - b++ - } - } - for _, v := range hri.After.Drives { - if v.State == DriveStateCorrupt { - a++ - } - } - return -} - -// GetOnlineCounts - returns the number of online disks before -// and after heal -func (hri *HealResultItem) GetOnlineCounts() (b, a int) { - if hri == nil { - return - } - for _, v := range hri.Before.Drives { - if v.State == DriveStateOk { - b++ - } - } - for _, v := range hri.After.Drives { - if v.State == DriveStateOk { - a++ - } - } - return -} - -// Heal - API endpoint to start heal and to fetch status -// forceStart and forceStop are mutually exclusive, you can either -// set one of them to 'true'. If both are set 'forceStart' will be -// honored. -func (adm *AdminClient) Heal(ctx context.Context, bucket, prefix string, - healOpts HealOpts, clientToken string, forceStart, forceStop bool) ( - healStart HealStartSuccess, healTaskStatus HealTaskStatus, err error) { - - if forceStart && forceStop { - return healStart, healTaskStatus, ErrInvalidArgument("forceStart and forceStop set to true is not allowed") - } - - body, err := json.Marshal(healOpts) - if err != nil { - return healStart, healTaskStatus, err - } - - path := fmt.Sprintf(adminAPIPrefix+"/heal/%s", bucket) - if bucket != "" && prefix != "" { - path += "/" + prefix - } - - // execute POST request to heal api - queryVals := make(url.Values) - if clientToken != "" { - queryVals.Set("clientToken", clientToken) - body = []byte{} - } - - // Anyone can be set, either force start or forceStop. - if forceStart { - queryVals.Set("forceStart", "true") - } else if forceStop { - queryVals.Set("forceStop", "true") - } - - resp, err := adm.executeMethod(ctx, - http.MethodPost, requestData{ - relPath: path, - content: body, - queryValues: queryVals, - }) - defer closeResponse(resp) - if err != nil { - return healStart, healTaskStatus, err - } - - if resp.StatusCode != http.StatusOK { - return healStart, healTaskStatus, httpRespToErrorResponse(resp) - } - - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return healStart, healTaskStatus, err - } - - // Was it a status request? - if clientToken == "" { - // As a special operation forceStop would return a - // similar struct as healStart will have the - // heal sequence information about the heal which - // was stopped. 
- err = json.Unmarshal(respBytes, &healStart) - } else { - err = json.Unmarshal(respBytes, &healTaskStatus) - } - if err != nil { - // May be the server responded with error after success - // message, handle it separately here. - var errResp ErrorResponse - err = json.Unmarshal(respBytes, &errResp) - if err != nil { - // Unknown structure return error anyways. - return healStart, healTaskStatus, err - } - return healStart, healTaskStatus, errResp - } - return healStart, healTaskStatus, nil -} - -// BgHealState represents the status of the background heal -type BgHealState struct { - ScannedItemsCount int64 - LastHealActivity time.Time - NextHealRound time.Time -} - -// BackgroundHealStatus returns the background heal status of the -// current server or cluster. -func (adm *AdminClient) BackgroundHealStatus(ctx context.Context) (BgHealState, error) { - // Execute POST request to background heal status api - resp, err := adm.executeMethod(ctx, - http.MethodPost, - requestData{relPath: adminAPIPrefix + "/background-heal/status"}) - if err != nil { - return BgHealState{}, err - } - defer closeResponse(resp) - - if resp.StatusCode != http.StatusOK { - return BgHealState{}, httpRespToErrorResponse(resp) - } - - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return BgHealState{}, err - } - - var healState BgHealState - - err = json.Unmarshal(respBytes, &healState) - if err != nil { - return BgHealState{}, err - } - return healState, nil -} diff --git a/pkg/madmin/heal-commands_test.go b/pkg/madmin/heal-commands_test.go deleted file mode 100644 index 7423f867..00000000 --- a/pkg/madmin/heal-commands_test.go +++ /dev/null @@ -1,73 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "testing" -) - -// Tests heal drives missing and offline counts. 
-func TestHealDriveCounts(t *testing.T) { - rs := HealResultItem{} - rs.Before.Drives = make([]HealDriveInfo, 20) - rs.After.Drives = make([]HealDriveInfo, 20) - for i := range rs.Before.Drives { - if i < 4 { - rs.Before.Drives[i] = HealDriveInfo{State: DriveStateMissing} - rs.After.Drives[i] = HealDriveInfo{State: DriveStateMissing} - } else if i > 4 && i < 15 { - rs.Before.Drives[i] = HealDriveInfo{State: DriveStateOffline} - rs.After.Drives[i] = HealDriveInfo{State: DriveStateOffline} - } else if i > 15 { - rs.Before.Drives[i] = HealDriveInfo{State: DriveStateCorrupt} - rs.After.Drives[i] = HealDriveInfo{State: DriveStateCorrupt} - } else { - rs.Before.Drives[i] = HealDriveInfo{State: DriveStateOk} - rs.After.Drives[i] = HealDriveInfo{State: DriveStateOk} - } - } - - i, j := rs.GetOnlineCounts() - if i > 2 { - t.Errorf("Expected '2', got %d before online disks", i) - } - if j > 2 { - t.Errorf("Expected '2', got %d after online disks", j) - } - i, j = rs.GetOfflineCounts() - if i > 10 { - t.Errorf("Expected '10', got %d before offline disks", i) - } - if j > 10 { - t.Errorf("Expected '10', got %d after offline disks", j) - } - i, j = rs.GetCorruptedCounts() - if i > 4 { - t.Errorf("Expected '4', got %d before corrupted disks", i) - } - if j > 4 { - t.Errorf("Expected '4', got %d after corrupted disks", j) - } - i, j = rs.GetMissingCounts() - if i > 4 { - t.Errorf("Expected '4', got %d before missing disks", i) - } - if j > 4 { - t.Errorf("Expected '4', got %d after missing disks", i) - } -} diff --git a/pkg/madmin/info-commands.go b/pkg/madmin/info-commands.go deleted file mode 100644 index b3a8e3a4..00000000 --- a/pkg/madmin/info-commands.go +++ /dev/null @@ -1,329 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "io/ioutil" - "net/http" - "time" -) - -// BackendType - represents different backend types. -type BackendType int - -// Enum for different backend types. -const ( - Unknown BackendType = iota - // Filesystem backend. - FS - // Multi disk Erasure (single, distributed) backend. - Erasure - - // Add your own backend. -) - -// DriveInfo - represents each drive info, describing -// status, uuid and endpoint. -type DriveInfo HealDriveInfo - -// StorageInfo - represents total capacity of underlying storage. -type StorageInfo struct { - Used []uint64 // Used total used per disk. - - Total []uint64 // Total disk space per disk. - - Available []uint64 // Total disk space available per disk. - - MountPaths []string // Disk mountpoints - - // Backend type. - Backend struct { - // Represents various backend types, currently on FS and Erasure. - Type BackendType - - // Following fields are only meaningful if BackendType is Erasure. - OnlineDisks BackendDisks // Online disks during server startup. - OfflineDisks BackendDisks // Offline disks during server startup. - StandardSCData int // Data disks for currently configured Standard storage class. 
- StandardSCParity int // Parity disks for currently configured Standard storage class. - RRSCData int // Data disks for currently configured Reduced Redundancy storage class. - RRSCParity int // Parity disks for currently configured Reduced Redundancy storage class. - - // List of all disk status, this is only meaningful if BackendType is Erasure. - Sets [][]DriveInfo - } -} - -// BackendDisks - represents the map of endpoint-disks. -type BackendDisks map[string]int - -// Sum - Return the sum of the disks in the endpoint-disk map. -func (d1 BackendDisks) Sum() (sum int) { - for _, count := range d1 { - sum += count - } - return sum -} - -// Merge - Reduces two endpoint-disk maps. -func (d1 BackendDisks) Merge(d2 BackendDisks) BackendDisks { - if len(d2) == 0 { - d2 = make(BackendDisks) - } - for i1, v1 := range d1 { - if v2, ok := d2[i1]; ok { - d2[i1] = v2 + v1 - continue - } - d2[i1] = v1 - } - return d2 -} - -// StorageInfo - Connect to a minio server and call Storage Info Management API -// to fetch server's information represented by StorageInfo structure -func (adm *AdminClient) StorageInfo(ctx context.Context) (StorageInfo, error) { - resp, err := adm.executeMethod(ctx, http.MethodGet, requestData{relPath: adminAPIPrefix + "/storageinfo"}) - defer closeResponse(resp) - if err != nil { - return StorageInfo{}, err - } - - // Check response http status code - if resp.StatusCode != http.StatusOK { - return StorageInfo{}, httpRespToErrorResponse(resp) - } - - // Unmarshal the server's json response - var storageInfo StorageInfo - - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return StorageInfo{}, err - } - - err = json.Unmarshal(respBytes, &storageInfo) - if err != nil { - return StorageInfo{}, err - } - - return storageInfo, nil -} - -// DataUsageInfo represents data usage of an Object API -type DataUsageInfo struct { - // LastUpdate is the timestamp of when the data usage info was last updated. - // This does not indicate a full scan. - LastUpdate time.Time `json:"lastUpdate"` - ObjectsCount uint64 `json:"objectsCount"` - ObjectsTotalSize uint64 `json:"objectsTotalSize"` - - // ObjectsSizesHistogram contains information on objects across all buckets. - // See ObjectsHistogramIntervals. - ObjectsSizesHistogram map[string]uint64 `json:"objectsSizesHistogram"` - - BucketsCount uint64 `json:"bucketsCount"` - - // BucketsSizes is "bucket name" -> size. - BucketsSizes map[string]uint64 `json:"bucketsSizes"` -} - -// DataUsageInfo - returns data usage of the current object API -func (adm *AdminClient) DataUsageInfo(ctx context.Context) (DataUsageInfo, error) { - resp, err := adm.executeMethod(ctx, http.MethodGet, requestData{relPath: adminAPIPrefix + "/datausageinfo"}) - defer closeResponse(resp) - if err != nil { - return DataUsageInfo{}, err - } - - // Check response http status code - if resp.StatusCode != http.StatusOK { - return DataUsageInfo{}, httpRespToErrorResponse(resp) - } - - // Unmarshal the server's json response - var dataUsageInfo DataUsageInfo - - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return DataUsageInfo{}, err - } - - err = json.Unmarshal(respBytes, &dataUsageInfo) - if err != nil { - return DataUsageInfo{}, err - } - - return dataUsageInfo, nil -} - -// InfoMessage container to hold server admin related information. 
-type InfoMessage struct { - Mode string `json:"mode,omitempty"` - Domain []string `json:"domain,omitempty"` - Region string `json:"region,omitempty"` - SQSARN []string `json:"sqsARN,omitempty"` - DeploymentID string `json:"deploymentID,omitempty"` - Buckets Buckets `json:"buckets,omitempty"` - Objects Objects `json:"objects,omitempty"` - Usage Usage `json:"usage,omitempty"` - Services Services `json:"services,omitempty"` - Backend interface{} `json:"backend,omitempty"` - Servers []ServerProperties `json:"servers,omitempty"` -} - -// Services contains different services information -type Services struct { - Vault Vault `json:"vault,omitempty"` - LDAP LDAP `json:"ldap,omitempty"` - Logger []Logger `json:"logger,omitempty"` - Audit []Audit `json:"audit,omitempty"` - Notifications []map[string][]TargetIDStatus `json:"notifications,omitempty"` -} - -// Buckets contains the number of buckets -type Buckets struct { - Count uint64 `json:"count,omitempty"` -} - -// Objects contains the number of objects -type Objects struct { - Count uint64 `json:"count,omitempty"` -} - -// Usage contains the tottal size used -type Usage struct { - Size uint64 `json:"size,omitempty"` -} - -// Vault - Fetches the Vault status -type Vault struct { - Status string `json:"status,omitempty"` - Encrypt string `json:"encryp,omitempty"` - Decrypt string `json:"decrypt,omitempty"` -} - -// LDAP contains ldap status -type LDAP struct { - Status string `json:"status,omitempty"` -} - -// Status of endpoint -type Status struct { - Status string `json:"status,omitempty"` -} - -// Audit contains audit logger status -type Audit map[string]Status - -// Logger contains logger status -type Logger map[string]Status - -// TargetIDStatus containsid and status -type TargetIDStatus map[string]Status - -// backendType - indicates the type of backend storage -type backendType string - -const ( - // FsType - Backend is FS Type - FsType = backendType("FS") - // ErasureType - Backend is Erasure type - ErasureType = backendType("Erasure") -) - -// FSBackend contains specific FS storage information -type FSBackend struct { - Type backendType `json:"backendType,omitempty"` -} - -// XLBackend contains specific erasure storage information -type XLBackend struct { - Type backendType `json:"backendType,omitempty"` - OnlineDisks int `json:"onlineDisks,omitempty"` - OfflineDisks int `json:"offlineDisks,omitempty"` - // Data disks for currently configured Standard storage class. - StandardSCData int `json:"standardSCData,omitempty"` - // Parity disks for currently configured Standard storage class. - StandardSCParity int `json:"standardSCParity,omitempty"` - // Data disks for currently configured Reduced Redundancy storage class. - RRSCData int `json:"rrSCData,omitempty"` - // Parity disks for currently configured Reduced Redundancy storage class. 
- RRSCParity int `json:"rrSCParity,omitempty"` -} - -// ServerProperties holds server information -type ServerProperties struct { - State string `json:"state,omitempty"` - Endpoint string `json:"endpoint,omitempty"` - Uptime int64 `json:"uptime,omitempty"` - Version string `json:"version,omitempty"` - CommitID string `json:"commitID,omitempty"` - Network map[string]string `json:"network,omitempty"` - Disks []Disk `json:"disks,omitempty"` -} - -// Disk holds Disk information -type Disk struct { - DrivePath string `json:"path,omitempty"` - State string `json:"state,omitempty"` - UUID string `json:"uuid,omitempty"` - Model string `json:"model,omitempty"` - TotalSpace uint64 `json:"totalspace,omitempty"` - UsedSpace uint64 `json:"usedspace,omitempty"` - ReadThroughput float64 `json:"readthroughput,omitempty"` - WriteThroughPut float64 `json:"writethroughput,omitempty"` - ReadLatency float64 `json:"readlatency,omitempty"` - WriteLatency float64 `json:"writelatency,omitempty"` - Utilization float64 `json:"utilization,omitempty"` -} - -// ServerInfo - Connect to a minio server and call Server Admin Info Management API -// to fetch server's information represented by infoMessage structure -func (adm *AdminClient) ServerInfo(ctx context.Context) (InfoMessage, error) { - resp, err := adm.executeMethod(ctx, - http.MethodGet, - requestData{relPath: adminAPIPrefix + "/info"}, - ) - defer closeResponse(resp) - if err != nil { - return InfoMessage{}, err - } - - // Check response http status code - if resp.StatusCode != http.StatusOK { - return InfoMessage{}, httpRespToErrorResponse(resp) - } - - // Unmarshal the server's json response - var message InfoMessage - - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return InfoMessage{}, err - } - - err = json.Unmarshal(respBytes, &message) - if err != nil { - return InfoMessage{}, err - } - - return message, nil -} diff --git a/pkg/madmin/kms-commands.go b/pkg/madmin/kms-commands.go deleted file mode 100644 index 064b4dce..00000000 --- a/pkg/madmin/kms-commands.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package madmin - -import ( - "context" - "encoding/json" - "net/http" - "net/url" -) - -// GetKeyStatus requests status information about the key referenced by keyID -// from the KMS connected to a MinIO by performing a Admin-API request. -// It basically hits the `/minio/admin/v3/kms/key/status` API endpoint. 
-func (adm *AdminClient) GetKeyStatus(ctx context.Context, keyID string) (*KMSKeyStatus, error) { - // GET /minio/admin/v3/kms/key/status?key-id= - qv := url.Values{} - qv.Set("key-id", keyID) - reqData := requestData{ - relPath: adminAPIPrefix + "/kms/key/status", - queryValues: qv, - } - - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - if err != nil { - return nil, err - } - defer closeResponse(resp) - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - var keyInfo KMSKeyStatus - if err = json.NewDecoder(resp.Body).Decode(&keyInfo); err != nil { - return nil, err - } - return &keyInfo, nil -} - -// KMSKeyStatus contains some status information about a KMS master key. -// The MinIO server tries to access the KMS and perform encryption and -// decryption operations. If the MinIO server can access the KMS and -// all master key operations succeed it returns a status containing only -// the master key ID but no error. -type KMSKeyStatus struct { - KeyID string `json:"key-id"` - EncryptionErr string `json:"encryption-error,omitempty"` // An empty error == success - DecryptionErr string `json:"decryption-error,omitempty"` // An empty error == success -} diff --git a/pkg/madmin/obd.go b/pkg/madmin/obd.go deleted file mode 100644 index c6ebee71..00000000 --- a/pkg/madmin/obd.go +++ /dev/null @@ -1,300 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
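For reference, a minimal usage sketch for the GetKeyStatus API removed above, in the same style as the example programs deleted earlier in this changeset; the endpoint, credentials, and the key ID "my-minio-key" are placeholder assumptions.

// +build ignore

package main

import (
	"context"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	// Placeholder endpoint and credentials, as in the other examples.
	madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Ask the server to probe the KMS with the given (assumed) master key ID.
	status, err := madmClnt.GetKeyStatus(context.Background(), "my-minio-key")
	if err != nil {
		log.Fatalln(err)
	}

	// An empty error string means the corresponding operation succeeded.
	if status.EncryptionErr == "" && status.DecryptionErr == "" {
		log.Printf("KMS key %s is usable for encryption and decryption\n", status.KeyID)
	} else {
		log.Printf("KMS key %s reported errors: encrypt=%q decrypt=%q\n",
			status.KeyID, status.EncryptionErr, status.DecryptionErr)
	}
}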
- * - */ - -package madmin - -import ( - "context" - "encoding/json" - "io" - "net/http" - "net/url" - "time" - - "github.com/minio/minio/pkg/disk" - "github.com/minio/minio/pkg/net" - - "github.com/shirou/gopsutil/cpu" - "github.com/shirou/gopsutil/host" - "github.com/shirou/gopsutil/mem" - nethw "github.com/shirou/gopsutil/net" - "github.com/shirou/gopsutil/process" -) - -// OBDInfo - MinIO cluster's OBD Info -type OBDInfo struct { - TimeStamp time.Time `json:"timestamp,omitempty"` - Error string `json:"error,omitempty"` - Perf PerfOBDInfo `json:"perf,omitempty"` - Minio MinioOBDInfo `json:"minio,omitempty"` - Sys SysOBDInfo `json:"sys,omitempty"` -} - -// SysOBDInfo - Includes hardware and system information of the MinIO cluster -type SysOBDInfo struct { - CPUInfo []ServerCPUOBDInfo `json:"cpus,omitempty"` - DiskHwInfo []ServerDiskHwOBDInfo `json:"disks,omitempty"` - OsInfo []ServerOsOBDInfo `json:"osinfos,omitempty"` - MemInfo []ServerMemOBDInfo `json:"meminfos,omitempty"` - ProcInfo []ServerProcOBDInfo `json:"procinfos,omitempty"` - Error string `json:"error,omitempty"` -} - -// ServerProcOBDInfo - Includes host process lvl information -type ServerProcOBDInfo struct { - Addr string `json:"addr"` - Processes []SysOBDProcess `json:"processes,omitempty"` - Error string `json:"error,omitempty"` -} - -// SysOBDProcess - Includes process lvl information about a single process -type SysOBDProcess struct { - Pid int32 `json:"pid"` - Background bool `json:"background,omitempty"` - CPUPercent float64 `json:"cpupercent,omitempty"` - Children []int32 `json:"children,omitempty"` - CmdLine string `json:"cmd,omitempty"` - Connections []nethw.ConnectionStat `json:"connections,omitempty"` - CreateTime int64 `json:"createtime,omitempty"` - Cwd string `json:"cwd,omitempty"` - Exe string `json:"exe,omitempty"` - Gids []int32 `json:"gids,omitempty"` - IOCounters *process.IOCountersStat `json:"iocounters,omitempty"` - IsRunning bool `json:"isrunning,omitempty"` - MemInfo *process.MemoryInfoStat `json:"meminfo,omitempty"` - MemMaps *[]process.MemoryMapsStat `json:"memmaps,omitempty"` - MemPercent float32 `json:"mempercent,omitempty"` - Name string `json:"name,omitempty"` - NetIOCounters []nethw.IOCountersStat `json:"netiocounters,omitempty"` - Nice int32 `json:"nice,omitempty"` - NumCtxSwitches *process.NumCtxSwitchesStat `json:"numctxswitches,omitempty"` - NumFds int32 `json:"numfds,omitempty"` - NumThreads int32 `json:"numthreads,omitempty"` - OpenFiles []process.OpenFilesStat `json:"openfiles,omitempty"` - PageFaults *process.PageFaultsStat `json:"pagefaults,omitempty"` - Parent int32 `json:"parent,omitempty"` - Ppid int32 `json:"ppid,omitempty"` - Rlimit []process.RlimitStat `json:"rlimit,omitempty"` - Status string `json:"status,omitempty"` - Tgid int32 `json:"tgid,omitempty"` - Threads map[int32]*cpu.TimesStat `json:"threadstats,omitempty"` - Times *cpu.TimesStat `json:"cputimes,omitempty"` - Uids []int32 `json:"uidsomitempty"` - Username string `json:"username,omitempty"` -} - -// ServerMemOBDInfo - Includes host virtual and swap mem information -type ServerMemOBDInfo struct { - Addr string `json:"addr"` - SwapMem *mem.SwapMemoryStat `json:"swap,omitempty"` - VirtualMem *mem.VirtualMemoryStat `json:"virtualmem,omitempty"` - Error string `json:"error,omitempty"` -} - -// ServerOsOBDInfo - Includes host os information -type ServerOsOBDInfo struct { - Addr string `json:"addr"` - Info *host.InfoStat `json:"info,omitempty"` - Sensors []host.TemperatureStat `json:"sensors,omitempty"` - Users 
[]host.UserStat `json:"users,omitempty"` - Error string `json:"error,omitempty"` -} - -// ServerCPUOBDInfo - Includes cpu and timer stats of each node of the MinIO cluster -type ServerCPUOBDInfo struct { - Addr string `json:"addr"` - CPUStat []cpu.InfoStat `json:"cpu,omitempty"` - TimeStat []cpu.TimesStat `json:"time,omitempty"` - Error string `json:"error,omitempty"` -} - -// MinioOBDInfo - Includes MinIO confifuration information -type MinioOBDInfo struct { - Info InfoMessage `json:"info,omitempty"` - Config interface{} `json:"config,omitempty"` - Error string `json:"error,omitempty"` -} - -// PerfOBDInfo - Includes Drive and Net perf info for the entire MinIO cluster -type PerfOBDInfo struct { - DriveInfo []ServerDrivesOBDInfo `json:"drives,omitempty"` - Net []ServerNetOBDInfo `json:"net,omitempty"` - NetParallel ServerNetOBDInfo `json:"net_parallel,omitempty"` - Error string `json:"error,omitempty"` -} - -// ServerDrivesOBDInfo - Drive OBD info about all drives in a single MinIO node -type ServerDrivesOBDInfo struct { - Addr string `json:"addr"` - Serial []DriveOBDInfo `json:"serial,omitempty"` - Parallel []DriveOBDInfo `json:"parallel,omitempty"` - Error string `json:"error,omitempty"` -} - -// DriveOBDInfo - Stats about a single drive in a MinIO node -type DriveOBDInfo struct { - Path string `json:"endpoint"` - Latency disk.Latency `json:"latency,omitempty"` - Throughput disk.Throughput `json:"throughput,omitempty"` - Error string `json:"error,omitempty"` -} - -// ServerNetOBDInfo - Network OBD info about a single MinIO node -type ServerNetOBDInfo struct { - Addr string `json:"addr"` - Net []NetOBDInfo `json:"net,omitempty"` - Error string `json:"error,omitempty"` -} - -// NetOBDInfo - one-to-one network connectivity Stats between 2 MinIO nodes -type NetOBDInfo struct { - Addr string `json:"remote"` - Latency net.Latency `json:"latency,omitempty"` - Throughput net.Throughput `json:"throughput,omitempty"` - Error string `json:"error,omitempty"` -} - -// OBDDataType - Typed OBD data types -type OBDDataType string - -// OBDDataTypes -const ( - OBDDataTypePerfDrive OBDDataType = "perfdrive" - OBDDataTypePerfNet OBDDataType = "perfnet" - OBDDataTypeMinioInfo OBDDataType = "minioinfo" - OBDDataTypeMinioConfig OBDDataType = "minioconfig" - OBDDataTypeSysCPU OBDDataType = "syscpu" - OBDDataTypeSysDiskHw OBDDataType = "sysdiskhw" - OBDDataTypeSysDocker OBDDataType = "sysdocker" // is this really needed? - OBDDataTypeSysOsInfo OBDDataType = "sysosinfo" - OBDDataTypeSysLoad OBDDataType = "sysload" // provides very little info. 
Making it TBD - OBDDataTypeSysMem OBDDataType = "sysmem" - OBDDataTypeSysNet OBDDataType = "sysnet" - OBDDataTypeSysProcess OBDDataType = "sysprocess" -) - -// OBDDataTypesMap - Map of OBD datatypes -var OBDDataTypesMap = map[string]OBDDataType{ - "perfdrive": OBDDataTypePerfDrive, - "perfnet": OBDDataTypePerfNet, - "minioinfo": OBDDataTypeMinioInfo, - "minioconfig": OBDDataTypeMinioConfig, - "syscpu": OBDDataTypeSysCPU, - "sysdiskhw": OBDDataTypeSysDiskHw, - "sysdocker": OBDDataTypeSysDocker, - "sysosinfo": OBDDataTypeSysOsInfo, - "sysload": OBDDataTypeSysLoad, - "sysmem": OBDDataTypeSysMem, - "sysnet": OBDDataTypeSysNet, - "sysprocess": OBDDataTypeSysProcess, -} - -// OBDDataTypesList - List of OBD datatypes -var OBDDataTypesList = []OBDDataType{ - OBDDataTypePerfDrive, - OBDDataTypePerfNet, - OBDDataTypeMinioInfo, - OBDDataTypeMinioConfig, - OBDDataTypeSysCPU, - OBDDataTypeSysDiskHw, - OBDDataTypeSysDocker, - OBDDataTypeSysOsInfo, - OBDDataTypeSysLoad, - OBDDataTypeSysMem, - OBDDataTypeSysNet, - OBDDataTypeSysProcess, -} - -// ServerOBDInfo - Connect to a minio server and call OBD Info Management API -// to fetch server's information represented by OBDInfo structure -func (adm *AdminClient) ServerOBDInfo(ctx context.Context, obdDataTypes []OBDDataType, deadline time.Duration) <-chan OBDInfo { - respChan := make(chan OBDInfo) - go func() { - v := url.Values{} - - v.Set("deadline", - deadline.Truncate(1*time.Second).String()) - - // start with all set to false - for _, d := range OBDDataTypesList { - v.Set(string(d), "false") - } - - // only 'trueify' user provided values - for _, d := range obdDataTypes { - v.Set(string(d), "true") - } - var OBDInfoMessage OBDInfo - OBDInfoMessage.TimeStamp = time.Now() - - if v.Get(string(OBDDataTypeMinioInfo)) == "true" { - info, err := adm.ServerInfo(ctx) - if err != nil { - respChan <- OBDInfo{ - Error: err.Error(), - } - return - } - OBDInfoMessage.Minio.Info = info - respChan <- OBDInfoMessage - } - - resp, err := adm.executeMethod(ctx, "GET", requestData{ - relPath: adminAPIPrefix + "/obdinfo", - queryValues: v, - }) - - defer closeResponse(resp) - if err != nil { - respChan <- OBDInfo{ - Error: err.Error(), - } - close(respChan) - return - } - - // Check response http status code - if resp.StatusCode != http.StatusOK { - respChan <- OBDInfo{ - Error: httpRespToErrorResponse(resp).Error(), - } - return - } - - // Unmarshal the server's json response - decoder := json.NewDecoder(resp.Body) - for { - err := decoder.Decode(&OBDInfoMessage) - OBDInfoMessage.TimeStamp = time.Now() - - if err == io.EOF { - break - } - if err != nil { - respChan <- OBDInfo{ - Error: err.Error(), - } - } - respChan <- OBDInfoMessage - } - - respChan <- OBDInfoMessage - close(respChan) - }() - return respChan - -} diff --git a/pkg/madmin/obd_freebsd.go b/pkg/madmin/obd_freebsd.go deleted file mode 100644 index 4894a54d..00000000 --- a/pkg/madmin/obd_freebsd.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
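For reference, a minimal sketch of driving the ServerOBDInfo API removed above, assuming placeholder endpoint and credentials and an arbitrary choice of OBD data types; it simply drains the returned channel until the server closes it.

// +build ignore

package main

import (
	"context"
	"log"
	"time"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	// Placeholder endpoint and credentials, as in the other examples.
	madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// Request only CPU info and drive performance data, with a 10 second deadline.
	types := []madmin.OBDDataType{
		madmin.OBDDataTypeSysCPU,
		madmin.OBDDataTypePerfDrive,
	}

	// ServerOBDInfo streams partial OBDInfo snapshots until the channel is closed.
	for info := range madmClnt.ServerOBDInfo(context.Background(), types, 10*time.Second) {
		if info.Error != "" {
			log.Fatalln(info.Error)
		}
		log.Printf("OBD snapshot received at %s\n", info.TimeStamp)
	}
}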
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -// ServerDiskHwOBDInfo - Includes usage counters, disk counters and partitions -type ServerDiskHwOBDInfo struct { - Addr string `json:"addr"` - Error string `json:"error,omitempty"` -} diff --git a/pkg/madmin/obd_other.go b/pkg/madmin/obd_other.go deleted file mode 100644 index fd87b113..00000000 --- a/pkg/madmin/obd_other.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build !freebsd - -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - diskhw "github.com/shirou/gopsutil/disk" -) - -// ServerDiskHwOBDInfo - Includes usage counters, disk counters and partitions -type ServerDiskHwOBDInfo struct { - Addr string `json:"addr"` - Usage []*diskhw.UsageStat `json:"usages,omitempty"` - Partitions []diskhw.PartitionStat `json:"partitions,omitempty"` - Counters map[string]diskhw.IOCountersStat `json:"counters,omitempty"` - Error string `json:"error,omitempty"` -} diff --git a/pkg/madmin/parse-kv.go b/pkg/madmin/parse-kv.go deleted file mode 100644 index 98255998..00000000 --- a/pkg/madmin/parse-kv.go +++ /dev/null @@ -1,204 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "bufio" - "bytes" - "fmt" - "sort" - "strings" - "unicode" -) - -// KV - is a shorthand of each key value. -type KV struct { - Key string `json:"key"` - Value string `json:"value"` -} - -// KVS - is a shorthand for some wrapper functions -// to operate on list of key values. -type KVS []KV - -// Empty - return if kv is empty -func (kvs KVS) Empty() bool { - return len(kvs) == 0 -} - -// Set sets a value, if not sets a default value. -func (kvs *KVS) Set(key, value string) { - for i, kv := range *kvs { - if kv.Key == key { - (*kvs)[i] = KV{ - Key: key, - Value: value, - } - return - } - } - *kvs = append(*kvs, KV{ - Key: key, - Value: value, - }) -} - -// Get - returns the value of a key, if not found returns empty. 
-func (kvs KVS) Get(key string) string { - v, ok := kvs.Lookup(key) - if ok { - return v - } - return "" -} - -// Lookup - lookup a key in a list of KVS -func (kvs KVS) Lookup(key string) (string, bool) { - for _, kv := range kvs { - if kv.Key == key { - return kv.Value, true - } - } - return "", false -} - -// Target signifies an individual target -type Target struct { - SubSystem string `json:"subSys"` - KVS KVS `json:"kvs"` -} - -// Standard config keys and values. -const ( - EnableKey = "enable" - CommentKey = "comment" - - // Enable values - EnableOn = "on" - EnableOff = "off" -) - -// HasSpace - returns if given string has space. -func HasSpace(s string) bool { - for _, r := range s { - if unicode.IsSpace(r) { - return true - } - } - return false -} - -// Constant separators -const ( - SubSystemSeparator = `:` - KvSeparator = `=` - KvComment = `#` - KvSpaceSeparator = ` ` - KvNewline = "\n" - KvDoubleQuote = `"` - KvSingleQuote = `'` - - Default = `_` -) - -// SanitizeValue - this function is needed, to trim off single or double quotes, creeping into the values. -func SanitizeValue(v string) string { - v = strings.TrimSuffix(strings.TrimPrefix(strings.TrimSpace(v), KvDoubleQuote), KvDoubleQuote) - return strings.TrimSuffix(strings.TrimPrefix(v, KvSingleQuote), KvSingleQuote) -} - -// KvFields - converts an input string of form "k1=v1 k2=v2" into -// fields of ["k1=v1", "k2=v2"], the tokenization of each `k=v` -// happens with the right number of input keys, if keys -// input is empty returned value is empty slice as well. -func KvFields(input string, keys []string) []string { - var valueIndexes []int - for _, key := range keys { - i := strings.Index(input, key+KvSeparator) - if i == -1 { - continue - } - valueIndexes = append(valueIndexes, i) - } - - sort.Ints(valueIndexes) - var fields = make([]string, len(valueIndexes)) - for i := range valueIndexes { - j := i + 1 - if j < len(valueIndexes) { - fields[i] = strings.TrimSpace(input[valueIndexes[i]:valueIndexes[j]]) - } else { - fields[i] = strings.TrimSpace(input[valueIndexes[i]:]) - } - } - return fields -} - -// ParseTarget - adds new targets, by parsing the input string s. 
-func ParseTarget(s string, help Help) (*Target, error) { - inputs := strings.SplitN(s, KvSpaceSeparator, 2) - if len(inputs) <= 1 { - return nil, fmt.Errorf("invalid number of arguments '%s'", s) - } - - subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2) - if len(subSystemValue) == 0 { - return nil, fmt.Errorf("invalid number of arguments %s", s) - } - - if help.SubSys != subSystemValue[0] { - return nil, fmt.Errorf("unknown sub-system %s", subSystemValue[0]) - } - - var kvs = KVS{} - var prevK string - for _, v := range KvFields(inputs[1], help.Keys()) { - kv := strings.SplitN(v, KvSeparator, 2) - if len(kv) == 0 { - continue - } - if len(kv) == 1 && prevK != "" { - value := strings.Join([]string{ - kvs.Get(prevK), - SanitizeValue(kv[0]), - }, KvSpaceSeparator) - kvs.Set(prevK, value) - continue - } - if len(kv) == 2 { - prevK = kv[0] - kvs.Set(prevK, SanitizeValue(kv[1])) - continue - } - return nil, fmt.Errorf("value for key '%s' cannot be empty", kv[0]) - } - - return &Target{ - SubSystem: inputs[0], - KVS: kvs, - }, nil -} - -// ParseSubSysTarget - parse a sub-system target -func ParseSubSysTarget(buf []byte, help Help) (target *Target, err error) { - bio := bufio.NewScanner(bytes.NewReader(buf)) - if bio.Scan() { - return ParseTarget(bio.Text(), help) - } - return nil, bio.Err() -} diff --git a/pkg/madmin/policy-commands.go b/pkg/madmin/policy-commands.go deleted file mode 100644 index ddf122cd..00000000 --- a/pkg/madmin/policy-commands.go +++ /dev/null @@ -1,177 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - iampolicy "github.com/minio/minio/pkg/iam/policy" -) - -// InfoCannedPolicy - expand canned policy into JSON structure. -func (adm *AdminClient) InfoCannedPolicy(ctx context.Context, policyName string) (*iampolicy.Policy, error) { - queryValues := url.Values{} - queryValues.Set("name", policyName) - - reqData := requestData{ - relPath: adminAPIPrefix + "/info-canned-policy", - queryValues: queryValues, - } - - // Execute GET on /minio/admin/v3/info-canned-policy - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - return iampolicy.ParseConfig(resp.Body) -} - -// ListCannedPolicies - list all configured canned policies. 
-func (adm *AdminClient) ListCannedPolicies(ctx context.Context) (map[string]*iampolicy.Policy, error) { - reqData := requestData{ - relPath: adminAPIPrefix + "/list-canned-policies", - } - - // Execute GET on /minio/admin/v3/list-canned-policies - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - var policies = make(map[string]*iampolicy.Policy) - if err = json.Unmarshal(respBytes, &policies); err != nil { - return nil, err - } - - return policies, nil -} - -// RemoveCannedPolicy - remove a policy for a canned. -func (adm *AdminClient) RemoveCannedPolicy(ctx context.Context, policyName string) error { - queryValues := url.Values{} - queryValues.Set("name", policyName) - - reqData := requestData{ - relPath: adminAPIPrefix + "/remove-canned-policy", - queryValues: queryValues, - } - - // Execute DELETE on /minio/admin/v3/remove-canned-policy to remove policy. - resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// AddCannedPolicy - adds a policy for a canned. -func (adm *AdminClient) AddCannedPolicy(ctx context.Context, policyName string, policy *iampolicy.Policy) error { - if policy == nil { - return ErrInvalidArgument("policy input cannot be empty") - } - - if err := policy.Validate(); err != nil { - return err - } - - buf, err := json.Marshal(policy) - if err != nil { - return err - } - - queryValues := url.Values{} - queryValues.Set("name", policyName) - - reqData := requestData{ - relPath: adminAPIPrefix + "/add-canned-policy", - queryValues: queryValues, - content: buf, - } - - // Execute PUT on /minio/admin/v3/add-canned-policy to set policy. - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// SetPolicy - sets the policy for a user or a group. -func (adm *AdminClient) SetPolicy(ctx context.Context, policyName, entityName string, isGroup bool) error { - queryValues := url.Values{} - queryValues.Set("policyName", policyName) - queryValues.Set("userOrGroup", entityName) - groupStr := "false" - if isGroup { - groupStr = "true" - } - queryValues.Set("isGroup", groupStr) - - reqData := requestData{ - relPath: adminAPIPrefix + "/set-user-or-group-policy", - queryValues: queryValues, - } - - // Execute PUT on /minio/admin/v3/set-user-or-group-policy to set policy. - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - return nil -} diff --git a/pkg/madmin/profiling-commands.go b/pkg/madmin/profiling-commands.go deleted file mode 100644 index 84dc96ba..00000000 --- a/pkg/madmin/profiling-commands.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
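For reference, a minimal sketch of the canned-policy APIs removed above (ListCannedPolicies and SetPolicy), assuming placeholder endpoint and credentials; the policy name "readwrite" and the user "newuser" are illustrative assumptions, not values taken from this changeset.

// +build ignore

package main

import (
	"context"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	// Placeholder endpoint and credentials, as in the other examples.
	madmClnt, err := madmin.New("your-minio.example.com:9000", "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY", true)
	if err != nil {
		log.Fatalln(err)
	}

	// List the names of all canned policies known to the server.
	policies, err := madmClnt.ListCannedPolicies(context.Background())
	if err != nil {
		log.Fatalln(err)
	}
	for name := range policies {
		log.Println("canned policy:", name)
	}

	// Attach an existing canned policy to a user (isGroup=false); both names are placeholders.
	if err := madmClnt.SetPolicy(context.Background(), "readwrite", "newuser", false); err != nil {
		log.Fatalln(err)
	}
	log.Println("policy attached")
}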
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" -) - -// ProfilerType represents the profiler type -// passed to the profiler subsystem. -type ProfilerType string - -// Different supported profiler types. -const ( - ProfilerCPU ProfilerType = "cpu" // represents CPU profiler type - ProfilerMEM ProfilerType = "mem" // represents MEM profiler type - ProfilerBlock ProfilerType = "block" // represents Block profiler type - ProfilerMutex ProfilerType = "mutex" // represents Mutex profiler type - ProfilerTrace ProfilerType = "trace" // represents Trace profiler type - ProfilerThreads ProfilerType = "threads" // represents ThreadCreate profiler type - ProfilerGoroutines ProfilerType = "goroutines" // represents Goroutine dumps. -) - -// StartProfilingResult holds the result of starting -// profiler result in a given node. -type StartProfilingResult struct { - NodeName string `json:"nodeName"` - Success bool `json:"success"` - Error string `json:"error"` -} - -// StartProfiling makes an admin call to remotely start profiling on a standalone -// server or the whole cluster in case of a distributed setup. -func (adm *AdminClient) StartProfiling(ctx context.Context, profiler ProfilerType) ([]StartProfilingResult, error) { - v := url.Values{} - v.Set("profilerType", string(profiler)) - resp, err := adm.executeMethod(ctx, - http.MethodPost, requestData{ - relPath: adminAPIPrefix + "/profiling/start", - queryValues: v, - }, - ) - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - jsonResult, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - - var startResults []StartProfilingResult - err = json.Unmarshal(jsonResult, &startResults) - if err != nil { - return nil, err - } - - return startResults, nil -} - -// DownloadProfilingData makes an admin call to download profiling data of a standalone -// server or of the whole cluster in case of a distributed setup. -func (adm *AdminClient) DownloadProfilingData(ctx context.Context) (io.ReadCloser, error) { - path := fmt.Sprintf(adminAPIPrefix + "/profiling/download") - resp, err := adm.executeMethod(ctx, - http.MethodGet, requestData{ - relPath: path, - }, - ) - - if err != nil { - closeResponse(resp) - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - if resp.Body == nil { - return nil, errors.New("body is nil") - } - - return resp.Body, nil -} diff --git a/pkg/madmin/quota-commands.go b/pkg/madmin/quota-commands.go deleted file mode 100644 index 4a142b5e..00000000 --- a/pkg/madmin/quota-commands.go +++ /dev/null @@ -1,122 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" -) - -// QuotaType represents bucket quota type -type QuotaType string - -const ( - // HardQuota specifies a hard quota of usage for bucket - HardQuota QuotaType = "hard" - // FIFOQuota specifies a quota limit beyond which older files are deleted from bucket - FIFOQuota QuotaType = "fifo" -) - -// IsValid returns true if quota type is one of FIFO or Hard -func (t QuotaType) IsValid() bool { - return t == HardQuota || t == FIFOQuota -} - -// BucketQuota holds bucket quota restrictions -type BucketQuota struct { - Quota uint64 `json:"quota"` - Type QuotaType `json:"quotatype,omitempty"` -} - -// IsValid returns false if quota is invalid -// empty quota when Quota == 0 is always true. -func (q BucketQuota) IsValid() bool { - if q.Quota > 0 { - return q.Type.IsValid() - } - // Empty configs are valid. - return true -} - -// GetBucketQuota - get info on a user -func (adm *AdminClient) GetBucketQuota(ctx context.Context, bucket string) (q BucketQuota, err error) { - queryValues := url.Values{} - queryValues.Set("bucket", bucket) - - reqData := requestData{ - relPath: adminAPIPrefix + "/get-bucket-quota", - queryValues: queryValues, - } - - // Execute GET on /minio/admin/v3/get-quota - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - - defer closeResponse(resp) - if err != nil { - return q, err - } - - if resp.StatusCode != http.StatusOK { - return q, httpRespToErrorResponse(resp) - } - - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return q, err - } - if err = json.Unmarshal(b, &q); err != nil { - return q, err - } - - return q, nil -} - -// SetBucketQuota - sets a bucket's quota, if quota is set to '0' -// quota is disabled. -func (adm *AdminClient) SetBucketQuota(ctx context.Context, bucket string, quota *BucketQuota) error { - data, err := json.Marshal(quota) - if err != nil { - return err - } - - queryValues := url.Values{} - queryValues.Set("bucket", bucket) - - reqData := requestData{ - relPath: adminAPIPrefix + "/set-bucket-quota", - queryValues: queryValues, - content: data, - } - - // Execute PUT on /minio/admin/v3/set-bucket-quota to set quota for a bucket. - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} diff --git a/pkg/madmin/retry.go b/pkg/madmin/retry.go deleted file mode 100644 index 17520760..00000000 --- a/pkg/madmin/retry.go +++ /dev/null @@ -1,149 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
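To make the quota types above concrete, the sketch below applies a 1 GiB hard quota to a bucket and reads it back. The madmin.New constructor, credentials, and bucket name are assumptions/placeholders; only the BucketQuota type and the Get/SetBucketQuota methods shown above are used.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/minio/minio/pkg/madmin"
)

// applyHardQuota sets a hard quota on the bucket and prints the stored value.
func applyHardQuota(ctx context.Context, adm *madmin.AdminClient, bucket string) error {
	quota := &madmin.BucketQuota{
		Quota: 1 << 30, // 1 GiB, expressed in bytes
		Type:  madmin.HardQuota,
	}
	if !quota.IsValid() {
		return fmt.Errorf("invalid quota configuration")
	}
	if err := adm.SetBucketQuota(ctx, bucket, quota); err != nil {
		return err
	}
	current, err := adm.GetBucketQuota(ctx, bucket)
	if err != nil {
		return err
	}
	fmt.Printf("bucket %q: quota=%d type=%s\n", bucket, current.Quota, current.Type)
	return nil
}

func main() {
	// Assumed constructor and placeholder credentials.
	adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
	if err != nil {
		log.Fatalln(err)
	}
	if err := applyHardQuota(context.Background(), adm, "mybucket"); err != nil {
		log.Fatalln(err)
	}
}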
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "math/rand" - "net/http" - "sync" - "time" -) - -// MaxRetry is the maximum number of retries before stopping. -var MaxRetry = 10 - -// MaxJitter will randomize over the full exponential backoff time -const MaxJitter = 1.0 - -// NoJitter disables the use of jitter for randomizing the exponential backoff time -const NoJitter = 0.0 - -// DefaultRetryUnit - default unit multiplicative per retry. -// defaults to 1 second. -const DefaultRetryUnit = time.Second - -// DefaultRetryCap - Each retry attempt never waits no longer than -// this maximum time duration. -const DefaultRetryCap = time.Second * 30 - -// lockedRandSource provides protected rand source, implements rand.Source interface. -type lockedRandSource struct { - lk sync.Mutex - src rand.Source -} - -// Int63 returns a non-negative pseudo-random 63-bit integer as an int64. -func (r *lockedRandSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -// Seed uses the provided seed value to initialize the generator to a -// deterministic state. -func (r *lockedRandSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// newRetryTimer creates a timer with exponentially increasing -// delays until the maximum retry attempts are reached. -func (adm AdminClient) newRetryTimer(ctx context.Context, maxRetry int, unit time.Duration, cap time.Duration, jitter float64) <-chan int { - attemptCh := make(chan int) - - // computes the exponential backoff duration according to - // https://www.awsarchitectureblog.com/2015/03/backoff.html - exponentialBackoffWait := func(attempt int) time.Duration { - // normalize jitter to the range [0, 1.0] - if jitter < NoJitter { - jitter = NoJitter - } - if jitter > MaxJitter { - jitter = MaxJitter - } - - //sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1<<uint(attempt)) - if sleep > cap { - sleep = cap - } - if jitter != NoJitter { - sleep -= time.Duration(adm.random.Float64() * float64(sleep) * jitter) - } - return sleep - } - - go func() { - defer close(attemptCh) - for i := 0; i < maxRetry; i++ { - // Attempts start from 1. - select { - case attemptCh <- i + 1: - case <-ctx.Done(): - // Stop the routine. - return - } - - select { - case <-time.After(exponentialBackoffWait(i)): - case <-ctx.Done(): - // Stop the routine. - return - } - } - }() - return attemptCh -} - -// List of AWS S3 error codes which are retryable. -var retryableS3Codes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "InternalError": {}, - "SlowDown": {}, - // Add more AWS S3 codes here. -} - -// isS3CodeRetryable - is s3 error code retryable. -func isS3CodeRetryable(s3Code string) (ok bool) { - _, ok = retryableS3Codes[s3Code] - return ok -} - -// List of HTTP status codes which are retryable. 
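As a worked example of the backoff arithmetic in newRetryTimer above: the wait is min(cap, unit * 2^attempt), and up to a jitter fraction of that value is then randomly subtracted. The helper name and the sample parameters below are illustrative only, not part of the package API.

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// expBackoff mirrors the capped exponential backoff used by newRetryTimer:
// sleep = min(cap, unit * 2^attempt), minus a random share of up to jitter.
func expBackoff(attempt int, unit, cap time.Duration, jitter float64, rnd *rand.Rand) time.Duration {
	sleep := unit * time.Duration(1<<uint(attempt))
	if sleep > cap {
		sleep = cap
	}
	if jitter > 0 {
		sleep -= time.Duration(rnd.Float64() * float64(sleep) * jitter)
	}
	return sleep
}

func main() {
	rnd := rand.New(rand.NewSource(1)) // fixed seed so the run is reproducible
	for attempt := 0; attempt < 6; attempt++ {
		// With unit=1s and cap=30s the base waits are 1s, 2s, 4s, 8s, 16s, 30s.
		wait := expBackoff(attempt, time.Second, 30*time.Second, 0.5, rnd)
		fmt.Printf("attempt %d: wait %v\n", attempt, wait)
	}
}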
-var retryableHTTPStatusCodes = map[int]struct{}{ - http.StatusRequestTimeout: {}, - http.StatusTooManyRequests: {}, - http.StatusInternalServerError: {}, - http.StatusBadGateway: {}, - http.StatusServiceUnavailable: {}, - // Add more HTTP status codes here. -} - -// isHTTPStatusRetryable - is HTTP error code retryable. -func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { - _, ok = retryableHTTPStatusCodes[httpStatusCode] - return ok -} diff --git a/pkg/madmin/service-commands.go b/pkg/madmin/service-commands.go deleted file mode 100644 index 77417ac7..00000000 --- a/pkg/madmin/service-commands.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - "strconv" - - trace "github.com/minio/minio/pkg/trace" -) - -// ServiceRestart - restarts the MinIO cluster -func (adm *AdminClient) ServiceRestart(ctx context.Context) error { - return adm.serviceCallAction(ctx, ServiceActionRestart) -} - -// ServiceStop - stops the MinIO cluster -func (adm *AdminClient) ServiceStop(ctx context.Context) error { - return adm.serviceCallAction(ctx, ServiceActionStop) -} - -// ServiceAction - type to restrict service-action values -type ServiceAction string - -const ( - // ServiceActionRestart represents restart action - ServiceActionRestart ServiceAction = "restart" - // ServiceActionStop represents stop action - ServiceActionStop = "stop" -) - -// serviceCallAction - call service restart/update/stop API. -func (adm *AdminClient) serviceCallAction(ctx context.Context, action ServiceAction) error { - queryValues := url.Values{} - queryValues.Set("action", string(action)) - - // Request API to Restart server - resp, err := adm.executeMethod(ctx, - http.MethodPost, requestData{ - relPath: adminAPIPrefix + "/service", - queryValues: queryValues, - }, - ) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// ServiceTraceInfo holds http trace -type ServiceTraceInfo struct { - Trace trace.Info - Err error `json:"-"` -} - -// ServiceTrace - listen on http trace notifications. -func (adm AdminClient) ServiceTrace(ctx context.Context, allTrace, errTrace bool) <-chan ServiceTraceInfo { - traceInfoCh := make(chan ServiceTraceInfo) - // Only success, start a routine to start reading line by line. 
- go func(traceInfoCh chan<- ServiceTraceInfo) { - defer close(traceInfoCh) - for { - urlValues := make(url.Values) - urlValues.Set("all", strconv.FormatBool(allTrace)) - urlValues.Set("err", strconv.FormatBool(errTrace)) - reqData := requestData{ - relPath: adminAPIPrefix + "/trace", - queryValues: urlValues, - } - // Execute GET to call trace handler - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - if err != nil { - closeResponse(resp) - return - } - - if resp.StatusCode != http.StatusOK { - traceInfoCh <- ServiceTraceInfo{Err: httpRespToErrorResponse(resp)} - return - } - - dec := json.NewDecoder(resp.Body) - for { - var info trace.Info - if err = dec.Decode(&info); err != nil { - break - } - select { - case <-ctx.Done(): - return - case traceInfoCh <- ServiceTraceInfo{Trace: info}: - } - } - } - }(traceInfoCh) - - // Returns the trace info channel, for caller to start reading from. - return traceInfoCh -} diff --git a/pkg/madmin/top-commands.go b/pkg/madmin/top-commands.go deleted file mode 100644 index 9469b69b..00000000 --- a/pkg/madmin/top-commands.go +++ /dev/null @@ -1,81 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "io/ioutil" - "net/http" - "time" -) - -// LockEntry holds information about client requesting the lock, -// servers holding the lock, source on the client machine, -// ID, type(read or write) and time stamp. -type LockEntry struct { - Timestamp time.Time `json:"time"` // Timestamp set at the time of initialization. - Resource string `json:"resource"` // Resource contains info like bucket, object etc - Type string `json:"type"` // Bool whether write or read lock. - Source string `json:"source"` // Source which created the lock - ServerList []string `json:"serverlist"` // RPC path of servers issuing the lock. - Owner string `json:"owner"` // RPC path of client claiming lock. - ID string `json:"id"` // UID to uniquely identify request of client. -} - -// LockEntries - To sort the locks -type LockEntries []LockEntry - -func (l LockEntries) Len() int { - return len(l) -} - -func (l LockEntries) Less(i, j int) bool { - return l[i].Timestamp.Before(l[j].Timestamp) -} - -func (l LockEntries) Swap(i, j int) { - l[i], l[j] = l[j], l[i] -} - -// TopLocks - returns the oldest locks in a minio setup. -func (adm *AdminClient) TopLocks(ctx context.Context) (LockEntries, error) { - // Execute GET on /minio/admin/v3/top/locks - // to get the oldest locks in a minio setup. 
- resp, err := adm.executeMethod(ctx, - http.MethodGet, - requestData{relPath: adminAPIPrefix + "/top/locks"}, - ) - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - response, err := ioutil.ReadAll(resp.Body) - if err != nil { - return LockEntries{}, err - } - - var lockEntries LockEntries - err = json.Unmarshal(response, &lockEntries) - return lockEntries, err -} diff --git a/pkg/madmin/transport.go b/pkg/madmin/transport.go deleted file mode 100644 index ff9be741..00000000 --- a/pkg/madmin/transport.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package madmin - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -// DefaultTransport - this default transport is similar to -// http.DefaultTransport but with additional param DisableCompression -// is set to true to avoid decompressing content with 'gzip' encoding. -var DefaultTransport = func(secure bool) http.RoundTripper { - tr := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 5 * time.Second, - KeepAlive: 15 * time.Second, - }).DialContext, - MaxIdleConns: 1024, - MaxIdleConnsPerHost: 1024, - ResponseHeaderTimeout: 60 * time.Second, - IdleConnTimeout: 60 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - // Set this value so that the underlying transport round-tripper - // doesn't try to auto decode the body of objects with - // content-encoding set to `gzip`. - // - // Refer: - // https://golang.org/src/net/http/transport.go?h=roundTrip#L1843 - DisableCompression: true, - } - - if secure { - tr.TLSClientConfig = &tls.Config{ - // Can't use SSLv3 because of POODLE and BEAST - // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher - // Can't use TLSv1.1 because of RC4 cipher usage - MinVersion: tls.VersionTLS12, - } - } - return tr -} diff --git a/pkg/madmin/update-commands.go b/pkg/madmin/update-commands.go deleted file mode 100644 index 0139af8b..00000000 --- a/pkg/madmin/update-commands.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package madmin - -import ( - "context" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" -) - -// ServerUpdateStatus - contains the response of service update API -type ServerUpdateStatus struct { - CurrentVersion string `json:"currentVersion"` - UpdatedVersion string `json:"updatedVersion"` -} - -// ServerUpdate - updates and restarts the MinIO cluster to latest version. -// optionally takes an input URL to specify a custom update binary link -func (adm *AdminClient) ServerUpdate(ctx context.Context, updateURL string) (us ServerUpdateStatus, err error) { - queryValues := url.Values{} - queryValues.Set("updateURL", updateURL) - - // Request API to Restart server - resp, err := adm.executeMethod(ctx, - http.MethodPost, requestData{ - relPath: adminAPIPrefix + "/update", - queryValues: queryValues, - }, - ) - defer closeResponse(resp) - if err != nil { - return us, err - } - - if resp.StatusCode != http.StatusOK { - return us, httpRespToErrorResponse(resp) - } - - buf, err := ioutil.ReadAll(resp.Body) - if err != nil { - return us, err - } - err = json.Unmarshal(buf, &us) - return us, err -} diff --git a/pkg/madmin/user-commands.go b/pkg/madmin/user-commands.go deleted file mode 100644 index a5b9f0fb..00000000 --- a/pkg/madmin/user-commands.go +++ /dev/null @@ -1,388 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package madmin - -import ( - "context" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/minio/minio/pkg/auth" - iampolicy "github.com/minio/minio/pkg/iam/policy" -) - -// AccountAccess contains information about -type AccountAccess struct { - Read bool `json:"read"` - Write bool `json:"write"` -} - -// BucketUsageInfo represents bucket usage of a bucket, and its relevant -// access type for an account -type BucketUsageInfo struct { - Name string `json:"name"` - Size uint64 `json:"size"` - Created time.Time `json:"created"` - Access AccountAccess `json:"access"` -} - -// AccountUsageInfo represents the account usage info of an -// account across buckets. -type AccountUsageInfo struct { - AccountName string - Buckets []BucketUsageInfo -} - -// AccountUsageInfo returns the usage info for the authenticating account. 
-func (adm *AdminClient) AccountUsageInfo(ctx context.Context) (AccountUsageInfo, error) { - resp, err := adm.executeMethod(ctx, http.MethodGet, requestData{relPath: adminAPIPrefix + "/accountusageinfo"}) - defer closeResponse(resp) - if err != nil { - return AccountUsageInfo{}, err - } - - // Check response http status code - if resp.StatusCode != http.StatusOK { - return AccountUsageInfo{}, httpRespToErrorResponse(resp) - } - - // Unmarshal the server's json response - var accountInfo AccountUsageInfo - - respBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return AccountUsageInfo{}, err - } - - err = json.Unmarshal(respBytes, &accountInfo) - if err != nil { - return AccountUsageInfo{}, err - } - - return accountInfo, nil -} - -// AccountStatus - account status. -type AccountStatus string - -// Account status per user. -const ( - AccountEnabled AccountStatus = "enabled" - AccountDisabled AccountStatus = "disabled" -) - -// UserInfo carries information about long term users. -type UserInfo struct { - SecretKey string `json:"secretKey,omitempty"` - PolicyName string `json:"policyName,omitempty"` - Status AccountStatus `json:"status"` - MemberOf []string `json:"memberOf,omitempty"` -} - -// RemoveUser - remove a user. -func (adm *AdminClient) RemoveUser(ctx context.Context, accessKey string) error { - queryValues := url.Values{} - queryValues.Set("accessKey", accessKey) - - reqData := requestData{ - relPath: adminAPIPrefix + "/remove-user", - queryValues: queryValues, - } - - // Execute DELETE on /minio/admin/v3/remove-user to remove a user. - resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// ListUsers - list all users. -func (adm *AdminClient) ListUsers(ctx context.Context) (map[string]UserInfo, error) { - reqData := requestData{ - relPath: adminAPIPrefix + "/list-users", - } - - // Execute GET on /minio/admin/v3/list-users - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - - defer closeResponse(resp) - if err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusOK { - return nil, httpRespToErrorResponse(resp) - } - - data, err := DecryptData(adm.getSecretKey(), resp.Body) - if err != nil { - return nil, err - } - - var users = make(map[string]UserInfo) - if err = json.Unmarshal(data, &users); err != nil { - return nil, err - } - - return users, nil -} - -// GetUserInfo - get info on a user -func (adm *AdminClient) GetUserInfo(ctx context.Context, name string) (u UserInfo, err error) { - queryValues := url.Values{} - queryValues.Set("accessKey", name) - - reqData := requestData{ - relPath: adminAPIPrefix + "/user-info", - queryValues: queryValues, - } - - // Execute GET on /minio/admin/v3/user-info - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - - defer closeResponse(resp) - if err != nil { - return u, err - } - - if resp.StatusCode != http.StatusOK { - return u, httpRespToErrorResponse(resp) - } - - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return u, err - } - - if err = json.Unmarshal(b, &u); err != nil { - return u, err - } - - return u, nil -} - -// SetUser - sets a user info. 
-func (adm *AdminClient) SetUser(ctx context.Context, accessKey, secretKey string, status AccountStatus) error { - - if !auth.IsAccessKeyValid(accessKey) { - return auth.ErrInvalidAccessKeyLength - } - - if !auth.IsSecretKeyValid(secretKey) { - return auth.ErrInvalidSecretKeyLength - } - - data, err := json.Marshal(UserInfo{ - SecretKey: secretKey, - Status: status, - }) - if err != nil { - return err - } - econfigBytes, err := EncryptData(adm.getSecretKey(), data) - if err != nil { - return err - } - - queryValues := url.Values{} - queryValues.Set("accessKey", accessKey) - - reqData := requestData{ - relPath: adminAPIPrefix + "/add-user", - queryValues: queryValues, - content: econfigBytes, - } - - // Execute PUT on /minio/admin/v3/add-user to set a user. - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// AddUser - adds a user. -func (adm *AdminClient) AddUser(ctx context.Context, accessKey, secretKey string) error { - return adm.SetUser(ctx, accessKey, secretKey, AccountEnabled) -} - -// SetUserStatus - adds a status for a user. -func (adm *AdminClient) SetUserStatus(ctx context.Context, accessKey string, status AccountStatus) error { - queryValues := url.Values{} - queryValues.Set("accessKey", accessKey) - queryValues.Set("status", string(status)) - - reqData := requestData{ - relPath: adminAPIPrefix + "/set-user-status", - queryValues: queryValues, - } - - // Execute PUT on /minio/admin/v3/set-user-status to set status. - resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusOK { - return httpRespToErrorResponse(resp) - } - - return nil -} - -// AddServiceAccountReq is the request body of the add service account admin call -type AddServiceAccountReq struct { - Policy *iampolicy.Policy `json:"policy,omitempty"` -} - -// AddServiceAccountResp is the response body of the add service account admin call -type AddServiceAccountResp struct { - Credentials auth.Credentials `json:"credentials"` -} - -// AddServiceAccount - creates a new service account belonging to the user sending -// the request while restricting the service account permission by the given policy document. -func (adm *AdminClient) AddServiceAccount(ctx context.Context, policy *iampolicy.Policy) (auth.Credentials, error) { - if policy != nil { - if err := policy.Validate(); err != nil { - return auth.Credentials{}, err - } - } - - data, err := json.Marshal(AddServiceAccountReq{ - Policy: policy, - }) - if err != nil { - return auth.Credentials{}, err - } - - econfigBytes, err := EncryptData(adm.getSecretKey(), data) - if err != nil { - return auth.Credentials{}, err - } - - reqData := requestData{ - relPath: adminAPIPrefix + "/add-service-account", - content: econfigBytes, - } - - // Execute PUT on /minio/admin/v3/add-service-account to set a user. 
- resp, err := adm.executeMethod(ctx, http.MethodPut, reqData) - defer closeResponse(resp) - if err != nil { - return auth.Credentials{}, err - } - - if resp.StatusCode != http.StatusOK { - return auth.Credentials{}, httpRespToErrorResponse(resp) - } - - data, err = DecryptData(adm.getSecretKey(), resp.Body) - if err != nil { - return auth.Credentials{}, err - } - - var serviceAccountResp AddServiceAccountResp - if err = json.Unmarshal(data, &serviceAccountResp); err != nil { - return auth.Credentials{}, err - } - return serviceAccountResp.Credentials, nil -} - -// ListServiceAccountsResp is the response body of the list service accounts call -type ListServiceAccountsResp struct { - Accounts []string `json:"accounts"` -} - -// ListServiceAccounts - list service accounts belonging to the specified user -func (adm *AdminClient) ListServiceAccounts(ctx context.Context) (ListServiceAccountsResp, error) { - reqData := requestData{ - relPath: adminAPIPrefix + "/list-service-accounts", - } - - // Execute GET on /minio/admin/v3/list-service-accounts - resp, err := adm.executeMethod(ctx, http.MethodGet, reqData) - defer closeResponse(resp) - if err != nil { - return ListServiceAccountsResp{}, err - } - - if resp.StatusCode != http.StatusOK { - return ListServiceAccountsResp{}, httpRespToErrorResponse(resp) - } - - data, err := DecryptData(adm.getSecretKey(), resp.Body) - if err != nil { - return ListServiceAccountsResp{}, err - } - - var listResp ListServiceAccountsResp - if err = json.Unmarshal(data, &listResp); err != nil { - return ListServiceAccountsResp{}, err - } - return listResp, nil -} - -// DeleteServiceAccount - delete a specified service account. The server will reject -// the request if the service account does not belong to the user initiating the request -func (adm *AdminClient) DeleteServiceAccount(ctx context.Context, serviceAccount string) error { - if !auth.IsAccessKeyValid(serviceAccount) { - return auth.ErrInvalidAccessKeyLength - } - - queryValues := url.Values{} - queryValues.Set("accessKey", serviceAccount) - - reqData := requestData{ - relPath: adminAPIPrefix + "/delete-service-account", - queryValues: queryValues, - } - - // Execute DELETE on /minio/admin/v3/delete-service-account - resp, err := adm.executeMethod(ctx, http.MethodDelete, reqData) - defer closeResponse(resp) - if err != nil { - return err - } - - if resp.StatusCode != http.StatusNoContent { - return httpRespToErrorResponse(resp) - } - - return nil -} diff --git a/pkg/madmin/utils.go b/pkg/madmin/utils.go deleted file mode 100644 index 035bb636..00000000 --- a/pkg/madmin/utils.go +++ /dev/null @@ -1,133 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package madmin - -import ( - "encoding/json" - "io" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - - sha256 "github.com/minio/sha256-simd" - - "github.com/minio/minio-go/v6/pkg/s3utils" -) - -// AdminAPIVersion - admin api version used in the request. 
-const ( - AdminAPIVersion = "v3" - AdminAPIVersionV2 = "v2" - adminAPIPrefix = "/" + AdminAPIVersion -) - -// sum256 calculate sha256 sum for an input byte array. -func sum256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -// jsonDecoder decode json to go type. -func jsonDecoder(body io.Reader, v interface{}) error { - d := json.NewDecoder(body) - return d.Decode(v) -} - -// getEndpointURL - construct a new endpoint. -func getEndpointURL(endpoint string, secure bool) (*url.URL, error) { - if strings.Contains(endpoint, ":") { - host, _, err := net.SplitHostPort(endpoint) - if err != nil { - return nil, err - } - if !s3utils.IsValidIP(host) && !s3utils.IsValidDomain(host) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, ErrInvalidArgument(msg) - } - } else { - if !s3utils.IsValidIP(endpoint) && !s3utils.IsValidDomain(endpoint) { - msg := "Endpoint: " + endpoint + " does not follow ip address or domain name standards." - return nil, ErrInvalidArgument(msg) - } - } - - // If secure is false, use 'http' scheme. - scheme := "https" - if !secure { - scheme = "http" - } - - // Strip the obvious :443 and :80 from the endpoint - // to avoid the signature mismatch error. - if secure && strings.HasSuffix(endpoint, ":443") { - endpoint = strings.TrimSuffix(endpoint, ":443") - } - if !secure && strings.HasSuffix(endpoint, ":80") { - endpoint = strings.TrimSuffix(endpoint, ":80") - } - - // Construct a secured endpoint URL. - endpointURLStr := scheme + "://" + endpoint - endpointURL, err := url.Parse(endpointURLStr) - if err != nil { - return nil, err - } - - // Validate incoming endpoint URL. - if err := isValidEndpointURL(endpointURL.String()); err != nil { - return nil, err - } - return endpointURL, nil -} - -// Verify if input endpoint URL is valid. -func isValidEndpointURL(endpointURL string) error { - if endpointURL == "" { - return ErrInvalidArgument("Endpoint url cannot be empty.") - } - url, err := url.Parse(endpointURL) - if err != nil { - return ErrInvalidArgument("Endpoint url cannot be parsed.") - } - if url.Path != "/" && url.Path != "" { - return ErrInvalidArgument("Endpoint url cannot have fully qualified paths.") - } - return nil -} - -// closeResponse close non nil response with any response Body. -// convenient wrapper to drain any remaining data on response body. -// -// Subsequently this allows golang http RoundTripper -// to re-use the same connection for future requests. -func closeResponse(resp *http.Response) { - // Callers should close resp.Body when done reading from it. - // If resp.Body is not closed, the Client's underlying RoundTripper - // (typically Transport) may not be able to re-use a persistent TCP - // connection to the server for a subsequent "keep-alive" request. - if resp != nil && resp.Body != nil { - // Drain any remaining Body and then close the connection. - // Without this closing connection would disallow re-using - // the same connection for future uses. - // - http://stackoverflow.com/a/17961593/4465767 - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - } -} diff --git a/pkg/mimedb/Makefile b/pkg/mimedb/Makefile deleted file mode 100644 index 537bf1f7..00000000 --- a/pkg/mimedb/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -# Generate db.go from db.json downloaded nodejs mime-db project. -# NOTE: Autogenerated db.go needs to be vet proofed. 
\ - Manually edit json -> JSON for all variable names -all: download build - -# Download db.json from NodeJS's mime-db project. It is under MIT license. -download: - @mkdir db - @wget -nv -q https://cdn.rawgit.com/jshttp/mime-db/master/db.json -O db/db.json - - -# After generating db.go, clean up downloaded db.json. -build: download - @go run util/gen-db.go db/db.json > db.go - @rm -f db/db.json - @rm -rf db - @echo Generated \"db.go\". diff --git a/pkg/mimedb/db.go b/pkg/mimedb/db.go deleted file mode 100644 index 534fbf2f..00000000 --- a/pkg/mimedb/db.go +++ /dev/null @@ -1,4411 +0,0 @@ -// DO NOT EDIT THIS FILE. IT IS AUTO-GENERATED BY "gen-db.go". // -/* - * mimedb: Mime Database, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package mimedb is a database of file extension to mime content-type. -// Definitions are imported from NodeJS mime-db project under MIT license. -package mimedb - -// DB - Mime is a collection of mime types with extension as key and content-type as value. -var DB = map[string]struct { - ContentType string - Compressible bool -}{ - "123": { - ContentType: "application/vnd.lotus-1-2-3", - Compressible: false, - }, - "3dml": { - ContentType: "text/vnd.in3d.3dml", - Compressible: false, - }, - "3ds": { - ContentType: "image/x-3ds", - Compressible: false, - }, - "3g2": { - ContentType: "video/3gpp2", - Compressible: false, - }, - "3gp": { - ContentType: "video/3gpp", - Compressible: false, - }, - "3gpp": { - ContentType: "video/3gpp", - Compressible: false, - }, - "7z": { - ContentType: "application/x-7z-compressed", - Compressible: false, - }, - "aab": { - ContentType: "application/x-authorware-bin", - Compressible: false, - }, - "aac": { - ContentType: "audio/x-aac", - Compressible: false, - }, - "aam": { - ContentType: "application/x-authorware-map", - Compressible: false, - }, - "aas": { - ContentType: "application/x-authorware-seg", - Compressible: false, - }, - "abw": { - ContentType: "application/x-abiword", - Compressible: false, - }, - "ac": { - ContentType: "application/pkix-attr-cert", - Compressible: false, - }, - "acc": { - ContentType: "application/vnd.americandynamics.acc", - Compressible: false, - }, - "ace": { - ContentType: "application/x-ace-compressed", - Compressible: false, - }, - "acu": { - ContentType: "application/vnd.acucobol", - Compressible: false, - }, - "acutc": { - ContentType: "application/vnd.acucorp", - Compressible: false, - }, - "adp": { - ContentType: "audio/adpcm", - Compressible: false, - }, - "aep": { - ContentType: "application/vnd.audiograph", - Compressible: false, - }, - "afm": { - ContentType: "application/x-font-type1", - Compressible: false, - }, - "afp": { - ContentType: "application/vnd.ibm.modcap", - Compressible: false, - }, - "ahead": { - ContentType: "application/vnd.ahead.space", - Compressible: false, - }, - "ai": { - ContentType: "application/postscript", - Compressible: false, - }, - "aif": { - ContentType: "audio/x-aiff", - Compressible: false, - }, - "aifc": { - 
ContentType: "audio/x-aiff", - Compressible: false, - }, - "aiff": { - ContentType: "audio/x-aiff", - Compressible: false, - }, - "air": { - ContentType: "application/vnd.adobe.air-application-installer-package+zip", - Compressible: false, - }, - "ait": { - ContentType: "application/vnd.dvb.ait", - Compressible: false, - }, - "ami": { - ContentType: "application/vnd.amiga.ami", - Compressible: false, - }, - "apk": { - ContentType: "application/vnd.android.package-archive", - Compressible: false, - }, - "apng": { - ContentType: "image/apng", - Compressible: false, - }, - "appcache": { - ContentType: "text/cache-manifest", - Compressible: false, - }, - "application": { - ContentType: "application/x-ms-application", - Compressible: false, - }, - "apr": { - ContentType: "application/vnd.lotus-approach", - Compressible: false, - }, - "arc": { - ContentType: "application/x-freearc", - Compressible: false, - }, - "arj": { - ContentType: "application/x-arj", - Compressible: false, - }, - "asc": { - ContentType: "application/pgp-signature", - Compressible: false, - }, - "asf": { - ContentType: "video/x-ms-asf", - Compressible: false, - }, - "asm": { - ContentType: "text/x-asm", - Compressible: false, - }, - "aso": { - ContentType: "application/vnd.accpac.simply.aso", - Compressible: false, - }, - "asx": { - ContentType: "video/x-ms-asf", - Compressible: false, - }, - "atc": { - ContentType: "application/vnd.acucorp", - Compressible: false, - }, - "atom": { - ContentType: "application/atom+xml", - Compressible: false, - }, - "atomcat": { - ContentType: "application/atomcat+xml", - Compressible: false, - }, - "atomsvc": { - ContentType: "application/atomsvc+xml", - Compressible: false, - }, - "atx": { - ContentType: "application/vnd.antix.game-component", - Compressible: false, - }, - "au": { - ContentType: "audio/basic", - Compressible: false, - }, - "avi": { - ContentType: "video/x-msvideo", - Compressible: false, - }, - "aw": { - ContentType: "application/applixware", - Compressible: false, - }, - "azf": { - ContentType: "application/vnd.airzip.filesecure.azf", - Compressible: false, - }, - "azs": { - ContentType: "application/vnd.airzip.filesecure.azs", - Compressible: false, - }, - "azv": { - ContentType: "image/vnd.airzip.accelerator.azv", - Compressible: false, - }, - "azw": { - ContentType: "application/vnd.amazon.ebook", - Compressible: false, - }, - "bat": { - ContentType: "application/x-msdownload", - Compressible: false, - }, - "bcpio": { - ContentType: "application/x-bcpio", - Compressible: false, - }, - "bdf": { - ContentType: "application/x-font-bdf", - Compressible: false, - }, - "bdm": { - ContentType: "application/vnd.syncml.dm+wbxml", - Compressible: false, - }, - "bdoc": { - ContentType: "application/x-bdoc", - Compressible: false, - }, - "bed": { - ContentType: "application/vnd.realvnc.bed", - Compressible: false, - }, - "bh2": { - ContentType: "application/vnd.fujitsu.oasysprs", - Compressible: false, - }, - "bin": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "blb": { - ContentType: "application/x-blorb", - Compressible: false, - }, - "blorb": { - ContentType: "application/x-blorb", - Compressible: false, - }, - "bmi": { - ContentType: "application/vnd.bmi", - Compressible: false, - }, - "bmp": { - ContentType: "image/x-ms-bmp", - Compressible: false, - }, - "book": { - ContentType: "application/vnd.framemaker", - Compressible: false, - }, - "box": { - ContentType: "application/vnd.previewsystems.box", - Compressible: false, - }, - "boz": { - 
ContentType: "application/x-bzip2", - Compressible: false, - }, - "bpk": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "btif": { - ContentType: "image/prs.btif", - Compressible: false, - }, - "buffer": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "bz": { - ContentType: "application/x-bzip", - Compressible: false, - }, - "bz2": { - ContentType: "application/x-bzip2", - Compressible: false, - }, - "c": { - ContentType: "text/x-c", - Compressible: false, - }, - "c11amc": { - ContentType: "application/vnd.cluetrust.cartomobile-config", - Compressible: false, - }, - "c11amz": { - ContentType: "application/vnd.cluetrust.cartomobile-config-pkg", - Compressible: false, - }, - "c4d": { - ContentType: "application/vnd.clonk.c4group", - Compressible: false, - }, - "c4f": { - ContentType: "application/vnd.clonk.c4group", - Compressible: false, - }, - "c4g": { - ContentType: "application/vnd.clonk.c4group", - Compressible: false, - }, - "c4p": { - ContentType: "application/vnd.clonk.c4group", - Compressible: false, - }, - "c4u": { - ContentType: "application/vnd.clonk.c4group", - Compressible: false, - }, - "cab": { - ContentType: "application/vnd.ms-cab-compressed", - Compressible: false, - }, - "caf": { - ContentType: "audio/x-caf", - Compressible: false, - }, - "cap": { - ContentType: "application/vnd.tcpdump.pcap", - Compressible: false, - }, - "car": { - ContentType: "application/vnd.curl.car", - Compressible: false, - }, - "cat": { - ContentType: "application/vnd.ms-pki.seccat", - Compressible: false, - }, - "cb7": { - ContentType: "application/x-cbr", - Compressible: false, - }, - "cba": { - ContentType: "application/x-cbr", - Compressible: false, - }, - "cbr": { - ContentType: "application/x-cbr", - Compressible: false, - }, - "cbt": { - ContentType: "application/x-cbr", - Compressible: false, - }, - "cbz": { - ContentType: "application/x-cbr", - Compressible: false, - }, - "cc": { - ContentType: "text/x-c", - Compressible: false, - }, - "cco": { - ContentType: "application/x-cocoa", - Compressible: false, - }, - "cct": { - ContentType: "application/x-director", - Compressible: false, - }, - "ccxml": { - ContentType: "application/ccxml+xml", - Compressible: false, - }, - "cdbcmsg": { - ContentType: "application/vnd.contact.cmsg", - Compressible: false, - }, - "cdf": { - ContentType: "application/x-netcdf", - Compressible: false, - }, - "cdkey": { - ContentType: "application/vnd.mediastation.cdkey", - Compressible: false, - }, - "cdmia": { - ContentType: "application/cdmi-capability", - Compressible: false, - }, - "cdmic": { - ContentType: "application/cdmi-container", - Compressible: false, - }, - "cdmid": { - ContentType: "application/cdmi-domain", - Compressible: false, - }, - "cdmio": { - ContentType: "application/cdmi-object", - Compressible: false, - }, - "cdmiq": { - ContentType: "application/cdmi-queue", - Compressible: false, - }, - "cdx": { - ContentType: "chemical/x-cdx", - Compressible: false, - }, - "cdxml": { - ContentType: "application/vnd.chemdraw+xml", - Compressible: false, - }, - "cdy": { - ContentType: "application/vnd.cinderella", - Compressible: false, - }, - "cer": { - ContentType: "application/pkix-cert", - Compressible: false, - }, - "cfs": { - ContentType: "application/x-cfs-compressed", - Compressible: false, - }, - "cgm": { - ContentType: "image/cgm", - Compressible: false, - }, - "chat": { - ContentType: "application/x-chat", - Compressible: false, - }, - "chm": { - ContentType: "application/vnd.ms-htmlhelp", 
- Compressible: false, - }, - "chrt": { - ContentType: "application/vnd.kde.kchart", - Compressible: false, - }, - "cif": { - ContentType: "chemical/x-cif", - Compressible: false, - }, - "cii": { - ContentType: "application/vnd.anser-web-certificate-issue-initiation", - Compressible: false, - }, - "cil": { - ContentType: "application/vnd.ms-artgalry", - Compressible: false, - }, - "cla": { - ContentType: "application/vnd.claymore", - Compressible: false, - }, - "class": { - ContentType: "application/java-vm", - Compressible: false, - }, - "clkk": { - ContentType: "application/vnd.crick.clicker.keyboard", - Compressible: false, - }, - "clkp": { - ContentType: "application/vnd.crick.clicker.palette", - Compressible: false, - }, - "clkt": { - ContentType: "application/vnd.crick.clicker.template", - Compressible: false, - }, - "clkw": { - ContentType: "application/vnd.crick.clicker.wordbank", - Compressible: false, - }, - "clkx": { - ContentType: "application/vnd.crick.clicker", - Compressible: false, - }, - "clp": { - ContentType: "application/x-msclip", - Compressible: false, - }, - "cmc": { - ContentType: "application/vnd.cosmocaller", - Compressible: false, - }, - "cmdf": { - ContentType: "chemical/x-cmdf", - Compressible: false, - }, - "cml": { - ContentType: "chemical/x-cml", - Compressible: false, - }, - "cmp": { - ContentType: "application/vnd.yellowriver-custom-menu", - Compressible: false, - }, - "cmx": { - ContentType: "image/x-cmx", - Compressible: false, - }, - "cod": { - ContentType: "application/vnd.rim.cod", - Compressible: false, - }, - "coffee": { - ContentType: "text/coffeescript", - Compressible: false, - }, - "com": { - ContentType: "application/x-msdownload", - Compressible: false, - }, - "conf": { - ContentType: "text/plain", - Compressible: false, - }, - "cpio": { - ContentType: "application/x-cpio", - Compressible: false, - }, - "cpp": { - ContentType: "text/x-c", - Compressible: false, - }, - "cpt": { - ContentType: "application/mac-compactpro", - Compressible: false, - }, - "crd": { - ContentType: "application/x-mscardfile", - Compressible: false, - }, - "crl": { - ContentType: "application/pkix-crl", - Compressible: false, - }, - "crt": { - ContentType: "application/x-x509-ca-cert", - Compressible: false, - }, - "crx": { - ContentType: "application/x-chrome-extension", - Compressible: false, - }, - "cryptonote": { - ContentType: "application/vnd.rig.cryptonote", - Compressible: false, - }, - "csh": { - ContentType: "application/x-csh", - Compressible: false, - }, - "csl": { - ContentType: "application/vnd.citationstyles.style+xml", - Compressible: false, - }, - "csml": { - ContentType: "chemical/x-csml", - Compressible: false, - }, - "csp": { - ContentType: "application/vnd.commonspace", - Compressible: false, - }, - "css": { - ContentType: "text/css", - Compressible: false, - }, - "cst": { - ContentType: "application/x-director", - Compressible: false, - }, - "csv": { - ContentType: "text/csv", - Compressible: false, - }, - "cu": { - ContentType: "application/cu-seeme", - Compressible: false, - }, - "curl": { - ContentType: "text/vnd.curl", - Compressible: false, - }, - "cww": { - ContentType: "application/prs.cww", - Compressible: false, - }, - "cxt": { - ContentType: "application/x-director", - Compressible: false, - }, - "cxx": { - ContentType: "text/x-c", - Compressible: false, - }, - "dae": { - ContentType: "model/vnd.collada+xml", - Compressible: false, - }, - "daf": { - ContentType: "application/vnd.mobius.daf", - Compressible: false, - }, - "dart": { - 
ContentType: "application/vnd.dart", - Compressible: false, - }, - "dataless": { - ContentType: "application/vnd.fdsn.seed", - Compressible: false, - }, - "davmount": { - ContentType: "application/davmount+xml", - Compressible: false, - }, - "dbk": { - ContentType: "application/docbook+xml", - Compressible: false, - }, - "dcr": { - ContentType: "application/x-director", - Compressible: false, - }, - "dcurl": { - ContentType: "text/vnd.curl.dcurl", - Compressible: false, - }, - "dd2": { - ContentType: "application/vnd.oma.dd2+xml", - Compressible: false, - }, - "ddd": { - ContentType: "application/vnd.fujixerox.ddd", - Compressible: false, - }, - "deb": { - ContentType: "application/x-debian-package", - Compressible: false, - }, - "def": { - ContentType: "text/plain", - Compressible: false, - }, - "deploy": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "der": { - ContentType: "application/x-x509-ca-cert", - Compressible: false, - }, - "dfac": { - ContentType: "application/vnd.dreamfactory", - Compressible: false, - }, - "dgc": { - ContentType: "application/x-dgc-compressed", - Compressible: false, - }, - "dic": { - ContentType: "text/x-c", - Compressible: false, - }, - "dir": { - ContentType: "application/x-director", - Compressible: false, - }, - "dis": { - ContentType: "application/vnd.mobius.dis", - Compressible: false, - }, - "disposition-notification": { - ContentType: "message/disposition-notification", - Compressible: false, - }, - "dist": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "distz": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "djv": { - ContentType: "image/vnd.djvu", - Compressible: false, - }, - "djvu": { - ContentType: "image/vnd.djvu", - Compressible: false, - }, - "dll": { - ContentType: "application/x-msdownload", - Compressible: false, - }, - "dmg": { - ContentType: "application/x-apple-diskimage", - Compressible: false, - }, - "dmp": { - ContentType: "application/vnd.tcpdump.pcap", - Compressible: false, - }, - "dms": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "dna": { - ContentType: "application/vnd.dna", - Compressible: false, - }, - "doc": { - ContentType: "application/msword", - Compressible: false, - }, - "docm": { - ContentType: "application/vnd.ms-word.document.macroenabled.12", - Compressible: false, - }, - "docx": { - ContentType: "application/vnd.openxmlformats-officedocument.wordprocessingml.document", - Compressible: false, - }, - "dot": { - ContentType: "application/msword", - Compressible: false, - }, - "dotm": { - ContentType: "application/vnd.ms-word.template.macroenabled.12", - Compressible: false, - }, - "dotx": { - ContentType: "application/vnd.openxmlformats-officedocument.wordprocessingml.template", - Compressible: false, - }, - "dp": { - ContentType: "application/vnd.osgi.dp", - Compressible: false, - }, - "dpg": { - ContentType: "application/vnd.dpgraph", - Compressible: false, - }, - "dra": { - ContentType: "audio/vnd.dra", - Compressible: false, - }, - "drle": { - ContentType: "image/dicom-rle", - Compressible: false, - }, - "dsc": { - ContentType: "text/prs.lines.tag", - Compressible: false, - }, - "dssc": { - ContentType: "application/dssc+der", - Compressible: false, - }, - "dtb": { - ContentType: "application/x-dtbook+xml", - Compressible: false, - }, - "dtd": { - ContentType: "application/xml-dtd", - Compressible: false, - }, - "dts": { - ContentType: "audio/vnd.dts", - Compressible: false, - }, - "dtshd": { - 
ContentType: "audio/vnd.dts.hd", - Compressible: false, - }, - "dump": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "dvb": { - ContentType: "video/vnd.dvb.file", - Compressible: false, - }, - "dvi": { - ContentType: "application/x-dvi", - Compressible: false, - }, - "dwf": { - ContentType: "model/vnd.dwf", - Compressible: false, - }, - "dwg": { - ContentType: "image/vnd.dwg", - Compressible: false, - }, - "dxf": { - ContentType: "image/vnd.dxf", - Compressible: false, - }, - "dxp": { - ContentType: "application/vnd.spotfire.dxp", - Compressible: false, - }, - "dxr": { - ContentType: "application/x-director", - Compressible: false, - }, - "ear": { - ContentType: "application/java-archive", - Compressible: false, - }, - "ecelp4800": { - ContentType: "audio/vnd.nuera.ecelp4800", - Compressible: false, - }, - "ecelp7470": { - ContentType: "audio/vnd.nuera.ecelp7470", - Compressible: false, - }, - "ecelp9600": { - ContentType: "audio/vnd.nuera.ecelp9600", - Compressible: false, - }, - "ecma": { - ContentType: "application/ecmascript", - Compressible: false, - }, - "edm": { - ContentType: "application/vnd.novadigm.edm", - Compressible: false, - }, - "edx": { - ContentType: "application/vnd.novadigm.edx", - Compressible: false, - }, - "efif": { - ContentType: "application/vnd.picsel", - Compressible: false, - }, - "ei6": { - ContentType: "application/vnd.pg.osasli", - Compressible: false, - }, - "elc": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "emf": { - ContentType: "image/emf", - Compressible: false, - }, - "eml": { - ContentType: "message/rfc822", - Compressible: false, - }, - "emma": { - ContentType: "application/emma+xml", - Compressible: false, - }, - "emz": { - ContentType: "application/x-msmetafile", - Compressible: false, - }, - "eol": { - ContentType: "audio/vnd.digital-winds", - Compressible: false, - }, - "eot": { - ContentType: "application/vnd.ms-fontobject", - Compressible: false, - }, - "eps": { - ContentType: "application/postscript", - Compressible: false, - }, - "epub": { - ContentType: "application/epub+zip", - Compressible: false, - }, - "es": { - ContentType: "application/ecmascript", - Compressible: false, - }, - "es3": { - ContentType: "application/vnd.eszigno3+xml", - Compressible: false, - }, - "esa": { - ContentType: "application/vnd.osgi.subsystem", - Compressible: false, - }, - "esf": { - ContentType: "application/vnd.epson.esf", - Compressible: false, - }, - "et3": { - ContentType: "application/vnd.eszigno3+xml", - Compressible: false, - }, - "etx": { - ContentType: "text/x-setext", - Compressible: false, - }, - "eva": { - ContentType: "application/x-eva", - Compressible: false, - }, - "evy": { - ContentType: "application/x-envoy", - Compressible: false, - }, - "exe": { - ContentType: "application/x-msdownload", - Compressible: false, - }, - "exi": { - ContentType: "application/exi", - Compressible: false, - }, - "exr": { - ContentType: "image/aces", - Compressible: false, - }, - "ext": { - ContentType: "application/vnd.novadigm.ext", - Compressible: false, - }, - "ez": { - ContentType: "application/andrew-inset", - Compressible: false, - }, - "ez2": { - ContentType: "application/vnd.ezpix-album", - Compressible: false, - }, - "ez3": { - ContentType: "application/vnd.ezpix-package", - Compressible: false, - }, - "f": { - ContentType: "text/x-fortran", - Compressible: false, - }, - "f4v": { - ContentType: "video/x-f4v", - Compressible: false, - }, - "f77": { - ContentType: "text/x-fortran", - Compressible: 
false, - }, - "f90": { - ContentType: "text/x-fortran", - Compressible: false, - }, - "fbs": { - ContentType: "image/vnd.fastbidsheet", - Compressible: false, - }, - "fcdt": { - ContentType: "application/vnd.adobe.formscentral.fcdt", - Compressible: false, - }, - "fcs": { - ContentType: "application/vnd.isac.fcs", - Compressible: false, - }, - "fdf": { - ContentType: "application/vnd.fdf", - Compressible: false, - }, - "fe_launch": { - ContentType: "application/vnd.denovo.fcselayout-link", - Compressible: false, - }, - "fg5": { - ContentType: "application/vnd.fujitsu.oasysgp", - Compressible: false, - }, - "fgd": { - ContentType: "application/x-director", - Compressible: false, - }, - "fh": { - ContentType: "image/x-freehand", - Compressible: false, - }, - "fh4": { - ContentType: "image/x-freehand", - Compressible: false, - }, - "fh5": { - ContentType: "image/x-freehand", - Compressible: false, - }, - "fh7": { - ContentType: "image/x-freehand", - Compressible: false, - }, - "fhc": { - ContentType: "image/x-freehand", - Compressible: false, - }, - "fig": { - ContentType: "application/x-xfig", - Compressible: false, - }, - "fits": { - ContentType: "image/fits", - Compressible: false, - }, - "flac": { - ContentType: "audio/x-flac", - Compressible: false, - }, - "fli": { - ContentType: "video/x-fli", - Compressible: false, - }, - "flo": { - ContentType: "application/vnd.micrografx.flo", - Compressible: false, - }, - "flv": { - ContentType: "video/x-flv", - Compressible: false, - }, - "flw": { - ContentType: "application/vnd.kde.kivio", - Compressible: false, - }, - "flx": { - ContentType: "text/vnd.fmi.flexstor", - Compressible: false, - }, - "fly": { - ContentType: "text/vnd.fly", - Compressible: false, - }, - "fm": { - ContentType: "application/vnd.framemaker", - Compressible: false, - }, - "fnc": { - ContentType: "application/vnd.frogans.fnc", - Compressible: false, - }, - "for": { - ContentType: "text/x-fortran", - Compressible: false, - }, - "fpx": { - ContentType: "image/vnd.fpx", - Compressible: false, - }, - "frame": { - ContentType: "application/vnd.framemaker", - Compressible: false, - }, - "fsc": { - ContentType: "application/vnd.fsc.weblaunch", - Compressible: false, - }, - "fst": { - ContentType: "image/vnd.fst", - Compressible: false, - }, - "ftc": { - ContentType: "application/vnd.fluxtime.clip", - Compressible: false, - }, - "fti": { - ContentType: "application/vnd.anser-web-funds-transfer-initiation", - Compressible: false, - }, - "fvt": { - ContentType: "video/vnd.fvt", - Compressible: false, - }, - "fxp": { - ContentType: "application/vnd.adobe.fxp", - Compressible: false, - }, - "fxpl": { - ContentType: "application/vnd.adobe.fxp", - Compressible: false, - }, - "fzs": { - ContentType: "application/vnd.fuzzysheet", - Compressible: false, - }, - "g2w": { - ContentType: "application/vnd.geoplan", - Compressible: false, - }, - "g3": { - ContentType: "image/g3fax", - Compressible: false, - }, - "g3w": { - ContentType: "application/vnd.geospace", - Compressible: false, - }, - "gac": { - ContentType: "application/vnd.groove-account", - Compressible: false, - }, - "gam": { - ContentType: "application/x-tads", - Compressible: false, - }, - "gbr": { - ContentType: "application/rpki-ghostbusters", - Compressible: false, - }, - "gca": { - ContentType: "application/x-gca-compressed", - Compressible: false, - }, - "gdl": { - ContentType: "model/vnd.gdl", - Compressible: false, - }, - "gdoc": { - ContentType: "application/vnd.google-apps.document", - Compressible: false, - }, - "geo": { - 
ContentType: "application/vnd.dynageo", - Compressible: false, - }, - "geojson": { - ContentType: "application/geo+json", - Compressible: false, - }, - "gex": { - ContentType: "application/vnd.geometry-explorer", - Compressible: false, - }, - "ggb": { - ContentType: "application/vnd.geogebra.file", - Compressible: false, - }, - "ggt": { - ContentType: "application/vnd.geogebra.tool", - Compressible: false, - }, - "ghf": { - ContentType: "application/vnd.groove-help", - Compressible: false, - }, - "gif": { - ContentType: "image/gif", - Compressible: false, - }, - "gim": { - ContentType: "application/vnd.groove-identity-message", - Compressible: false, - }, - "glb": { - ContentType: "model/gltf-binary", - Compressible: false, - }, - "gltf": { - ContentType: "model/gltf+json", - Compressible: false, - }, - "gml": { - ContentType: "application/gml+xml", - Compressible: false, - }, - "gmx": { - ContentType: "application/vnd.gmx", - Compressible: false, - }, - "gnumeric": { - ContentType: "application/x-gnumeric", - Compressible: false, - }, - "gph": { - ContentType: "application/vnd.flographit", - Compressible: false, - }, - "gpx": { - ContentType: "application/gpx+xml", - Compressible: false, - }, - "gqf": { - ContentType: "application/vnd.grafeq", - Compressible: false, - }, - "gqs": { - ContentType: "application/vnd.grafeq", - Compressible: false, - }, - "gram": { - ContentType: "application/srgs", - Compressible: false, - }, - "gramps": { - ContentType: "application/x-gramps-xml", - Compressible: false, - }, - "gre": { - ContentType: "application/vnd.geometry-explorer", - Compressible: false, - }, - "grv": { - ContentType: "application/vnd.groove-injector", - Compressible: false, - }, - "grxml": { - ContentType: "application/srgs+xml", - Compressible: false, - }, - "gsf": { - ContentType: "application/x-font-ghostscript", - Compressible: false, - }, - "gsheet": { - ContentType: "application/vnd.google-apps.spreadsheet", - Compressible: false, - }, - "gslides": { - ContentType: "application/vnd.google-apps.presentation", - Compressible: false, - }, - "gtar": { - ContentType: "application/x-gtar", - Compressible: false, - }, - "gtm": { - ContentType: "application/vnd.groove-tool-message", - Compressible: false, - }, - "gtw": { - ContentType: "model/vnd.gtw", - Compressible: false, - }, - "gv": { - ContentType: "text/vnd.graphviz", - Compressible: false, - }, - "gxf": { - ContentType: "application/gxf", - Compressible: false, - }, - "gxt": { - ContentType: "application/vnd.geonext", - Compressible: false, - }, - "gz": { - ContentType: "application/gzip", - Compressible: false, - }, - "h": { - ContentType: "text/x-c", - Compressible: false, - }, - "h261": { - ContentType: "video/h261", - Compressible: false, - }, - "h263": { - ContentType: "video/h263", - Compressible: false, - }, - "h264": { - ContentType: "video/h264", - Compressible: false, - }, - "hal": { - ContentType: "application/vnd.hal+xml", - Compressible: false, - }, - "hbci": { - ContentType: "application/vnd.hbci", - Compressible: false, - }, - "hbs": { - ContentType: "text/x-handlebars-template", - Compressible: false, - }, - "hdd": { - ContentType: "application/x-virtualbox-hdd", - Compressible: false, - }, - "hdf": { - ContentType: "application/x-hdf", - Compressible: false, - }, - "heic": { - ContentType: "image/heic", - Compressible: false, - }, - "heics": { - ContentType: "image/heic-sequence", - Compressible: false, - }, - "heif": { - ContentType: "image/heif", - Compressible: false, - }, - "heifs": { - ContentType: 
"image/heif-sequence", - Compressible: false, - }, - "hh": { - ContentType: "text/x-c", - Compressible: false, - }, - "hjson": { - ContentType: "application/hjson", - Compressible: false, - }, - "hlp": { - ContentType: "application/winhlp", - Compressible: false, - }, - "hpgl": { - ContentType: "application/vnd.hp-hpgl", - Compressible: false, - }, - "hpid": { - ContentType: "application/vnd.hp-hpid", - Compressible: false, - }, - "hps": { - ContentType: "application/vnd.hp-hps", - Compressible: false, - }, - "hqx": { - ContentType: "application/mac-binhex40", - Compressible: false, - }, - "htc": { - ContentType: "text/x-component", - Compressible: false, - }, - "htke": { - ContentType: "application/vnd.kenameaapp", - Compressible: false, - }, - "htm": { - ContentType: "text/html", - Compressible: false, - }, - "html": { - ContentType: "text/html", - Compressible: false, - }, - "hvd": { - ContentType: "application/vnd.yamaha.hv-dic", - Compressible: false, - }, - "hvp": { - ContentType: "application/vnd.yamaha.hv-voice", - Compressible: false, - }, - "hvs": { - ContentType: "application/vnd.yamaha.hv-script", - Compressible: false, - }, - "i2g": { - ContentType: "application/vnd.intergeo", - Compressible: false, - }, - "icc": { - ContentType: "application/vnd.iccprofile", - Compressible: false, - }, - "ice": { - ContentType: "x-conference/x-cooltalk", - Compressible: false, - }, - "icm": { - ContentType: "application/vnd.iccprofile", - Compressible: false, - }, - "ico": { - ContentType: "image/x-icon", - Compressible: false, - }, - "ics": { - ContentType: "text/calendar", - Compressible: false, - }, - "ief": { - ContentType: "image/ief", - Compressible: false, - }, - "ifb": { - ContentType: "text/calendar", - Compressible: false, - }, - "ifm": { - ContentType: "application/vnd.shana.informed.formdata", - Compressible: false, - }, - "iges": { - ContentType: "model/iges", - Compressible: false, - }, - "igl": { - ContentType: "application/vnd.igloader", - Compressible: false, - }, - "igm": { - ContentType: "application/vnd.insors.igm", - Compressible: false, - }, - "igs": { - ContentType: "model/iges", - Compressible: false, - }, - "igx": { - ContentType: "application/vnd.micrografx.igx", - Compressible: false, - }, - "iif": { - ContentType: "application/vnd.shana.informed.interchange", - Compressible: false, - }, - "img": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "imp": { - ContentType: "application/vnd.accpac.simply.imp", - Compressible: false, - }, - "ims": { - ContentType: "application/vnd.ms-ims", - Compressible: false, - }, - "in": { - ContentType: "text/plain", - Compressible: false, - }, - "ini": { - ContentType: "text/plain", - Compressible: false, - }, - "ink": { - ContentType: "application/inkml+xml", - Compressible: false, - }, - "inkml": { - ContentType: "application/inkml+xml", - Compressible: false, - }, - "install": { - ContentType: "application/x-install-instructions", - Compressible: false, - }, - "iota": { - ContentType: "application/vnd.astraea-software.iota", - Compressible: false, - }, - "ipfix": { - ContentType: "application/ipfix", - Compressible: false, - }, - "ipk": { - ContentType: "application/vnd.shana.informed.package", - Compressible: false, - }, - "irm": { - ContentType: "application/vnd.ibm.rights-management", - Compressible: false, - }, - "irp": { - ContentType: "application/vnd.irepository.package+xml", - Compressible: false, - }, - "iso": { - ContentType: "application/x-iso9660-image", - Compressible: false, - }, - "itp": { 
- ContentType: "application/vnd.shana.informed.formtemplate", - Compressible: false, - }, - "ivp": { - ContentType: "application/vnd.immervision-ivp", - Compressible: false, - }, - "ivu": { - ContentType: "application/vnd.immervision-ivu", - Compressible: false, - }, - "jad": { - ContentType: "text/vnd.sun.j2me.app-descriptor", - Compressible: false, - }, - "jade": { - ContentType: "text/jade", - Compressible: false, - }, - "jam": { - ContentType: "application/vnd.jam", - Compressible: false, - }, - "jar": { - ContentType: "application/java-archive", - Compressible: false, - }, - "jardiff": { - ContentType: "application/x-java-archive-diff", - Compressible: false, - }, - "java": { - ContentType: "text/x-java-source", - Compressible: false, - }, - "jisp": { - ContentType: "application/vnd.jisp", - Compressible: false, - }, - "jls": { - ContentType: "image/jls", - Compressible: false, - }, - "jlt": { - ContentType: "application/vnd.hp-jlyt", - Compressible: false, - }, - "jng": { - ContentType: "image/x-jng", - Compressible: false, - }, - "jnlp": { - ContentType: "application/x-java-jnlp-file", - Compressible: false, - }, - "joda": { - ContentType: "application/vnd.joost.joda-archive", - Compressible: false, - }, - "jp2": { - ContentType: "image/jp2", - Compressible: false, - }, - "jpe": { - ContentType: "image/jpeg", - Compressible: false, - }, - "jpeg": { - ContentType: "image/jpeg", - Compressible: false, - }, - "jpf": { - ContentType: "image/jpx", - Compressible: false, - }, - "jpg": { - ContentType: "image/jpeg", - Compressible: false, - }, - "jpg2": { - ContentType: "image/jp2", - Compressible: false, - }, - "jpgm": { - ContentType: "video/jpm", - Compressible: false, - }, - "jpgv": { - ContentType: "video/jpeg", - Compressible: false, - }, - "jpm": { - ContentType: "video/jpm", - Compressible: false, - }, - "jpx": { - ContentType: "image/jpx", - Compressible: false, - }, - "js": { - ContentType: "application/javascript", - Compressible: false, - }, - "json": { - ContentType: "application/json", - Compressible: false, - }, - "json5": { - ContentType: "application/json5", - Compressible: false, - }, - "jsonld": { - ContentType: "application/ld+json", - Compressible: false, - }, - "jsonml": { - ContentType: "application/jsonml+json", - Compressible: false, - }, - "jsx": { - ContentType: "text/jsx", - Compressible: false, - }, - "kar": { - ContentType: "audio/midi", - Compressible: false, - }, - "karbon": { - ContentType: "application/vnd.kde.karbon", - Compressible: false, - }, - "keynote": { - ContentType: "application/vnd.apple.keynote", - Compressible: false, - }, - "kfo": { - ContentType: "application/vnd.kde.kformula", - Compressible: false, - }, - "kia": { - ContentType: "application/vnd.kidspiration", - Compressible: false, - }, - "kml": { - ContentType: "application/vnd.google-earth.kml+xml", - Compressible: false, - }, - "kmz": { - ContentType: "application/vnd.google-earth.kmz", - Compressible: false, - }, - "kne": { - ContentType: "application/vnd.kinar", - Compressible: false, - }, - "knp": { - ContentType: "application/vnd.kinar", - Compressible: false, - }, - "kon": { - ContentType: "application/vnd.kde.kontour", - Compressible: false, - }, - "kpr": { - ContentType: "application/vnd.kde.kpresenter", - Compressible: false, - }, - "kpt": { - ContentType: "application/vnd.kde.kpresenter", - Compressible: false, - }, - "kpxx": { - ContentType: "application/vnd.ds-keypoint", - Compressible: false, - }, - "ksp": { - ContentType: "application/vnd.kde.kspread", - Compressible: 
false, - }, - "ktr": { - ContentType: "application/vnd.kahootz", - Compressible: false, - }, - "ktx": { - ContentType: "image/ktx", - Compressible: false, - }, - "ktz": { - ContentType: "application/vnd.kahootz", - Compressible: false, - }, - "kwd": { - ContentType: "application/vnd.kde.kword", - Compressible: false, - }, - "kwt": { - ContentType: "application/vnd.kde.kword", - Compressible: false, - }, - "lasxml": { - ContentType: "application/vnd.las.las+xml", - Compressible: false, - }, - "latex": { - ContentType: "application/x-latex", - Compressible: false, - }, - "lbd": { - ContentType: "application/vnd.llamagraphics.life-balance.desktop", - Compressible: false, - }, - "lbe": { - ContentType: "application/vnd.llamagraphics.life-balance.exchange+xml", - Compressible: false, - }, - "les": { - ContentType: "application/vnd.hhe.lesson-player", - Compressible: false, - }, - "less": { - ContentType: "text/less", - Compressible: false, - }, - "lha": { - ContentType: "application/x-lzh-compressed", - Compressible: false, - }, - "link66": { - ContentType: "application/vnd.route66.link66+xml", - Compressible: false, - }, - "list": { - ContentType: "text/plain", - Compressible: false, - }, - "list3820": { - ContentType: "application/vnd.ibm.modcap", - Compressible: false, - }, - "listafp": { - ContentType: "application/vnd.ibm.modcap", - Compressible: false, - }, - "litcoffee": { - ContentType: "text/coffeescript", - Compressible: false, - }, - "lnk": { - ContentType: "application/x-ms-shortcut", - Compressible: false, - }, - "log": { - ContentType: "text/plain", - Compressible: false, - }, - "lostxml": { - ContentType: "application/lost+xml", - Compressible: false, - }, - "lrf": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "lrm": { - ContentType: "application/vnd.ms-lrm", - Compressible: false, - }, - "ltf": { - ContentType: "application/vnd.frogans.ltf", - Compressible: false, - }, - "lua": { - ContentType: "text/x-lua", - Compressible: false, - }, - "luac": { - ContentType: "application/x-lua-bytecode", - Compressible: false, - }, - "lvp": { - ContentType: "audio/vnd.lucent.voice", - Compressible: false, - }, - "lwp": { - ContentType: "application/vnd.lotus-wordpro", - Compressible: false, - }, - "lzh": { - ContentType: "application/x-lzh-compressed", - Compressible: false, - }, - "m13": { - ContentType: "application/x-msmediaview", - Compressible: false, - }, - "m14": { - ContentType: "application/x-msmediaview", - Compressible: false, - }, - "m1v": { - ContentType: "video/mpeg", - Compressible: false, - }, - "m21": { - ContentType: "application/mp21", - Compressible: false, - }, - "m2a": { - ContentType: "audio/mpeg", - Compressible: false, - }, - "m2v": { - ContentType: "video/mpeg", - Compressible: false, - }, - "m3a": { - ContentType: "audio/mpeg", - Compressible: false, - }, - "m3u": { - ContentType: "audio/x-mpegurl", - Compressible: false, - }, - "m3u8": { - ContentType: "application/vnd.apple.mpegurl", - Compressible: false, - }, - "m4a": { - ContentType: "audio/x-m4a", - Compressible: false, - }, - "m4p": { - ContentType: "application/mp4", - Compressible: false, - }, - "m4u": { - ContentType: "video/vnd.mpegurl", - Compressible: false, - }, - "m4v": { - ContentType: "video/x-m4v", - Compressible: false, - }, - "ma": { - ContentType: "application/mathematica", - Compressible: false, - }, - "mads": { - ContentType: "application/mads+xml", - Compressible: false, - }, - "mag": { - ContentType: "application/vnd.ecowin.chart", - Compressible: false, - }, - 
"maker": { - ContentType: "application/vnd.framemaker", - Compressible: false, - }, - "man": { - ContentType: "text/troff", - Compressible: false, - }, - "manifest": { - ContentType: "text/cache-manifest", - Compressible: false, - }, - "map": { - ContentType: "application/json", - Compressible: false, - }, - "mar": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "markdown": { - ContentType: "text/markdown", - Compressible: false, - }, - "mathml": { - ContentType: "application/mathml+xml", - Compressible: false, - }, - "mb": { - ContentType: "application/mathematica", - Compressible: false, - }, - "mbk": { - ContentType: "application/vnd.mobius.mbk", - Compressible: false, - }, - "mbox": { - ContentType: "application/mbox", - Compressible: false, - }, - "mc1": { - ContentType: "application/vnd.medcalcdata", - Compressible: false, - }, - "mcd": { - ContentType: "application/vnd.mcd", - Compressible: false, - }, - "mcurl": { - ContentType: "text/vnd.curl.mcurl", - Compressible: false, - }, - "md": { - ContentType: "text/markdown", - Compressible: false, - }, - "mdb": { - ContentType: "application/x-msaccess", - Compressible: false, - }, - "mdi": { - ContentType: "image/vnd.ms-modi", - Compressible: false, - }, - "me": { - ContentType: "text/troff", - Compressible: false, - }, - "mesh": { - ContentType: "model/mesh", - Compressible: false, - }, - "meta4": { - ContentType: "application/metalink4+xml", - Compressible: false, - }, - "metalink": { - ContentType: "application/metalink+xml", - Compressible: false, - }, - "mets": { - ContentType: "application/mets+xml", - Compressible: false, - }, - "mfm": { - ContentType: "application/vnd.mfmp", - Compressible: false, - }, - "mft": { - ContentType: "application/rpki-manifest", - Compressible: false, - }, - "mgp": { - ContentType: "application/vnd.osgeo.mapguide.package", - Compressible: false, - }, - "mgz": { - ContentType: "application/vnd.proteus.magazine", - Compressible: false, - }, - "mid": { - ContentType: "audio/midi", - Compressible: false, - }, - "midi": { - ContentType: "audio/midi", - Compressible: false, - }, - "mie": { - ContentType: "application/x-mie", - Compressible: false, - }, - "mif": { - ContentType: "application/vnd.mif", - Compressible: false, - }, - "mime": { - ContentType: "message/rfc822", - Compressible: false, - }, - "mj2": { - ContentType: "video/mj2", - Compressible: false, - }, - "mjp2": { - ContentType: "video/mj2", - Compressible: false, - }, - "mjs": { - ContentType: "application/javascript", - Compressible: false, - }, - "mk3d": { - ContentType: "video/x-matroska", - Compressible: false, - }, - "mka": { - ContentType: "audio/x-matroska", - Compressible: false, - }, - "mkd": { - ContentType: "text/x-markdown", - Compressible: false, - }, - "mks": { - ContentType: "video/x-matroska", - Compressible: false, - }, - "mkv": { - ContentType: "video/x-matroska", - Compressible: false, - }, - "mlp": { - ContentType: "application/vnd.dolby.mlp", - Compressible: false, - }, - "mmd": { - ContentType: "application/vnd.chipnuts.karaoke-mmd", - Compressible: false, - }, - "mmf": { - ContentType: "application/vnd.smaf", - Compressible: false, - }, - "mml": { - ContentType: "text/mathml", - Compressible: false, - }, - "mmr": { - ContentType: "image/vnd.fujixerox.edmics-mmr", - Compressible: false, - }, - "mng": { - ContentType: "video/x-mng", - Compressible: false, - }, - "mny": { - ContentType: "application/x-msmoney", - Compressible: false, - }, - "mobi": { - ContentType: 
"application/x-mobipocket-ebook", - Compressible: false, - }, - "mods": { - ContentType: "application/mods+xml", - Compressible: false, - }, - "mov": { - ContentType: "video/quicktime", - Compressible: false, - }, - "movie": { - ContentType: "video/x-sgi-movie", - Compressible: false, - }, - "mp2": { - ContentType: "audio/mpeg", - Compressible: false, - }, - "mp21": { - ContentType: "application/mp21", - Compressible: false, - }, - "mp2a": { - ContentType: "audio/mpeg", - Compressible: false, - }, - "mp3": { - ContentType: "audio/mpeg", - Compressible: false, - }, - "mp4": { - ContentType: "video/mp4", - Compressible: false, - }, - "mp4a": { - ContentType: "audio/mp4", - Compressible: false, - }, - "mp4s": { - ContentType: "application/mp4", - Compressible: false, - }, - "mp4v": { - ContentType: "video/mp4", - Compressible: false, - }, - "mpc": { - ContentType: "application/vnd.mophun.certificate", - Compressible: false, - }, - "mpd": { - ContentType: "application/dash+xml", - Compressible: false, - }, - "mpe": { - ContentType: "video/mpeg", - Compressible: false, - }, - "mpeg": { - ContentType: "video/mpeg", - Compressible: false, - }, - "mpg": { - ContentType: "video/mpeg", - Compressible: false, - }, - "mpg4": { - ContentType: "video/mp4", - Compressible: false, - }, - "mpga": { - ContentType: "audio/mpeg", - Compressible: false, - }, - "mpkg": { - ContentType: "application/vnd.apple.installer+xml", - Compressible: false, - }, - "mpm": { - ContentType: "application/vnd.blueice.multipass", - Compressible: false, - }, - "mpn": { - ContentType: "application/vnd.mophun.application", - Compressible: false, - }, - "mpp": { - ContentType: "application/vnd.ms-project", - Compressible: false, - }, - "mpt": { - ContentType: "application/vnd.ms-project", - Compressible: false, - }, - "mpy": { - ContentType: "application/vnd.ibm.minipay", - Compressible: false, - }, - "mqy": { - ContentType: "application/vnd.mobius.mqy", - Compressible: false, - }, - "mrc": { - ContentType: "application/marc", - Compressible: false, - }, - "mrcx": { - ContentType: "application/marcxml+xml", - Compressible: false, - }, - "ms": { - ContentType: "text/troff", - Compressible: false, - }, - "mscml": { - ContentType: "application/mediaservercontrol+xml", - Compressible: false, - }, - "mseed": { - ContentType: "application/vnd.fdsn.mseed", - Compressible: false, - }, - "mseq": { - ContentType: "application/vnd.mseq", - Compressible: false, - }, - "msf": { - ContentType: "application/vnd.epson.msf", - Compressible: false, - }, - "msg": { - ContentType: "application/vnd.ms-outlook", - Compressible: false, - }, - "msh": { - ContentType: "model/mesh", - Compressible: false, - }, - "msi": { - ContentType: "application/x-msdownload", - Compressible: false, - }, - "msl": { - ContentType: "application/vnd.mobius.msl", - Compressible: false, - }, - "msm": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "msp": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "msty": { - ContentType: "application/vnd.muvee.style", - Compressible: false, - }, - "mts": { - ContentType: "model/vnd.mts", - Compressible: false, - }, - "mus": { - ContentType: "application/vnd.musician", - Compressible: false, - }, - "musicxml": { - ContentType: "application/vnd.recordare.musicxml+xml", - Compressible: false, - }, - "mvb": { - ContentType: "application/x-msmediaview", - Compressible: false, - }, - "mwf": { - ContentType: "application/vnd.mfer", - Compressible: false, - }, - "mxf": { - ContentType: 
"application/mxf", - Compressible: false, - }, - "mxl": { - ContentType: "application/vnd.recordare.musicxml", - Compressible: false, - }, - "mxml": { - ContentType: "application/xv+xml", - Compressible: false, - }, - "mxs": { - ContentType: "application/vnd.triscape.mxs", - Compressible: false, - }, - "mxu": { - ContentType: "video/vnd.mpegurl", - Compressible: false, - }, - "n-gage": { - ContentType: "application/vnd.nokia.n-gage.symbian.install", - Compressible: false, - }, - "n3": { - ContentType: "text/n3", - Compressible: false, - }, - "nb": { - ContentType: "application/mathematica", - Compressible: false, - }, - "nbp": { - ContentType: "application/vnd.wolfram.player", - Compressible: false, - }, - "nc": { - ContentType: "application/x-netcdf", - Compressible: false, - }, - "ncx": { - ContentType: "application/x-dtbncx+xml", - Compressible: false, - }, - "nfo": { - ContentType: "text/x-nfo", - Compressible: false, - }, - "ngdat": { - ContentType: "application/vnd.nokia.n-gage.data", - Compressible: false, - }, - "nitf": { - ContentType: "application/vnd.nitf", - Compressible: false, - }, - "nlu": { - ContentType: "application/vnd.neurolanguage.nlu", - Compressible: false, - }, - "nml": { - ContentType: "application/vnd.enliven", - Compressible: false, - }, - "nnd": { - ContentType: "application/vnd.noblenet-directory", - Compressible: false, - }, - "nns": { - ContentType: "application/vnd.noblenet-sealer", - Compressible: false, - }, - "nnw": { - ContentType: "application/vnd.noblenet-web", - Compressible: false, - }, - "npx": { - ContentType: "image/vnd.net-fpx", - Compressible: false, - }, - "nsc": { - ContentType: "application/x-conference", - Compressible: false, - }, - "nsf": { - ContentType: "application/vnd.lotus-notes", - Compressible: false, - }, - "ntf": { - ContentType: "application/vnd.nitf", - Compressible: false, - }, - "numbers": { - ContentType: "application/vnd.apple.numbers", - Compressible: false, - }, - "nzb": { - ContentType: "application/x-nzb", - Compressible: false, - }, - "oa2": { - ContentType: "application/vnd.fujitsu.oasys2", - Compressible: false, - }, - "oa3": { - ContentType: "application/vnd.fujitsu.oasys3", - Compressible: false, - }, - "oas": { - ContentType: "application/vnd.fujitsu.oasys", - Compressible: false, - }, - "obd": { - ContentType: "application/x-msbinder", - Compressible: false, - }, - "obj": { - ContentType: "application/x-tgif", - Compressible: false, - }, - "oda": { - ContentType: "application/oda", - Compressible: false, - }, - "odb": { - ContentType: "application/vnd.oasis.opendocument.database", - Compressible: false, - }, - "odc": { - ContentType: "application/vnd.oasis.opendocument.chart", - Compressible: false, - }, - "odf": { - ContentType: "application/vnd.oasis.opendocument.formula", - Compressible: false, - }, - "odft": { - ContentType: "application/vnd.oasis.opendocument.formula-template", - Compressible: false, - }, - "odg": { - ContentType: "application/vnd.oasis.opendocument.graphics", - Compressible: false, - }, - "odi": { - ContentType: "application/vnd.oasis.opendocument.image", - Compressible: false, - }, - "odm": { - ContentType: "application/vnd.oasis.opendocument.text-master", - Compressible: false, - }, - "odp": { - ContentType: "application/vnd.oasis.opendocument.presentation", - Compressible: false, - }, - "ods": { - ContentType: "application/vnd.oasis.opendocument.spreadsheet", - Compressible: false, - }, - "odt": { - ContentType: "application/vnd.oasis.opendocument.text", - Compressible: false, - }, - 
"oga": { - ContentType: "audio/ogg", - Compressible: false, - }, - "ogg": { - ContentType: "audio/ogg", - Compressible: false, - }, - "ogv": { - ContentType: "video/ogg", - Compressible: false, - }, - "ogx": { - ContentType: "application/ogg", - Compressible: false, - }, - "omdoc": { - ContentType: "application/omdoc+xml", - Compressible: false, - }, - "onepkg": { - ContentType: "application/onenote", - Compressible: false, - }, - "onetmp": { - ContentType: "application/onenote", - Compressible: false, - }, - "onetoc": { - ContentType: "application/onenote", - Compressible: false, - }, - "onetoc2": { - ContentType: "application/onenote", - Compressible: false, - }, - "opf": { - ContentType: "application/oebps-package+xml", - Compressible: false, - }, - "opml": { - ContentType: "text/x-opml", - Compressible: false, - }, - "oprc": { - ContentType: "application/vnd.palm", - Compressible: false, - }, - "org": { - ContentType: "text/x-org", - Compressible: false, - }, - "osf": { - ContentType: "application/vnd.yamaha.openscoreformat", - Compressible: false, - }, - "osfpvg": { - ContentType: "application/vnd.yamaha.openscoreformat.osfpvg+xml", - Compressible: false, - }, - "otc": { - ContentType: "application/vnd.oasis.opendocument.chart-template", - Compressible: false, - }, - "otf": { - ContentType: "font/otf", - Compressible: false, - }, - "otg": { - ContentType: "application/vnd.oasis.opendocument.graphics-template", - Compressible: false, - }, - "oth": { - ContentType: "application/vnd.oasis.opendocument.text-web", - Compressible: false, - }, - "oti": { - ContentType: "application/vnd.oasis.opendocument.image-template", - Compressible: false, - }, - "otp": { - ContentType: "application/vnd.oasis.opendocument.presentation-template", - Compressible: false, - }, - "ots": { - ContentType: "application/vnd.oasis.opendocument.spreadsheet-template", - Compressible: false, - }, - "ott": { - ContentType: "application/vnd.oasis.opendocument.text-template", - Compressible: false, - }, - "ova": { - ContentType: "application/x-virtualbox-ova", - Compressible: false, - }, - "ovf": { - ContentType: "application/x-virtualbox-ovf", - Compressible: false, - }, - "owl": { - ContentType: "application/rdf+xml", - Compressible: false, - }, - "oxps": { - ContentType: "application/oxps", - Compressible: false, - }, - "oxt": { - ContentType: "application/vnd.openofficeorg.extension", - Compressible: false, - }, - "p": { - ContentType: "text/x-pascal", - Compressible: false, - }, - "p10": { - ContentType: "application/pkcs10", - Compressible: false, - }, - "p12": { - ContentType: "application/x-pkcs12", - Compressible: false, - }, - "p7b": { - ContentType: "application/x-pkcs7-certificates", - Compressible: false, - }, - "p7c": { - ContentType: "application/pkcs7-mime", - Compressible: false, - }, - "p7m": { - ContentType: "application/pkcs7-mime", - Compressible: false, - }, - "p7r": { - ContentType: "application/x-pkcs7-certreqresp", - Compressible: false, - }, - "p7s": { - ContentType: "application/pkcs7-signature", - Compressible: false, - }, - "p8": { - ContentType: "application/pkcs8", - Compressible: false, - }, - "pac": { - ContentType: "application/x-ns-proxy-autoconfig", - Compressible: false, - }, - "pages": { - ContentType: "application/vnd.apple.pages", - Compressible: false, - }, - "pas": { - ContentType: "text/x-pascal", - Compressible: false, - }, - "paw": { - ContentType: "application/vnd.pawaafile", - Compressible: false, - }, - "pbd": { - ContentType: "application/vnd.powerbuilder6", - 
Compressible: false, - }, - "pbm": { - ContentType: "image/x-portable-bitmap", - Compressible: false, - }, - "pcap": { - ContentType: "application/vnd.tcpdump.pcap", - Compressible: false, - }, - "pcf": { - ContentType: "application/x-font-pcf", - Compressible: false, - }, - "pcl": { - ContentType: "application/vnd.hp-pcl", - Compressible: false, - }, - "pclxl": { - ContentType: "application/vnd.hp-pclxl", - Compressible: false, - }, - "pct": { - ContentType: "image/x-pict", - Compressible: false, - }, - "pcurl": { - ContentType: "application/vnd.curl.pcurl", - Compressible: false, - }, - "pcx": { - ContentType: "image/x-pcx", - Compressible: false, - }, - "pdb": { - ContentType: "application/x-pilot", - Compressible: false, - }, - "pde": { - ContentType: "text/x-processing", - Compressible: false, - }, - "pdf": { - ContentType: "application/pdf", - Compressible: false, - }, - "pem": { - ContentType: "application/x-x509-ca-cert", - Compressible: false, - }, - "pfa": { - ContentType: "application/x-font-type1", - Compressible: false, - }, - "pfb": { - ContentType: "application/x-font-type1", - Compressible: false, - }, - "pfm": { - ContentType: "application/x-font-type1", - Compressible: false, - }, - "pfr": { - ContentType: "application/font-tdpfr", - Compressible: false, - }, - "pfx": { - ContentType: "application/x-pkcs12", - Compressible: false, - }, - "pgm": { - ContentType: "image/x-portable-graymap", - Compressible: false, - }, - "pgn": { - ContentType: "application/x-chess-pgn", - Compressible: false, - }, - "pgp": { - ContentType: "application/pgp-encrypted", - Compressible: false, - }, - "php": { - ContentType: "application/x-httpd-php", - Compressible: false, - }, - "pic": { - ContentType: "image/x-pict", - Compressible: false, - }, - "pkg": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "pki": { - ContentType: "application/pkixcmp", - Compressible: false, - }, - "pkipath": { - ContentType: "application/pkix-pkipath", - Compressible: false, - }, - "pkpass": { - ContentType: "application/vnd.apple.pkpass", - Compressible: false, - }, - "pl": { - ContentType: "application/x-perl", - Compressible: false, - }, - "plb": { - ContentType: "application/vnd.3gpp.pic-bw-large", - Compressible: false, - }, - "plc": { - ContentType: "application/vnd.mobius.plc", - Compressible: false, - }, - "plf": { - ContentType: "application/vnd.pocketlearn", - Compressible: false, - }, - "pls": { - ContentType: "application/pls+xml", - Compressible: false, - }, - "pm": { - ContentType: "application/x-perl", - Compressible: false, - }, - "pml": { - ContentType: "application/vnd.ctc-posml", - Compressible: false, - }, - "png": { - ContentType: "image/png", - Compressible: false, - }, - "pnm": { - ContentType: "image/x-portable-anymap", - Compressible: false, - }, - "portpkg": { - ContentType: "application/vnd.macports.portpkg", - Compressible: false, - }, - "pot": { - ContentType: "application/vnd.ms-powerpoint", - Compressible: false, - }, - "potm": { - ContentType: "application/vnd.ms-powerpoint.template.macroenabled.12", - Compressible: false, - }, - "potx": { - ContentType: "application/vnd.openxmlformats-officedocument.presentationml.template", - Compressible: false, - }, - "ppam": { - ContentType: "application/vnd.ms-powerpoint.addin.macroenabled.12", - Compressible: false, - }, - "ppd": { - ContentType: "application/vnd.cups-ppd", - Compressible: false, - }, - "ppm": { - ContentType: "image/x-portable-pixmap", - Compressible: false, - }, - "pps": { - ContentType: 
"application/vnd.ms-powerpoint", - Compressible: false, - }, - "ppsm": { - ContentType: "application/vnd.ms-powerpoint.slideshow.macroenabled.12", - Compressible: false, - }, - "ppsx": { - ContentType: "application/vnd.openxmlformats-officedocument.presentationml.slideshow", - Compressible: false, - }, - "ppt": { - ContentType: "application/vnd.ms-powerpoint", - Compressible: false, - }, - "pptm": { - ContentType: "application/vnd.ms-powerpoint.presentation.macroenabled.12", - Compressible: false, - }, - "pptx": { - ContentType: "application/vnd.openxmlformats-officedocument.presentationml.presentation", - Compressible: false, - }, - "pqa": { - ContentType: "application/vnd.palm", - Compressible: false, - }, - "prc": { - ContentType: "application/x-pilot", - Compressible: false, - }, - "pre": { - ContentType: "application/vnd.lotus-freelance", - Compressible: false, - }, - "prf": { - ContentType: "application/pics-rules", - Compressible: false, - }, - "ps": { - ContentType: "application/postscript", - Compressible: false, - }, - "psb": { - ContentType: "application/vnd.3gpp.pic-bw-small", - Compressible: false, - }, - "psd": { - ContentType: "image/vnd.adobe.photoshop", - Compressible: false, - }, - "psf": { - ContentType: "application/x-font-linux-psf", - Compressible: false, - }, - "pskcxml": { - ContentType: "application/pskc+xml", - Compressible: false, - }, - "pti": { - ContentType: "image/prs.pti", - Compressible: false, - }, - "ptid": { - ContentType: "application/vnd.pvi.ptid1", - Compressible: false, - }, - "pub": { - ContentType: "application/x-mspublisher", - Compressible: false, - }, - "pvb": { - ContentType: "application/vnd.3gpp.pic-bw-var", - Compressible: false, - }, - "pwn": { - ContentType: "application/vnd.3m.post-it-notes", - Compressible: false, - }, - "pya": { - ContentType: "audio/vnd.ms-playready.media.pya", - Compressible: false, - }, - "pyv": { - ContentType: "video/vnd.ms-playready.media.pyv", - Compressible: false, - }, - "qam": { - ContentType: "application/vnd.epson.quickanime", - Compressible: false, - }, - "qbo": { - ContentType: "application/vnd.intu.qbo", - Compressible: false, - }, - "qfx": { - ContentType: "application/vnd.intu.qfx", - Compressible: false, - }, - "qps": { - ContentType: "application/vnd.publishare-delta-tree", - Compressible: false, - }, - "qt": { - ContentType: "video/quicktime", - Compressible: false, - }, - "qwd": { - ContentType: "application/vnd.quark.quarkxpress", - Compressible: false, - }, - "qwt": { - ContentType: "application/vnd.quark.quarkxpress", - Compressible: false, - }, - "qxb": { - ContentType: "application/vnd.quark.quarkxpress", - Compressible: false, - }, - "qxd": { - ContentType: "application/vnd.quark.quarkxpress", - Compressible: false, - }, - "qxl": { - ContentType: "application/vnd.quark.quarkxpress", - Compressible: false, - }, - "qxt": { - ContentType: "application/vnd.quark.quarkxpress", - Compressible: false, - }, - "ra": { - ContentType: "audio/x-realaudio", - Compressible: false, - }, - "ram": { - ContentType: "audio/x-pn-realaudio", - Compressible: false, - }, - "raml": { - ContentType: "application/raml+yaml", - Compressible: false, - }, - "rar": { - ContentType: "application/x-rar-compressed", - Compressible: false, - }, - "ras": { - ContentType: "image/x-cmu-raster", - Compressible: false, - }, - "rcprofile": { - ContentType: "application/vnd.ipunplugged.rcprofile", - Compressible: false, - }, - "rdf": { - ContentType: "application/rdf+xml", - Compressible: false, - }, - "rdz": { - ContentType: 
"application/vnd.data-vision.rdz", - Compressible: false, - }, - "rep": { - ContentType: "application/vnd.businessobjects", - Compressible: false, - }, - "res": { - ContentType: "application/x-dtbresource+xml", - Compressible: false, - }, - "rgb": { - ContentType: "image/x-rgb", - Compressible: false, - }, - "rif": { - ContentType: "application/reginfo+xml", - Compressible: false, - }, - "rip": { - ContentType: "audio/vnd.rip", - Compressible: false, - }, - "ris": { - ContentType: "application/x-research-info-systems", - Compressible: false, - }, - "rl": { - ContentType: "application/resource-lists+xml", - Compressible: false, - }, - "rlc": { - ContentType: "image/vnd.fujixerox.edmics-rlc", - Compressible: false, - }, - "rld": { - ContentType: "application/resource-lists-diff+xml", - Compressible: false, - }, - "rm": { - ContentType: "application/vnd.rn-realmedia", - Compressible: false, - }, - "rmi": { - ContentType: "audio/midi", - Compressible: false, - }, - "rmp": { - ContentType: "audio/x-pn-realaudio-plugin", - Compressible: false, - }, - "rms": { - ContentType: "application/vnd.jcp.javame.midlet-rms", - Compressible: false, - }, - "rmvb": { - ContentType: "application/vnd.rn-realmedia-vbr", - Compressible: false, - }, - "rnc": { - ContentType: "application/relax-ng-compact-syntax", - Compressible: false, - }, - "rng": { - ContentType: "application/xml", - Compressible: false, - }, - "roa": { - ContentType: "application/rpki-roa", - Compressible: false, - }, - "roff": { - ContentType: "text/troff", - Compressible: false, - }, - "rp9": { - ContentType: "application/vnd.cloanto.rp9", - Compressible: false, - }, - "rpm": { - ContentType: "application/x-redhat-package-manager", - Compressible: false, - }, - "rpss": { - ContentType: "application/vnd.nokia.radio-presets", - Compressible: false, - }, - "rpst": { - ContentType: "application/vnd.nokia.radio-preset", - Compressible: false, - }, - "rq": { - ContentType: "application/sparql-query", - Compressible: false, - }, - "rs": { - ContentType: "application/rls-services+xml", - Compressible: false, - }, - "rsd": { - ContentType: "application/rsd+xml", - Compressible: false, - }, - "rss": { - ContentType: "application/rss+xml", - Compressible: false, - }, - "rtf": { - ContentType: "text/rtf", - Compressible: false, - }, - "rtx": { - ContentType: "text/richtext", - Compressible: false, - }, - "run": { - ContentType: "application/x-makeself", - Compressible: false, - }, - "s": { - ContentType: "text/x-asm", - Compressible: false, - }, - "s3m": { - ContentType: "audio/s3m", - Compressible: false, - }, - "saf": { - ContentType: "application/vnd.yamaha.smaf-audio", - Compressible: false, - }, - "sass": { - ContentType: "text/x-sass", - Compressible: false, - }, - "sbml": { - ContentType: "application/sbml+xml", - Compressible: false, - }, - "sc": { - ContentType: "application/vnd.ibm.secure-container", - Compressible: false, - }, - "scd": { - ContentType: "application/x-msschedule", - Compressible: false, - }, - "scm": { - ContentType: "application/vnd.lotus-screencam", - Compressible: false, - }, - "scq": { - ContentType: "application/scvp-cv-request", - Compressible: false, - }, - "scs": { - ContentType: "application/scvp-cv-response", - Compressible: false, - }, - "scss": { - ContentType: "text/x-scss", - Compressible: false, - }, - "scurl": { - ContentType: "text/vnd.curl.scurl", - Compressible: false, - }, - "sda": { - ContentType: "application/vnd.stardivision.draw", - Compressible: false, - }, - "sdc": { - ContentType: 
"application/vnd.stardivision.calc", - Compressible: false, - }, - "sdd": { - ContentType: "application/vnd.stardivision.impress", - Compressible: false, - }, - "sdkd": { - ContentType: "application/vnd.solent.sdkm+xml", - Compressible: false, - }, - "sdkm": { - ContentType: "application/vnd.solent.sdkm+xml", - Compressible: false, - }, - "sdp": { - ContentType: "application/sdp", - Compressible: false, - }, - "sdw": { - ContentType: "application/vnd.stardivision.writer", - Compressible: false, - }, - "sea": { - ContentType: "application/x-sea", - Compressible: false, - }, - "see": { - ContentType: "application/vnd.seemail", - Compressible: false, - }, - "seed": { - ContentType: "application/vnd.fdsn.seed", - Compressible: false, - }, - "sema": { - ContentType: "application/vnd.sema", - Compressible: false, - }, - "semd": { - ContentType: "application/vnd.semd", - Compressible: false, - }, - "semf": { - ContentType: "application/vnd.semf", - Compressible: false, - }, - "ser": { - ContentType: "application/java-serialized-object", - Compressible: false, - }, - "setpay": { - ContentType: "application/set-payment-initiation", - Compressible: false, - }, - "setreg": { - ContentType: "application/set-registration-initiation", - Compressible: false, - }, - "sfd-hdstx": { - ContentType: "application/vnd.hydrostatix.sof-data", - Compressible: false, - }, - "sfs": { - ContentType: "application/vnd.spotfire.sfs", - Compressible: false, - }, - "sfv": { - ContentType: "text/x-sfv", - Compressible: false, - }, - "sgi": { - ContentType: "image/sgi", - Compressible: false, - }, - "sgl": { - ContentType: "application/vnd.stardivision.writer-global", - Compressible: false, - }, - "sgm": { - ContentType: "text/sgml", - Compressible: false, - }, - "sgml": { - ContentType: "text/sgml", - Compressible: false, - }, - "sh": { - ContentType: "application/x-sh", - Compressible: false, - }, - "shar": { - ContentType: "application/x-shar", - Compressible: false, - }, - "shex": { - ContentType: "text/shex", - Compressible: false, - }, - "shf": { - ContentType: "application/shf+xml", - Compressible: false, - }, - "shtml": { - ContentType: "text/html", - Compressible: false, - }, - "sid": { - ContentType: "image/x-mrsid-image", - Compressible: false, - }, - "sig": { - ContentType: "application/pgp-signature", - Compressible: false, - }, - "sil": { - ContentType: "audio/silk", - Compressible: false, - }, - "silo": { - ContentType: "model/mesh", - Compressible: false, - }, - "sis": { - ContentType: "application/vnd.symbian.install", - Compressible: false, - }, - "sisx": { - ContentType: "application/vnd.symbian.install", - Compressible: false, - }, - "sit": { - ContentType: "application/x-stuffit", - Compressible: false, - }, - "sitx": { - ContentType: "application/x-stuffitx", - Compressible: false, - }, - "skd": { - ContentType: "application/vnd.koan", - Compressible: false, - }, - "skm": { - ContentType: "application/vnd.koan", - Compressible: false, - }, - "skp": { - ContentType: "application/vnd.koan", - Compressible: false, - }, - "skt": { - ContentType: "application/vnd.koan", - Compressible: false, - }, - "sldm": { - ContentType: "application/vnd.ms-powerpoint.slide.macroenabled.12", - Compressible: false, - }, - "sldx": { - ContentType: "application/vnd.openxmlformats-officedocument.presentationml.slide", - Compressible: false, - }, - "slim": { - ContentType: "text/slim", - Compressible: false, - }, - "slm": { - ContentType: "text/slim", - Compressible: false, - }, - "slt": { - ContentType: 
"application/vnd.epson.salt", - Compressible: false, - }, - "sm": { - ContentType: "application/vnd.stepmania.stepchart", - Compressible: false, - }, - "smf": { - ContentType: "application/vnd.stardivision.math", - Compressible: false, - }, - "smi": { - ContentType: "application/smil+xml", - Compressible: false, - }, - "smil": { - ContentType: "application/smil+xml", - Compressible: false, - }, - "smv": { - ContentType: "video/x-smv", - Compressible: false, - }, - "smzip": { - ContentType: "application/vnd.stepmania.package", - Compressible: false, - }, - "snd": { - ContentType: "audio/basic", - Compressible: false, - }, - "snf": { - ContentType: "application/x-font-snf", - Compressible: false, - }, - "so": { - ContentType: "application/octet-stream", - Compressible: false, - }, - "spc": { - ContentType: "application/x-pkcs7-certificates", - Compressible: false, - }, - "spf": { - ContentType: "application/vnd.yamaha.smaf-phrase", - Compressible: false, - }, - "spl": { - ContentType: "application/x-futuresplash", - Compressible: false, - }, - "spot": { - ContentType: "text/vnd.in3d.spot", - Compressible: false, - }, - "spp": { - ContentType: "application/scvp-vp-response", - Compressible: false, - }, - "spq": { - ContentType: "application/scvp-vp-request", - Compressible: false, - }, - "spx": { - ContentType: "audio/ogg", - Compressible: false, - }, - "sql": { - ContentType: "application/x-sql", - Compressible: false, - }, - "src": { - ContentType: "application/x-wais-source", - Compressible: false, - }, - "srt": { - ContentType: "application/x-subrip", - Compressible: false, - }, - "sru": { - ContentType: "application/sru+xml", - Compressible: false, - }, - "srx": { - ContentType: "application/sparql-results+xml", - Compressible: false, - }, - "ssdl": { - ContentType: "application/ssdl+xml", - Compressible: false, - }, - "sse": { - ContentType: "application/vnd.kodak-descriptor", - Compressible: false, - }, - "ssf": { - ContentType: "application/vnd.epson.ssf", - Compressible: false, - }, - "ssml": { - ContentType: "application/ssml+xml", - Compressible: false, - }, - "st": { - ContentType: "application/vnd.sailingtracker.track", - Compressible: false, - }, - "stc": { - ContentType: "application/vnd.sun.xml.calc.template", - Compressible: false, - }, - "std": { - ContentType: "application/vnd.sun.xml.draw.template", - Compressible: false, - }, - "stf": { - ContentType: "application/vnd.wt.stf", - Compressible: false, - }, - "sti": { - ContentType: "application/vnd.sun.xml.impress.template", - Compressible: false, - }, - "stk": { - ContentType: "application/hyperstudio", - Compressible: false, - }, - "stl": { - ContentType: "application/vnd.ms-pki.stl", - Compressible: false, - }, - "str": { - ContentType: "application/vnd.pg.format", - Compressible: false, - }, - "stw": { - ContentType: "application/vnd.sun.xml.writer.template", - Compressible: false, - }, - "styl": { - ContentType: "text/stylus", - Compressible: false, - }, - "stylus": { - ContentType: "text/stylus", - Compressible: false, - }, - "sub": { - ContentType: "text/vnd.dvb.subtitle", - Compressible: false, - }, - "sus": { - ContentType: "application/vnd.sus-calendar", - Compressible: false, - }, - "susp": { - ContentType: "application/vnd.sus-calendar", - Compressible: false, - }, - "sv4cpio": { - ContentType: "application/x-sv4cpio", - Compressible: false, - }, - "sv4crc": { - ContentType: "application/x-sv4crc", - Compressible: false, - }, - "svc": { - ContentType: "application/vnd.dvb.service", - Compressible: false, - }, - 
"svd": { - ContentType: "application/vnd.svd", - Compressible: false, - }, - "svg": { - ContentType: "image/svg+xml", - Compressible: false, - }, - "svgz": { - ContentType: "image/svg+xml", - Compressible: false, - }, - "swa": { - ContentType: "application/x-director", - Compressible: false, - }, - "swf": { - ContentType: "application/x-shockwave-flash", - Compressible: false, - }, - "swi": { - ContentType: "application/vnd.aristanetworks.swi", - Compressible: false, - }, - "sxc": { - ContentType: "application/vnd.sun.xml.calc", - Compressible: false, - }, - "sxd": { - ContentType: "application/vnd.sun.xml.draw", - Compressible: false, - }, - "sxg": { - ContentType: "application/vnd.sun.xml.writer.global", - Compressible: false, - }, - "sxi": { - ContentType: "application/vnd.sun.xml.impress", - Compressible: false, - }, - "sxm": { - ContentType: "application/vnd.sun.xml.math", - Compressible: false, - }, - "sxw": { - ContentType: "application/vnd.sun.xml.writer", - Compressible: false, - }, - "t": { - ContentType: "text/troff", - Compressible: false, - }, - "t3": { - ContentType: "application/x-t3vm-image", - Compressible: false, - }, - "t38": { - ContentType: "image/t38", - Compressible: false, - }, - "taglet": { - ContentType: "application/vnd.mynfc", - Compressible: false, - }, - "tao": { - ContentType: "application/vnd.tao.intent-module-archive", - Compressible: false, - }, - "tap": { - ContentType: "image/vnd.tencent.tap", - Compressible: false, - }, - "tar": { - ContentType: "application/x-tar", - Compressible: false, - }, - "tcap": { - ContentType: "application/vnd.3gpp2.tcap", - Compressible: false, - }, - "tcl": { - ContentType: "application/x-tcl", - Compressible: false, - }, - "teacher": { - ContentType: "application/vnd.smart.teacher", - Compressible: false, - }, - "tei": { - ContentType: "application/tei+xml", - Compressible: false, - }, - "teicorpus": { - ContentType: "application/tei+xml", - Compressible: false, - }, - "tex": { - ContentType: "application/x-tex", - Compressible: false, - }, - "texi": { - ContentType: "application/x-texinfo", - Compressible: false, - }, - "texinfo": { - ContentType: "application/x-texinfo", - Compressible: false, - }, - "text": { - ContentType: "text/plain", - Compressible: false, - }, - "tfi": { - ContentType: "application/thraud+xml", - Compressible: false, - }, - "tfm": { - ContentType: "application/x-tex-tfm", - Compressible: false, - }, - "tfx": { - ContentType: "image/tiff-fx", - Compressible: false, - }, - "tga": { - ContentType: "image/x-tga", - Compressible: false, - }, - "thmx": { - ContentType: "application/vnd.ms-officetheme", - Compressible: false, - }, - "tif": { - ContentType: "image/tiff", - Compressible: false, - }, - "tiff": { - ContentType: "image/tiff", - Compressible: false, - }, - "tk": { - ContentType: "application/x-tcl", - Compressible: false, - }, - "tmo": { - ContentType: "application/vnd.tmobile-livetv", - Compressible: false, - }, - "torrent": { - ContentType: "application/x-bittorrent", - Compressible: false, - }, - "tpl": { - ContentType: "application/vnd.groove-tool-template", - Compressible: false, - }, - "tpt": { - ContentType: "application/vnd.trid.tpt", - Compressible: false, - }, - "tr": { - ContentType: "text/troff", - Compressible: false, - }, - "tra": { - ContentType: "application/vnd.trueapp", - Compressible: false, - }, - "trm": { - ContentType: "application/x-msterminal", - Compressible: false, - }, - "ts": { - ContentType: "video/mp2t", - Compressible: false, - }, - "tsd": { - ContentType: 
"application/timestamped-data", - Compressible: false, - }, - "tsv": { - ContentType: "text/tab-separated-values", - Compressible: false, - }, - "ttc": { - ContentType: "font/collection", - Compressible: false, - }, - "ttf": { - ContentType: "font/ttf", - Compressible: false, - }, - "ttl": { - ContentType: "text/turtle", - Compressible: false, - }, - "twd": { - ContentType: "application/vnd.simtech-mindmapper", - Compressible: false, - }, - "twds": { - ContentType: "application/vnd.simtech-mindmapper", - Compressible: false, - }, - "txd": { - ContentType: "application/vnd.genomatix.tuxedo", - Compressible: false, - }, - "txf": { - ContentType: "application/vnd.mobius.txf", - Compressible: false, - }, - "txt": { - ContentType: "text/plain", - Compressible: false, - }, - "u32": { - ContentType: "application/x-authorware-bin", - Compressible: false, - }, - "u8dsn": { - ContentType: "message/global-delivery-status", - Compressible: false, - }, - "u8hdr": { - ContentType: "message/global-headers", - Compressible: false, - }, - "u8mdn": { - ContentType: "message/global-disposition-notification", - Compressible: false, - }, - "u8msg": { - ContentType: "message/global", - Compressible: false, - }, - "udeb": { - ContentType: "application/x-debian-package", - Compressible: false, - }, - "ufd": { - ContentType: "application/vnd.ufdl", - Compressible: false, - }, - "ufdl": { - ContentType: "application/vnd.ufdl", - Compressible: false, - }, - "ulx": { - ContentType: "application/x-glulx", - Compressible: false, - }, - "umj": { - ContentType: "application/vnd.umajin", - Compressible: false, - }, - "unityweb": { - ContentType: "application/vnd.unity", - Compressible: false, - }, - "uoml": { - ContentType: "application/vnd.uoml+xml", - Compressible: false, - }, - "uri": { - ContentType: "text/uri-list", - Compressible: false, - }, - "uris": { - ContentType: "text/uri-list", - Compressible: false, - }, - "urls": { - ContentType: "text/uri-list", - Compressible: false, - }, - "ustar": { - ContentType: "application/x-ustar", - Compressible: false, - }, - "utz": { - ContentType: "application/vnd.uiq.theme", - Compressible: false, - }, - "uu": { - ContentType: "text/x-uuencode", - Compressible: false, - }, - "uva": { - ContentType: "audio/vnd.dece.audio", - Compressible: false, - }, - "uvd": { - ContentType: "application/vnd.dece.data", - Compressible: false, - }, - "uvf": { - ContentType: "application/vnd.dece.data", - Compressible: false, - }, - "uvg": { - ContentType: "image/vnd.dece.graphic", - Compressible: false, - }, - "uvh": { - ContentType: "video/vnd.dece.hd", - Compressible: false, - }, - "uvi": { - ContentType: "image/vnd.dece.graphic", - Compressible: false, - }, - "uvm": { - ContentType: "video/vnd.dece.mobile", - Compressible: false, - }, - "uvp": { - ContentType: "video/vnd.dece.pd", - Compressible: false, - }, - "uvs": { - ContentType: "video/vnd.dece.sd", - Compressible: false, - }, - "uvt": { - ContentType: "application/vnd.dece.ttml+xml", - Compressible: false, - }, - "uvu": { - ContentType: "video/vnd.uvvu.mp4", - Compressible: false, - }, - "uvv": { - ContentType: "video/vnd.dece.video", - Compressible: false, - }, - "uvva": { - ContentType: "audio/vnd.dece.audio", - Compressible: false, - }, - "uvvd": { - ContentType: "application/vnd.dece.data", - Compressible: false, - }, - "uvvf": { - ContentType: "application/vnd.dece.data", - Compressible: false, - }, - "uvvg": { - ContentType: "image/vnd.dece.graphic", - Compressible: false, - }, - "uvvh": { - ContentType: "video/vnd.dece.hd", - 
Compressible: false, - }, - "uvvi": { - ContentType: "image/vnd.dece.graphic", - Compressible: false, - }, - "uvvm": { - ContentType: "video/vnd.dece.mobile", - Compressible: false, - }, - "uvvp": { - ContentType: "video/vnd.dece.pd", - Compressible: false, - }, - "uvvs": { - ContentType: "video/vnd.dece.sd", - Compressible: false, - }, - "uvvt": { - ContentType: "application/vnd.dece.ttml+xml", - Compressible: false, - }, - "uvvu": { - ContentType: "video/vnd.uvvu.mp4", - Compressible: false, - }, - "uvvv": { - ContentType: "video/vnd.dece.video", - Compressible: false, - }, - "uvvx": { - ContentType: "application/vnd.dece.unspecified", - Compressible: false, - }, - "uvvz": { - ContentType: "application/vnd.dece.zip", - Compressible: false, - }, - "uvx": { - ContentType: "application/vnd.dece.unspecified", - Compressible: false, - }, - "uvz": { - ContentType: "application/vnd.dece.zip", - Compressible: false, - }, - "vbox": { - ContentType: "application/x-virtualbox-vbox", - Compressible: false, - }, - "vbox-extpack": { - ContentType: "application/x-virtualbox-vbox-extpack", - Compressible: false, - }, - "vcard": { - ContentType: "text/vcard", - Compressible: false, - }, - "vcd": { - ContentType: "application/x-cdlink", - Compressible: false, - }, - "vcf": { - ContentType: "text/x-vcard", - Compressible: false, - }, - "vcg": { - ContentType: "application/vnd.groove-vcard", - Compressible: false, - }, - "vcs": { - ContentType: "text/x-vcalendar", - Compressible: false, - }, - "vcx": { - ContentType: "application/vnd.vcx", - Compressible: false, - }, - "vdi": { - ContentType: "application/x-virtualbox-vdi", - Compressible: false, - }, - "vhd": { - ContentType: "application/x-virtualbox-vhd", - Compressible: false, - }, - "vis": { - ContentType: "application/vnd.visionary", - Compressible: false, - }, - "viv": { - ContentType: "video/vnd.vivo", - Compressible: false, - }, - "vmdk": { - ContentType: "application/x-virtualbox-vmdk", - Compressible: false, - }, - "vob": { - ContentType: "video/x-ms-vob", - Compressible: false, - }, - "vor": { - ContentType: "application/vnd.stardivision.writer", - Compressible: false, - }, - "vox": { - ContentType: "application/x-authorware-bin", - Compressible: false, - }, - "vrml": { - ContentType: "model/vrml", - Compressible: false, - }, - "vsd": { - ContentType: "application/vnd.visio", - Compressible: false, - }, - "vsf": { - ContentType: "application/vnd.vsf", - Compressible: false, - }, - "vss": { - ContentType: "application/vnd.visio", - Compressible: false, - }, - "vst": { - ContentType: "application/vnd.visio", - Compressible: false, - }, - "vsw": { - ContentType: "application/vnd.visio", - Compressible: false, - }, - "vtf": { - ContentType: "image/vnd.valve.source.texture", - Compressible: false, - }, - "vtt": { - ContentType: "text/vtt", - Compressible: false, - }, - "vtu": { - ContentType: "model/vnd.vtu", - Compressible: false, - }, - "vxml": { - ContentType: "application/voicexml+xml", - Compressible: false, - }, - "w3d": { - ContentType: "application/x-director", - Compressible: false, - }, - "wad": { - ContentType: "application/x-doom", - Compressible: false, - }, - "wadl": { - ContentType: "application/vnd.sun.wadl+xml", - Compressible: false, - }, - "war": { - ContentType: "application/java-archive", - Compressible: false, - }, - "wasm": { - ContentType: "application/wasm", - Compressible: false, - }, - "wav": { - ContentType: "audio/x-wav", - Compressible: false, - }, - "wax": { - ContentType: "audio/x-ms-wax", - Compressible: false, - }, - 
"wbmp": { - ContentType: "image/vnd.wap.wbmp", - Compressible: false, - }, - "wbs": { - ContentType: "application/vnd.criticaltools.wbs+xml", - Compressible: false, - }, - "wbxml": { - ContentType: "application/vnd.wap.wbxml", - Compressible: false, - }, - "wcm": { - ContentType: "application/vnd.ms-works", - Compressible: false, - }, - "wdb": { - ContentType: "application/vnd.ms-works", - Compressible: false, - }, - "wdp": { - ContentType: "image/vnd.ms-photo", - Compressible: false, - }, - "weba": { - ContentType: "audio/webm", - Compressible: false, - }, - "webapp": { - ContentType: "application/x-web-app-manifest+json", - Compressible: false, - }, - "webm": { - ContentType: "video/webm", - Compressible: false, - }, - "webmanifest": { - ContentType: "application/manifest+json", - Compressible: false, - }, - "webp": { - ContentType: "image/webp", - Compressible: false, - }, - "wg": { - ContentType: "application/vnd.pmi.widget", - Compressible: false, - }, - "wgt": { - ContentType: "application/widget", - Compressible: false, - }, - "wks": { - ContentType: "application/vnd.ms-works", - Compressible: false, - }, - "wm": { - ContentType: "video/x-ms-wm", - Compressible: false, - }, - "wma": { - ContentType: "audio/x-ms-wma", - Compressible: false, - }, - "wmd": { - ContentType: "application/x-ms-wmd", - Compressible: false, - }, - "wmf": { - ContentType: "image/wmf", - Compressible: false, - }, - "wml": { - ContentType: "text/vnd.wap.wml", - Compressible: false, - }, - "wmlc": { - ContentType: "application/vnd.wap.wmlc", - Compressible: false, - }, - "wmls": { - ContentType: "text/vnd.wap.wmlscript", - Compressible: false, - }, - "wmlsc": { - ContentType: "application/vnd.wap.wmlscriptc", - Compressible: false, - }, - "wmv": { - ContentType: "video/x-ms-wmv", - Compressible: false, - }, - "wmx": { - ContentType: "video/x-ms-wmx", - Compressible: false, - }, - "wmz": { - ContentType: "application/x-msmetafile", - Compressible: false, - }, - "woff": { - ContentType: "font/woff", - Compressible: false, - }, - "woff2": { - ContentType: "font/woff2", - Compressible: false, - }, - "wpd": { - ContentType: "application/vnd.wordperfect", - Compressible: false, - }, - "wpl": { - ContentType: "application/vnd.ms-wpl", - Compressible: false, - }, - "wps": { - ContentType: "application/vnd.ms-works", - Compressible: false, - }, - "wqd": { - ContentType: "application/vnd.wqd", - Compressible: false, - }, - "wri": { - ContentType: "application/x-mswrite", - Compressible: false, - }, - "wrl": { - ContentType: "model/vrml", - Compressible: false, - }, - "wsc": { - ContentType: "message/vnd.wfa.wsc", - Compressible: false, - }, - "wsdl": { - ContentType: "application/wsdl+xml", - Compressible: false, - }, - "wspolicy": { - ContentType: "application/wspolicy+xml", - Compressible: false, - }, - "wtb": { - ContentType: "application/vnd.webturbo", - Compressible: false, - }, - "wvx": { - ContentType: "video/x-ms-wvx", - Compressible: false, - }, - "x32": { - ContentType: "application/x-authorware-bin", - Compressible: false, - }, - "x3d": { - ContentType: "model/x3d+xml", - Compressible: false, - }, - "x3db": { - ContentType: "model/x3d+binary", - Compressible: false, - }, - "x3dbz": { - ContentType: "model/x3d+binary", - Compressible: false, - }, - "x3dv": { - ContentType: "model/x3d+vrml", - Compressible: false, - }, - "x3dvz": { - ContentType: "model/x3d+vrml", - Compressible: false, - }, - "x3dz": { - ContentType: "model/x3d+xml", - Compressible: false, - }, - "xaml": { - ContentType: "application/xaml+xml", 
- Compressible: false, - }, - "xap": { - ContentType: "application/x-silverlight-app", - Compressible: false, - }, - "xar": { - ContentType: "application/vnd.xara", - Compressible: false, - }, - "xbap": { - ContentType: "application/x-ms-xbap", - Compressible: false, - }, - "xbd": { - ContentType: "application/vnd.fujixerox.docuworks.binder", - Compressible: false, - }, - "xbm": { - ContentType: "image/x-xbitmap", - Compressible: false, - }, - "xdf": { - ContentType: "application/xcap-diff+xml", - Compressible: false, - }, - "xdm": { - ContentType: "application/vnd.syncml.dm+xml", - Compressible: false, - }, - "xdp": { - ContentType: "application/vnd.adobe.xdp+xml", - Compressible: false, - }, - "xdssc": { - ContentType: "application/dssc+xml", - Compressible: false, - }, - "xdw": { - ContentType: "application/vnd.fujixerox.docuworks", - Compressible: false, - }, - "xenc": { - ContentType: "application/xenc+xml", - Compressible: false, - }, - "xer": { - ContentType: "application/patch-ops-error+xml", - Compressible: false, - }, - "xfdf": { - ContentType: "application/vnd.adobe.xfdf", - Compressible: false, - }, - "xfdl": { - ContentType: "application/vnd.xfdl", - Compressible: false, - }, - "xht": { - ContentType: "application/xhtml+xml", - Compressible: false, - }, - "xhtml": { - ContentType: "application/xhtml+xml", - Compressible: false, - }, - "xhvml": { - ContentType: "application/xv+xml", - Compressible: false, - }, - "xif": { - ContentType: "image/vnd.xiff", - Compressible: false, - }, - "xla": { - ContentType: "application/vnd.ms-excel", - Compressible: false, - }, - "xlam": { - ContentType: "application/vnd.ms-excel.addin.macroenabled.12", - Compressible: false, - }, - "xlc": { - ContentType: "application/vnd.ms-excel", - Compressible: false, - }, - "xlf": { - ContentType: "application/x-xliff+xml", - Compressible: false, - }, - "xlm": { - ContentType: "application/vnd.ms-excel", - Compressible: false, - }, - "xls": { - ContentType: "application/vnd.ms-excel", - Compressible: false, - }, - "xlsb": { - ContentType: "application/vnd.ms-excel.sheet.binary.macroenabled.12", - Compressible: false, - }, - "xlsm": { - ContentType: "application/vnd.ms-excel.sheet.macroenabled.12", - Compressible: false, - }, - "xlsx": { - ContentType: "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet", - Compressible: false, - }, - "xlt": { - ContentType: "application/vnd.ms-excel", - Compressible: false, - }, - "xltm": { - ContentType: "application/vnd.ms-excel.template.macroenabled.12", - Compressible: false, - }, - "xltx": { - ContentType: "application/vnd.openxmlformats-officedocument.spreadsheetml.template", - Compressible: false, - }, - "xlw": { - ContentType: "application/vnd.ms-excel", - Compressible: false, - }, - "xm": { - ContentType: "audio/xm", - Compressible: false, - }, - "xml": { - ContentType: "text/xml", - Compressible: false, - }, - "xo": { - ContentType: "application/vnd.olpc-sugar", - Compressible: false, - }, - "xop": { - ContentType: "application/xop+xml", - Compressible: false, - }, - "xpi": { - ContentType: "application/x-xpinstall", - Compressible: false, - }, - "xpl": { - ContentType: "application/xproc+xml", - Compressible: false, - }, - "xpm": { - ContentType: "image/x-xpixmap", - Compressible: false, - }, - "xpr": { - ContentType: "application/vnd.is-xpr", - Compressible: false, - }, - "xps": { - ContentType: "application/vnd.ms-xpsdocument", - Compressible: false, - }, - "xpw": { - ContentType: "application/vnd.intercon.formnet", - Compressible: false, - }, 
- "xpx": { - ContentType: "application/vnd.intercon.formnet", - Compressible: false, - }, - "xsd": { - ContentType: "application/xml", - Compressible: false, - }, - "xsl": { - ContentType: "application/xml", - Compressible: false, - }, - "xslt": { - ContentType: "application/xslt+xml", - Compressible: false, - }, - "xsm": { - ContentType: "application/vnd.syncml+xml", - Compressible: false, - }, - "xspf": { - ContentType: "application/xspf+xml", - Compressible: false, - }, - "xul": { - ContentType: "application/vnd.mozilla.xul+xml", - Compressible: false, - }, - "xvm": { - ContentType: "application/xv+xml", - Compressible: false, - }, - "xvml": { - ContentType: "application/xv+xml", - Compressible: false, - }, - "xwd": { - ContentType: "image/x-xwindowdump", - Compressible: false, - }, - "xyz": { - ContentType: "chemical/x-xyz", - Compressible: false, - }, - "xz": { - ContentType: "application/x-xz", - Compressible: false, - }, - "yaml": { - ContentType: "text/yaml", - Compressible: false, - }, - "yang": { - ContentType: "application/yang", - Compressible: false, - }, - "yin": { - ContentType: "application/yin+xml", - Compressible: false, - }, - "yml": { - ContentType: "text/yaml", - Compressible: false, - }, - "ymp": { - ContentType: "text/x-suse-ymp", - Compressible: false, - }, - "z1": { - ContentType: "application/x-zmachine", - Compressible: false, - }, - "z2": { - ContentType: "application/x-zmachine", - Compressible: false, - }, - "z3": { - ContentType: "application/x-zmachine", - Compressible: false, - }, - "z4": { - ContentType: "application/x-zmachine", - Compressible: false, - }, - "z5": { - ContentType: "application/x-zmachine", - Compressible: false, - }, - "z6": { - ContentType: "application/x-zmachine", - Compressible: false, - }, - "z7": { - ContentType: "application/x-zmachine", - Compressible: false, - }, - "z8": { - ContentType: "application/x-zmachine", - Compressible: false, - }, - "zaz": { - ContentType: "application/vnd.zzazz.deck+xml", - Compressible: false, - }, - "zip": { - ContentType: "application/zip", - Compressible: false, - }, - "zir": { - ContentType: "application/vnd.zul", - Compressible: false, - }, - "zirz": { - ContentType: "application/vnd.zul", - Compressible: false, - }, - "zmm": { - ContentType: "application/vnd.handheld-entertainment+xml", - Compressible: false, - }, -} diff --git a/pkg/mimedb/db_test.go b/pkg/mimedb/db_test.go deleted file mode 100644 index 8766c31d..00000000 --- a/pkg/mimedb/db_test.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * mime-db: Mime Database, (C) 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package mimedb - -import "testing" - -func TestMimeLookup(t *testing.T) { - // Test mimeLookup. 
- contentType := DB["txt"].ContentType - if contentType != "text/plain" { - t.Fatalf("Invalid content type are found expected \"application/x-msdownload\", got %s", contentType) - } - compressible := DB["txt"].Compressible - if compressible { - t.Fatalf("Invalid content type are found expected \"false\", got %t", compressible) - } -} - -func TestTypeByExtension(t *testing.T) { - // Test TypeByExtension. - contentType := TypeByExtension(".txt") - if contentType != "text/plain" { - t.Fatalf("Invalid content type are found expected \"text/plain\", got %s", contentType) - } - // Test non-existent type resolution - contentType = TypeByExtension(".abc") - if contentType != "application/octet-stream" { - t.Fatalf("Invalid content type are found expected \"application/octet-stream\", got %s", contentType) - } -} diff --git a/pkg/mimedb/resolve-db.go b/pkg/mimedb/resolve-db.go deleted file mode 100644 index 058b9a9e..00000000 --- a/pkg/mimedb/resolve-db.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - * mime-db: Mime Database, (C) 2015, 2016, 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package mimedb - -import ( - "strings" -) - -// TypeByExtension resolves the extension to its respective content-type. -func TypeByExtension(ext string) string { - // Set default to "application/octet-stream". - var contentType = "application/octet-stream" - if ext != "" { - if content, ok := DB[strings.ToLower(strings.TrimPrefix(ext, "."))]; ok { - contentType = content.ContentType - } - } - return contentType -} diff --git a/pkg/mimedb/util/gen-db.go b/pkg/mimedb/util/gen-db.go deleted file mode 100644 index c0848f96..00000000 --- a/pkg/mimedb/util/gen-db.go +++ /dev/null @@ -1,144 +0,0 @@ -/* - * mimedb: Mime Database, (C) 2015 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package mimedb is a database of file extension to mime content-type. -// Definitions are imported from NodeJS mime-db project under MIT license. - -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "strings" - "text/template" -) - -const progTempl = `// DO NOT EDIT THIS FILE. IT IS AUTO-GENERATED BY "gen-db.go". // -/* - * mimedb: Mime Database, (C) 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package mimedb is a database of file extension to mime content-type. -// Definitions are imported from NodeJS mime-db project under MIT license. -package mimedb - -// DB - Mime is a collection of mime types with extension as key and content-type as value. -var DB = map[string]struct { - ContentType string - Compressible bool -}{ -{{range $extension, $entry := . }} "{{$extension}}": { - ContentType: "{{$entry.ContentType}}", - Compressible: {{$entry.Compressible}}, - }, -{{end}}} -` - -type mimeEntry struct { - ContentType string `json:"contentType"` - Compressible bool `json:"compresible"` -} - -type mimeDB map[string]mimeEntry - -// JSON data from gobindata and parse them into extDB. -func convertDB(jsonFile string) (mimeDB, error) { - // Structure of JSON data from mime-db project. - type dbEntry struct { - Source string `json:"source"` - Compressible bool `json:"compresible"` - Extensions []string `json:"extensions"` - } - - // Access embedded "db.json" inside go-bindata. - jsonDB, err := ioutil.ReadFile(jsonFile) - if err != nil { - return nil, err - } - - // Convert db.json into go's typed structure. - db := make(map[string]dbEntry) - if err := json.Unmarshal(jsonDB, &db); err != nil { - return nil, err - } - - mDB := make(mimeDB) - - // Generate a new database from mime-db. - for key, val := range db { - if len(val.Extensions) > 0 { - /* Denormalize - each extension has its own - unique content-type now. Looks will be fast. */ - for _, ext := range val.Extensions { - /* Single extension type may map to - multiple content-types. In that case, - simply prefer the longest content-type - to maintain some level of - consistency. Only guarantee is, - whatever content type is assigned, it - is appropriate and valid type. */ - if strings.Compare(mDB[ext].ContentType, key) < 0 { - mDB[ext] = mimeEntry{ - ContentType: key, - Compressible: val.Compressible, - } - } - } - } - } - return mDB, nil -} - -func main() { - // Take input json file from command-line". - if len(os.Args) != 2 { - fmt.Print("Syntax:\n\tgen-db /path/to/db.json\n") - os.Exit(1) - } - - // Load and convert db.json into new database with extension - // as key. - mDB, err := convertDB(os.Args[1]) - if err != nil { - panic(err) - } - - // Generate db embedded go program. - tmpl := template.New("mimedb") - mimeTmpl, err := tmpl.Parse(progTempl) - if err != nil { - panic(err) - } - - err = mimeTmpl.Execute(os.Stdout, mDB) - if err != nil { - panic(err) - } -} diff --git a/pkg/mountinfo/mountinfo.go b/pkg/mountinfo/mountinfo.go deleted file mode 100644 index 1156a3d2..00000000 --- a/pkg/mountinfo/mountinfo.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package mountinfo - -// mountInfo - This represents a single line in /proc/mounts. -type mountInfo struct { - Device string - Path string - FSType string - Options []string - Freq string - Pass string -} - -func (m mountInfo) String() string { - return m.Path -} - -// mountInfos - This represents the entire /proc/mounts. -type mountInfos []mountInfo diff --git a/pkg/mountinfo/mountinfo_linux.go b/pkg/mountinfo/mountinfo_linux.go deleted file mode 100644 index 1a38ee36..00000000 --- a/pkg/mountinfo/mountinfo_linux.go +++ /dev/null @@ -1,159 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package mountinfo - -import ( - "bufio" - "fmt" - "io" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" -) - -const ( - // Number of fields per line in /proc/mounts as per the fstab man page. - expectedNumFieldsPerLine = 6 - // Location of the mount file to use - procMountsPath = "/proc/mounts" -) - -// IsLikelyMountPoint determines if a directory is a mountpoint. -func IsLikelyMountPoint(path string) bool { - s1, err := os.Lstat(path) - if err != nil { - return false - } - - // A symlink can never be a mount point - if s1.Mode()&os.ModeSymlink != 0 { - return false - } - - s2, err := os.Lstat(filepath.Dir(strings.TrimSuffix(path, "/"))) - if err != nil { - return false - } - - // If the directory has a different device as parent, then it is a mountpoint. - if s1.Sys().(*syscall.Stat_t).Dev != s2.Sys().(*syscall.Stat_t).Dev { - // path/.. on a different device as path - return true - } - - // path/.. is the same i-node as path - this check is for bind mounts. - return s1.Sys().(*syscall.Stat_t).Ino == s2.Sys().(*syscall.Stat_t).Ino -} - -// CheckCrossDevice - check if any list of paths has any sub-mounts at /proc/mounts. -func CheckCrossDevice(absPaths []string) error { - return checkCrossDevice(absPaths, procMountsPath) -} - -// Check cross device is an internal function. -func checkCrossDevice(absPaths []string, mountsPath string) error { - mounts, err := readProcMounts(mountsPath) - if err != nil { - return err - } - for _, path := range absPaths { - if err := mounts.checkCrossMounts(path); err != nil { - return err - } - } - return nil -} - -// CheckCrossDevice - check if given path has any sub-mounts in the input mounts list. 
-func (mts mountInfos) checkCrossMounts(path string) error { - if !filepath.IsAbs(path) { - return fmt.Errorf("Invalid argument, path (%s) is expected to be absolute", path) - } - var crossMounts mountInfos - for _, mount := range mts { - // Add a separator to indicate that this is a proper mount-point. - // This is to avoid a situation where prefix is '/tmp/fsmount' - // and mount path is /tmp/fs. In such a scenario we need to check for - // `/tmp/fs/` to be a common prefix among other mounts. - mpath := strings.TrimSuffix(mount.Path, "/") + "/" - ppath := strings.TrimSuffix(path, "/") + "/" - if strings.HasPrefix(mpath, ppath) { - // At this point, if the mount point has a common prefix, two conditions can happen. - // - mount.Path matches `path` exactly, which means we can proceed, no error here. - // - mount.Path doesn't match (means cross-device mount), should error out. - if mount.Path != path { - crossMounts = append(crossMounts, mount) - } - } - } - msg := `Cross-device mounts detected on path (%s) at following locations %s. Export path should not have any sub-mounts, refusing to start.` - if len(crossMounts) > 0 { - // if paths didn't match then we do have a cross-device mount. - return fmt.Errorf(msg, path, crossMounts) - } - return nil -} - -// readProcMounts reads the given mountFilePath (normally /proc/mounts) and parses -// its contents into a list of mount entries. -func readProcMounts(mountFilePath string) (mountInfos, error) { - file, err := os.Open(mountFilePath) - if err != nil { - return nil, err - } - defer file.Close() - return parseMountFrom(file) -} - -func parseMountFrom(file io.Reader) (mountInfos, error) { - var mounts = mountInfos{} - scanner := bufio.NewReader(file) - for { - line, err := scanner.ReadString('\n') - if err == io.EOF { - break - } - fields := strings.Fields(line) - if len(fields) != expectedNumFieldsPerLine { - return nil, fmt.Errorf("wrong number of fields (expected %d, got %d): %s", expectedNumFieldsPerLine, len(fields), line) - } - - // Freq should be an integer. - if _, err := strconv.Atoi(fields[4]); err != nil { - return nil, err - } - - // Pass should be an integer. - if _, err := strconv.Atoi(fields[5]); err != nil { - return nil, err - } - - mounts = append(mounts, mountInfo{ - Device: fields[0], - Path: fields[1], - FSType: fields[2], - Options: strings.Split(fields[3], ","), - Freq: fields[4], - Pass: fields[5], - }) - } - return mounts, nil -} diff --git a/pkg/mountinfo/mountinfo_linux_test.go b/pkg/mountinfo/mountinfo_linux_test.go deleted file mode 100644 index 1e6f4dd3..00000000 --- a/pkg/mountinfo/mountinfo_linux_test.go +++ /dev/null @@ -1,251 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package mountinfo - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" -) - -// Tests cross device mount verification function, for both failure -// and success cases. 
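// Usage sketch (illustrative only, not part of the deleted files above). It shows how a
// caller would typically exercise this package before serving data from local export
// paths. The import path and the export paths are assumptions made for the example.
package main

import (
	"log"

	"github.com/minio/minio/pkg/mountinfo"
)

func main() {
	// Hypothetical absolute export paths; CheckCrossDevice rejects paths that have sub-mounts.
	exports := []string{"/export/disk1", "/export/disk2"}
	if err := mountinfo.CheckCrossDevice(exports); err != nil {
		log.Fatalln(err)
	}
	for _, path := range exports {
		log.Printf("%s likely a mount point: %v", path, mountinfo.IsLikelyMountPoint(path))
	}
}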
-func TestCrossDeviceMountPaths(t *testing.T) { - successCase := - `/dev/0 /path/to/0/1 type0 flags 0 0 - /dev/1 /path/to/1 type1 flags 1 1 - /dev/2 /path/to/1/2 type2 flags,1,2=3 2 2 - /dev/3 /path/to/1.1 type3 falgs,1,2=3 3 3 - ` - dir, err := ioutil.TempDir("", "TestReadProcmountInfos") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - mountsPath := filepath.Join(dir, "mounts") - if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0666); err != nil { - t.Fatal(err) - } - // Failure case where we detected successfully cross device mounts. - { - var absPaths = []string{"/path/to/1"} - if err = checkCrossDevice(absPaths, mountsPath); err == nil { - t.Fatal("Expected to fail, but found success") - } - - mp := []mountInfo{ - {"/dev/2", "/path/to/1/2", "type2", []string{"flags"}, "2", "2"}, - } - msg := fmt.Sprintf("Cross-device mounts detected on path (/path/to/1) at following locations %s. Export path should not have any sub-mounts, refusing to start.", mp) - if err.Error() != msg { - t.Fatalf("Expected msg %s, got %s", msg, err) - } - } - // Failure case when input path is not absolute. - { - var absPaths = []string{"."} - if err = checkCrossDevice(absPaths, mountsPath); err == nil { - t.Fatal("Expected to fail for non absolute paths") - } - expectedErrMsg := fmt.Sprintf("Invalid argument, path (%s) is expected to be absolute", ".") - if err.Error() != expectedErrMsg { - t.Fatalf("Expected %s, got %s", expectedErrMsg, err) - } - } - // Success case, where path doesn't have any mounts. - { - var absPaths = []string{"/path/to/x"} - if err = checkCrossDevice(absPaths, mountsPath); err != nil { - t.Fatalf("Expected success, failed instead (%s)", err) - } - } -} - -// Tests cross device mount verification function, for both failure -// and success cases. -func TestCrossDeviceMount(t *testing.T) { - successCase := - `/dev/0 /path/to/0/1 type0 flags 0 0 - /dev/1 /path/to/1 type1 flags 1 1 - /dev/2 /path/to/1/2 type2 flags,1,2=3 2 2 - /dev/3 /path/to/1.1 type3 falgs,1,2=3 3 3 - ` - dir, err := ioutil.TempDir("", "TestReadProcmountInfos") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - mountsPath := filepath.Join(dir, "mounts") - if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0666); err != nil { - t.Fatal(err) - } - mounts, err := readProcMounts(mountsPath) - if err != nil { - t.Fatal(err) - } - // Failure case where we detected successfully cross device mounts. - { - if err = mounts.checkCrossMounts("/path/to/1"); err == nil { - t.Fatal("Expected to fail, but found success") - } - - mp := []mountInfo{ - {"/dev/2", "/path/to/1/2", "type2", []string{"flags"}, "2", "2"}, - } - msg := fmt.Sprintf("Cross-device mounts detected on path (/path/to/1) at following locations %s. Export path should not have any sub-mounts, refusing to start.", mp) - if err.Error() != msg { - t.Fatalf("Expected msg %s, got %s", msg, err) - } - } - // Failure case when input path is not absolute. - { - if err = mounts.checkCrossMounts("."); err == nil { - t.Fatal("Expected to fail for non absolute paths") - } - expectedErrMsg := fmt.Sprintf("Invalid argument, path (%s) is expected to be absolute", ".") - if err.Error() != expectedErrMsg { - t.Fatalf("Expected %s, got %s", expectedErrMsg, err) - } - } - // Success case, where path doesn't have any mounts. - { - if err = mounts.checkCrossMounts("/path/to/x"); err != nil { - t.Fatalf("Expected success, failed instead (%s)", err) - } - } -} - -// Tests read proc mounts file. 
-func TestReadProcmountInfos(t *testing.T) { - successCase := - `/dev/0 /path/to/0 type0 flags 0 0 - /dev/1 /path/to/1 type1 flags 1 1 - /dev/2 /path/to/2 type2 flags,1,2=3 2 2 - ` - dir, err := ioutil.TempDir("", "TestReadProcmountInfos") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dir) - - mountsPath := filepath.Join(dir, "mounts") - if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0666); err != nil { - t.Fatal(err) - } - // Verifies if reading each line worked properly. - { - var mounts mountInfos - mounts, err = readProcMounts(mountsPath) - if err != nil { - t.Fatal(err) - } - if len(mounts) != 3 { - t.Fatalf("expected 3 mounts, got %d", len(mounts)) - } - mp := mountInfo{"/dev/0", "/path/to/0", "type0", []string{"flags"}, "0", "0"} - if !mountPointsEqual(mounts[0], mp) { - t.Errorf("got unexpected MountPoint[0]: %#v", mounts[0]) - } - mp = mountInfo{"/dev/1", "/path/to/1", "type1", []string{"flags"}, "1", "1"} - if !mountPointsEqual(mounts[1], mp) { - t.Errorf("got unexpected mountInfo[1]: %#v", mounts[1]) - } - mp = mountInfo{"/dev/2", "/path/to/2", "type2", []string{"flags", "1", "2=3"}, "2", "2"} - if !mountPointsEqual(mounts[2], mp) { - t.Errorf("got unexpected mountInfo[2]: %#v", mounts[2]) - } - } - // Failure case mounts path doesn't exist, if not fail. - { - if _, err = readProcMounts(filepath.Join(dir, "non-existent")); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } - } -} - -// Tests read proc mounts reader. -func TestReadProcMountFrom(t *testing.T) { - successCase := - `/dev/0 /path/to/0 type0 flags 0 0 - /dev/1 /path/to/1 type1 flags 1 1 - /dev/2 /path/to/2 type2 flags,1,2=3 2 2 - ` - // Success case, verifies if parsing works properly. - { - mounts, err := parseMountFrom(strings.NewReader(successCase)) - if err != nil { - t.Errorf("expected success") - } - if len(mounts) != 3 { - t.Fatalf("expected 3 mounts, got %d", len(mounts)) - } - mp := mountInfo{"/dev/0", "/path/to/0", "type0", []string{"flags"}, "0", "0"} - if !mountPointsEqual(mounts[0], mp) { - t.Errorf("got unexpected mountInfo[0]: %#v", mounts[0]) - } - mp = mountInfo{"/dev/1", "/path/to/1", "type1", []string{"flags"}, "1", "1"} - if !mountPointsEqual(mounts[1], mp) { - t.Errorf("got unexpected mountInfo[1]: %#v", mounts[1]) - } - mp = mountInfo{"/dev/2", "/path/to/2", "type2", []string{"flags", "1", "2=3"}, "2", "2"} - if !mountPointsEqual(mounts[2], mp) { - t.Errorf("got unexpected mountInfo[2]: %#v", mounts[2]) - } - } - // Error cases where parsing fails with invalid Freq and Pass params. - { - errorCases := []string{ - "/dev/0 /path/to/mount\n", - "/dev/1 /path/to/mount type flags a 0\n", - "/dev/2 /path/to/mount type flags 0 b\n", - } - for _, ec := range errorCases { - _, rerr := parseMountFrom(strings.NewReader(ec)) - if rerr == nil { - t.Errorf("expected error") - } - } - } -} - -// Helpers for tests. - -// Check if two `mountInfo` are equal. -func mountPointsEqual(a, b mountInfo) bool { - if a.Device != b.Device || a.Path != b.Path || a.FSType != b.FSType || !slicesEqual(a.Options, b.Options) || a.Pass != b.Pass || a.Freq != b.Freq { - return false - } - return true -} - -// Checks if two string slices are equal. 
-func slicesEqual(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} diff --git a/pkg/mountinfo/mountinfo_others.go b/pkg/mountinfo/mountinfo_others.go deleted file mode 100644 index 9f25e882..00000000 --- a/pkg/mountinfo/mountinfo_others.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !linux,!windows - -/* - * MinIO Cloud Storage, (C) 2017, 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package mountinfo - -// CheckCrossDevice - check if any input path has multiple sub-mounts. -// this is a dummy function and returns nil for now. -func CheckCrossDevice(paths []string) error { - return nil -} - -// IsLikelyMountPoint determines if a directory is a mountpoint. -func IsLikelyMountPoint(file string) bool { - return false -} diff --git a/pkg/mountinfo/mountinfo_windows.go b/pkg/mountinfo/mountinfo_windows.go deleted file mode 100644 index dc8ef340..00000000 --- a/pkg/mountinfo/mountinfo_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -// +build windows - -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package mountinfo - -import ( - "path/filepath" - "sync" - - "golang.org/x/sys/windows" -) - -// CheckCrossDevice - check if any input path has multiple sub-mounts. -// this is a dummy function and returns nil for now. -func CheckCrossDevice(paths []string) error { - return nil -} - -// mountPointCache contains results of IsLikelyMountPoint -var mountPointCache sync.Map - -// IsLikelyMountPoint determines if a directory is a mountpoint. -func IsLikelyMountPoint(path string) bool { - path = filepath.Dir(path) - if v, ok := mountPointCache.Load(path); ok { - return v.(bool) - } - wpath, _ := windows.UTF16PtrFromString(path) - wvolume := make([]uint16, len(path)+1) - - if err := windows.GetVolumePathName(wpath, &wvolume[0], uint32(len(wvolume))); err != nil { - mountPointCache.Store(path, false) - return false - } - - switch windows.GetDriveType(&wvolume[0]) { - case windows.DRIVE_FIXED, windows.DRIVE_REMOVABLE, windows.DRIVE_REMOTE, windows.DRIVE_RAMDISK: - // Recognize "fixed", "removable", "remote" and "ramdisk" drives as proper drives - // which can be treated as an actual mount-point, rest can be ignored. 
- // https://docs.microsoft.com/en-us/windows/desktop/api/fileapi/nf-fileapi-getdrivetypew - mountPointCache.Store(path, true) - return true - } - mountPointCache.Store(path, false) - return false -} diff --git a/pkg/net/host.go b/pkg/net/host.go deleted file mode 100644 index d20568fe..00000000 --- a/pkg/net/host.go +++ /dev/null @@ -1,170 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package net - -import ( - "encoding/json" - "errors" - "net" - "regexp" - "strings" -) - -var hostLabelRegexp = regexp.MustCompile("^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?$") - -// Host - holds network host IP/name and its port. -type Host struct { - Name string - Port Port - IsPortSet bool -} - -// IsEmpty - returns whether Host is empty or not -func (host Host) IsEmpty() bool { - return host.Name == "" -} - -// String - returns string representation of Host. -func (host Host) String() string { - if !host.IsPortSet { - return host.Name - } - - return net.JoinHostPort(host.Name, host.Port.String()) -} - -// Equal - checks whether given host is equal or not. -func (host Host) Equal(compHost Host) bool { - return host.String() == compHost.String() -} - -// MarshalJSON - converts Host into JSON data -func (host Host) MarshalJSON() ([]byte, error) { - return json.Marshal(host.String()) -} - -// UnmarshalJSON - parses data into Host. -func (host *Host) UnmarshalJSON(data []byte) (err error) { - var s string - if err = json.Unmarshal(data, &s); err != nil { - return err - } - - // Allow empty string - if s == "" { - *host = Host{} - return nil - } - - var h *Host - if h, err = ParseHost(s); err != nil { - return err - } - - *host = *h - return nil -} - -// ParseHost - parses string into Host -func ParseHost(s string) (*Host, error) { - if s == "" { - return nil, errors.New("invalid argument") - } - isValidHost := func(host string) bool { - if host == "" { - return true - } - - if ip := net.ParseIP(host); ip != nil { - return true - } - - // host is not a valid IPv4 or IPv6 address - // host may be a hostname - // refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names - // why checks are done like below - if len(host) < 1 || len(host) > 253 { - return false - } - - for _, label := range strings.Split(host, ".") { - if len(label) < 1 || len(label) > 63 { - return false - } - - if !hostLabelRegexp.MatchString(label) { - return false - } - } - - return true - } - - var port Port - var isPortSet bool - host, portStr, err := net.SplitHostPort(s) - if err != nil { - if !strings.Contains(err.Error(), "missing port in address") { - return nil, err - } - host = s - } else { - if port, err = ParsePort(portStr); err != nil { - return nil, err - } - - isPortSet = true - } - - if host != "" { - host, err = trimIPv6(host) - if err != nil { - return nil, err - } - } - - // IPv6 requires a link-local address on every network interface. - // `%interface` should be preserved. 
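// For example, "[fe80::8097:76eb:b397:e067%wlp2s0]" keeps the "%wlp2s0" zone in the returned Host.Name, while the validity check below runs on the address with the zone stripped.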
- trimmedHost := host - - if i := strings.LastIndex(trimmedHost, "%"); i > -1 { - // `%interface` can be skipped for validity check though. - trimmedHost = trimmedHost[:i] - } - - if !isValidHost(trimmedHost) { - return nil, errors.New("invalid hostname") - } - - return &Host{ - Name: host, - Port: port, - IsPortSet: isPortSet, - }, nil -} - -// IPv6 can be embedded with square brackets. -func trimIPv6(host string) (string, error) { - // `missing ']' in host` error is already handled in `SplitHostPort` - if host[len(host)-1] == ']' { - if host[0] != '[' { - return "", errors.New("missing '[' in host") - } - return host[1:][:len(host)-2], nil - } - return host, nil -} diff --git a/pkg/net/host_test.go b/pkg/net/host_test.go deleted file mode 100644 index 57e786c9..00000000 --- a/pkg/net/host_test.go +++ /dev/null @@ -1,250 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package net - -import ( - "reflect" - "testing" -) - -func TestHostIsEmpty(t *testing.T) { - testCases := []struct { - host Host - expectedResult bool - }{ - {Host{"", 0, false}, true}, - {Host{"", 0, true}, true}, - {Host{"play", 9000, false}, false}, - {Host{"play", 9000, true}, false}, - } - - for i, testCase := range testCases { - result := testCase.host.IsEmpty() - - if result != testCase.expectedResult { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestHostString(t *testing.T) { - testCases := []struct { - host Host - expectedStr string - }{ - {Host{"", 0, false}, ""}, - {Host{"", 0, true}, ":0"}, - {Host{"play", 9000, false}, "play"}, - {Host{"play", 9000, true}, "play:9000"}, - } - - for i, testCase := range testCases { - str := testCase.host.String() - - if str != testCase.expectedStr { - t.Fatalf("test %v: string: expected: %v, got: %v", i+1, testCase.expectedStr, str) - } - } -} - -func TestHostEqual(t *testing.T) { - testCases := []struct { - host Host - compHost Host - expectedResult bool - }{ - {Host{"", 0, false}, Host{"", 0, true}, false}, - {Host{"play", 9000, true}, Host{"play", 9000, false}, false}, - {Host{"", 0, true}, Host{"", 0, true}, true}, - {Host{"play", 9000, false}, Host{"play", 9000, false}, true}, - {Host{"play", 9000, true}, Host{"play", 9000, true}, true}, - } - - for i, testCase := range testCases { - result := testCase.host.Equal(testCase.compHost) - - if result != testCase.expectedResult { - t.Fatalf("test %v: string: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestHostMarshalJSON(t *testing.T) { - testCases := []struct { - host Host - expectedData []byte - expectErr bool - }{ - {Host{}, []byte(`""`), false}, - {Host{"play", 0, false}, []byte(`"play"`), false}, - {Host{"play", 0, true}, []byte(`"play:0"`), false}, - {Host{"play", 9000, true}, []byte(`"play:9000"`), false}, - {Host{"play.min.io", 0, false}, []byte(`"play.min.io"`), false}, - {Host{"play.min.io", 9000, true}, []byte(`"play.min.io:9000"`), false}, - 
{Host{"147.75.201.93", 0, false}, []byte(`"147.75.201.93"`), false}, - {Host{"147.75.201.93", 9000, true}, []byte(`"147.75.201.93:9000"`), false}, - {Host{"play12", 0, false}, []byte(`"play12"`), false}, - {Host{"12play", 0, false}, []byte(`"12play"`), false}, - {Host{"play-minio-io", 0, false}, []byte(`"play-minio-io"`), false}, - {Host{"play--min.io", 0, false}, []byte(`"play--min.io"`), false}, - } - - for i, testCase := range testCases { - data, err := testCase.host.MarshalJSON() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(data, testCase.expectedData) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) - } - } - } -} - -func TestHostUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedHost *Host - expectErr bool - }{ - {[]byte(`""`), &Host{}, false}, - {[]byte(`"play"`), &Host{"play", 0, false}, false}, - {[]byte(`"play:0"`), &Host{"play", 0, true}, false}, - {[]byte(`"play:9000"`), &Host{"play", 9000, true}, false}, - {[]byte(`"play.min.io"`), &Host{"play.min.io", 0, false}, false}, - {[]byte(`"play.min.io:9000"`), &Host{"play.min.io", 9000, true}, false}, - {[]byte(`"147.75.201.93"`), &Host{"147.75.201.93", 0, false}, false}, - {[]byte(`"147.75.201.93:9000"`), &Host{"147.75.201.93", 9000, true}, false}, - {[]byte(`"play12"`), &Host{"play12", 0, false}, false}, - {[]byte(`"12play"`), &Host{"12play", 0, false}, false}, - {[]byte(`"play-minio-io"`), &Host{"play-minio-io", 0, false}, false}, - {[]byte(`"play--min.io"`), &Host{"play--min.io", 0, false}, false}, - {[]byte(`":9000"`), &Host{"", 9000, true}, false}, - {[]byte(`"[fe80::8097:76eb:b397:e067%wlp2s0]"`), &Host{"fe80::8097:76eb:b397:e067%wlp2s0", 0, false}, false}, - {[]byte(`"[fe80::8097:76eb:b397:e067]:9000"`), &Host{"fe80::8097:76eb:b397:e067", 9000, true}, false}, - {[]byte(`"fe80::8097:76eb:b397:e067%wlp2s0"`), nil, true}, - {[]byte(`"fe80::8097:76eb:b397:e067%wlp2s0]"`), nil, true}, - {[]byte(`"[fe80::8097:76eb:b397:e067%wlp2s0"`), nil, true}, - {[]byte(`"[[fe80::8097:76eb:b397:e067%wlp2s0]]"`), nil, true}, - {[]byte(`"[[fe80::8097:76eb:b397:e067%wlp2s0"`), nil, true}, - {[]byte(`"play:"`), nil, true}, - {[]byte(`"play::"`), nil, true}, - {[]byte(`"play:90000"`), nil, true}, - {[]byte(`"play:-10"`), nil, true}, - {[]byte(`"play-"`), nil, true}, - {[]byte(`"play.minio..io"`), nil, true}, - {[]byte(`":"`), nil, true}, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run("", func(t *testing.T) { - var host Host - err := host.UnmarshalJSON(testCase.data) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Errorf("error: expected: %v, got: %v", testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(&host, testCase.expectedHost) { - t.Errorf("host: expected: %#v, got: %#v", testCase.expectedHost, host) - } - } - }) - } -} - -func TestParseHost(t *testing.T) { - testCases := []struct { - s string - expectedHost *Host - expectErr bool - }{ - {"play", &Host{"play", 0, false}, false}, - {"play:0", &Host{"play", 0, true}, false}, - {"play:9000", &Host{"play", 9000, true}, false}, - {"play.min.io", &Host{"play.min.io", 0, false}, false}, - {"play.min.io:9000", &Host{"play.min.io", 9000, true}, false}, - {"147.75.201.93", &Host{"147.75.201.93", 0, false}, false}, - {"147.75.201.93:9000", &Host{"147.75.201.93", 9000, 
true}, false}, - {"play12", &Host{"play12", 0, false}, false}, - {"12play", &Host{"12play", 0, false}, false}, - {"play-minio-io", &Host{"play-minio-io", 0, false}, false}, - {"play--min.io", &Host{"play--min.io", 0, false}, false}, - {":9000", &Host{"", 9000, true}, false}, - {"play:", nil, true}, - {"play::", nil, true}, - {"play:90000", nil, true}, - {"play:-10", nil, true}, - {"play-", nil, true}, - {"play.minio..io", nil, true}, - {":", nil, true}, - {"", nil, true}, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run("", func(t *testing.T) { - host, err := ParseHost(testCase.s) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Errorf("error: expected: %v, got: %v", testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(host, testCase.expectedHost) { - t.Errorf("host: expected: %#v, got: %#v", testCase.expectedHost, host) - } - } - }) - } -} - -func TestTrimIPv6(t *testing.T) { - testCases := []struct { - IP string - expectedIP string - expectErr bool - }{ - {"[fe80::8097:76eb:b397:e067%wlp2s0]", "fe80::8097:76eb:b397:e067%wlp2s0", false}, - {"fe80::8097:76eb:b397:e067%wlp2s0]", "fe80::8097:76eb:b397:e067%wlp2s0", true}, - {"[fe80::8097:76eb:b397:e067%wlp2s0]]", "fe80::8097:76eb:b397:e067%wlp2s0]", false}, - {"[[fe80::8097:76eb:b397:e067%wlp2s0]]", "[fe80::8097:76eb:b397:e067%wlp2s0]", false}, - } - - for i, testCase := range testCases { - ip, err := trimIPv6(testCase.IP) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if ip != testCase.expectedIP { - t.Fatalf("test %v: IP: expected: %#v, got: %#v", i+1, testCase.expectedIP, ip) - } - } - } -} diff --git a/pkg/net/obd.go b/pkg/net/obd.go deleted file mode 100644 index 89bd6db1..00000000 --- a/pkg/net/obd.go +++ /dev/null @@ -1,116 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package net - -import ( - "github.com/montanaflynn/stats" -) - -// Latency holds latency information for read/write operations to the drive -type Latency struct { - Avg float64 `json:"avg_secs,omitempty"` - Percentile50 float64 `json:"percentile50_secs,omitempty"` - Percentile90 float64 `json:"percentile90_secs,omitempty"` - Percentile99 float64 `json:"percentile99_secs,omitempty"` - Min float64 `json:"min_secs,omitempty"` - Max float64 `json:"max_secs,omitempty"` -} - -// Throughput holds throughput information for read/write operations to the drive -type Throughput struct { - Avg float64 `json:"avg_bytes_per_sec,omitempty"` - Percentile50 float64 `json:"percentile50_bytes_per_sec,omitempty"` - Percentile90 float64 `json:"percentile90_bytes_per_sec,omitempty"` - Percentile99 float64 `json:"percentile99_bytes_per_sec,omitempty"` - Min float64 `json:"min_bytes_per_sec,omitempty"` - Max float64 `json:"max_bytes_per_sec,omitempty"` -} - -// ComputeOBDStats takes arrays of Latency & Throughput to compute Statistics -func ComputeOBDStats(latencies, throughputs []float64) (Latency, Throughput, error) { - var avgLatency float64 - var percentile50Latency float64 - var percentile90Latency float64 - var percentile99Latency float64 - var minLatency float64 - var maxLatency float64 - - var avgThroughput float64 - var percentile50Throughput float64 - var percentile90Throughput float64 - var percentile99Throughput float64 - var minThroughput float64 - var maxThroughput float64 - var err error - - if avgLatency, err = stats.Mean(latencies); err != nil { - return Latency{}, Throughput{}, err - } - if percentile50Latency, err = stats.Percentile(latencies, 50); err != nil { - return Latency{}, Throughput{}, err - } - if percentile90Latency, err = stats.Percentile(latencies, 90); err != nil { - return Latency{}, Throughput{}, err - } - if percentile99Latency, err = stats.Percentile(latencies, 99); err != nil { - return Latency{}, Throughput{}, err - } - if maxLatency, err = stats.Max(latencies); err != nil { - return Latency{}, Throughput{}, err - } - if minLatency, err = stats.Min(latencies); err != nil { - return Latency{}, Throughput{}, err - } - l := Latency{ - Avg: avgLatency, - Percentile50: percentile50Latency, - Percentile90: percentile90Latency, - Percentile99: percentile99Latency, - Min: minLatency, - Max: maxLatency, - } - - if avgThroughput, err = stats.Mean(throughputs); err != nil { - return Latency{}, Throughput{}, err - } - if percentile50Throughput, err = stats.Percentile(throughputs, 50); err != nil { - return Latency{}, Throughput{}, err - } - if percentile90Throughput, err = stats.Percentile(throughputs, 90); err != nil { - return Latency{}, Throughput{}, err - } - if percentile99Throughput, err = stats.Percentile(throughputs, 99); err != nil { - return Latency{}, Throughput{}, err - } - if maxThroughput, err = stats.Max(throughputs); err != nil { - return Latency{}, Throughput{}, err - } - if minThroughput, err = stats.Min(throughputs); err != nil { - return Latency{}, Throughput{}, err - } - t := Throughput{ - Avg: avgThroughput, - Percentile50: percentile50Throughput, - Percentile90: percentile90Throughput, - Percentile99: percentile99Throughput, - Min: minThroughput, - Max: maxThroughput, - } - - return l, t, nil -} diff --git a/pkg/net/port.go b/pkg/net/port.go deleted file mode 100644 index db731970..00000000 --- a/pkg/net/port.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package net - -import ( - "errors" - "strconv" -) - -// Port - network port -type Port uint16 - -// String - returns string representation of port. -func (p Port) String() string { - return strconv.Itoa(int(p)) -} - -// ParsePort - parses string into Port -func ParsePort(s string) (p Port, err error) { - if s == "https" { - return Port(443), nil - } else if s == "http" { - return Port(80), nil - } - - var i int - if i, err = strconv.Atoi(s); err != nil { - return p, errors.New("invalid port number") - } - - if i < 0 || i > 65535 { - return p, errors.New("port must be between 0 to 65535") - } - - return Port(i), nil -} diff --git a/pkg/net/port_test.go b/pkg/net/port_test.go deleted file mode 100644 index db04e9e0..00000000 --- a/pkg/net/port_test.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package net - -import ( - "testing" -) - -func TestPortString(t *testing.T) { - testCases := []struct { - port Port - expectedStr string - }{ - {Port(0), "0"}, - {Port(9000), "9000"}, - {Port(65535), "65535"}, - {Port(1024), "1024"}, - } - - for i, testCase := range testCases { - str := testCase.port.String() - - if str != testCase.expectedStr { - t.Fatalf("test %v: error: port: %v, got: %v", i+1, testCase.expectedStr, str) - } - } -} - -func TestParsePort(t *testing.T) { - testCases := []struct { - s string - expectedPort Port - expectErr bool - }{ - {"0", Port(0), false}, - {"9000", Port(9000), false}, - {"65535", Port(65535), false}, - {"http", Port(80), false}, - {"https", Port(443), false}, - {"90000", Port(0), true}, - {"-10", Port(0), true}, - {"", Port(0), true}, - {" 1024", Port(0), true}, - } - - for i, testCase := range testCases { - port, err := ParsePort(testCase.s) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if port != testCase.expectedPort { - t.Fatalf("test %v: error: port: %v, got: %v", i+1, testCase.expectedPort, port) - } - } - } -} diff --git a/pkg/net/url.go b/pkg/net/url.go deleted file mode 100644 index 1979455b..00000000 --- a/pkg/net/url.go +++ /dev/null @@ -1,180 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package net - -import ( - "encoding/json" - "errors" - "fmt" - "net" - "net/url" - "path" - "strings" -) - -// URL - improved JSON friendly url.URL. -type URL url.URL - -// IsEmpty - checks URL is empty or not. -func (u URL) IsEmpty() bool { - return u.String() == "" -} - -// String - returns string representation of URL. -func (u URL) String() string { - // if port number 80 and 443, remove for http and https scheme respectively - if u.Host != "" { - host, err := ParseHost(u.Host) - if err != nil { - panic(err) - } - switch { - case u.Scheme == "http" && host.Port == 80: - fallthrough - case u.Scheme == "https" && host.Port == 443: - u.Host = host.Name - } - } - - uu := url.URL(u) - return uu.String() -} - -// MarshalJSON - converts to JSON string data. -func (u URL) MarshalJSON() ([]byte, error) { - return json.Marshal(u.String()) -} - -// UnmarshalJSON - parses given data into URL. -func (u *URL) UnmarshalJSON(data []byte) (err error) { - var s string - if err = json.Unmarshal(data, &s); err != nil { - return err - } - - // Allow empty string - if s == "" { - *u = URL{} - return nil - } - - var ru *URL - if ru, err = ParseURL(s); err != nil { - return err - } - - *u = *ru - return nil -} - -// ParseHTTPURL - parses a string into HTTP URL, string is -// expected to be of form http:// or https:// -func ParseHTTPURL(s string) (u *URL, err error) { - u, err = ParseURL(s) - if err != nil { - return nil, err - } - switch u.Scheme { - default: - return nil, fmt.Errorf("unexpected scheme found %s", u.Scheme) - case "http", "https": - return u, nil - } -} - -// ParseURL - parses string into URL. -func ParseURL(s string) (u *URL, err error) { - var uu *url.URL - if uu, err = url.Parse(s); err != nil { - return nil, err - } - - if uu.Hostname() == "" { - if uu.Scheme != "" { - return nil, errors.New("scheme appears with empty host") - } - } else { - portStr := uu.Port() - if portStr == "" { - switch uu.Scheme { - case "http": - portStr = "80" - case "https": - portStr = "443" - } - } - if _, err = ParseHost(net.JoinHostPort(uu.Hostname(), portStr)); err != nil { - return nil, err - } - } - - // Clean path in the URL. - // Note: path.Clean() is used on purpose because in MS Windows filepath.Clean() converts - // `/` into `\` ie `/foo` becomes `\foo` - if uu.Path != "" { - uu.Path = path.Clean(uu.Path) - } - - // path.Clean removes the trailing '/' and converts '//' to '/'. - if strings.HasSuffix(s, "/") && !strings.HasSuffix(uu.Path, "/") { - uu.Path += "/" - } - - v := URL(*uu) - u = &v - return u, nil -} - -// IsNetworkOrHostDown - if there was a network error or if the host is down. -func IsNetworkOrHostDown(err error) bool { - if err == nil { - return false - } - // We need to figure if the error either a timeout - // or a non-temporary error. - e, ok := err.(net.Error) - if ok { - urlErr, ok := e.(*url.Error) - if ok { - switch urlErr.Err.(type) { - case *net.DNSError, *net.OpError, net.UnknownNetworkError: - return true - } - } - if e.Timeout() { - return true - } - } - ok = false - // Fallback to other mechanisms. 
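// The substring checks below are a best-effort fallback for failures not matched above, e.g. wrapped timeouts or "503 Service Unavailable" responses that only surface as plain error strings.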
- if strings.Contains(err.Error(), "Connection closed by foreign host") { - ok = true - } else if strings.Contains(err.Error(), "TLS handshake timeout") { - // If error is - tlsHandshakeTimeoutError. - ok = true - } else if strings.Contains(err.Error(), "i/o timeout") { - // If error is - tcp timeoutError. - ok = true - } else if strings.Contains(err.Error(), "connection timed out") { - // If err is a net.Dial timeout. - ok = true - } else if strings.Contains(strings.ToLower(err.Error()), "503 service unavailable") { - // Denial errors - ok = true - } - return ok -} diff --git a/pkg/net/url_test.go b/pkg/net/url_test.go deleted file mode 100644 index 5d9973bf..00000000 --- a/pkg/net/url_test.go +++ /dev/null @@ -1,204 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package net - -import ( - "reflect" - "testing" -) - -func TestURLIsEmpty(t *testing.T) { - testCases := []struct { - url URL - expectedResult bool - }{ - {URL{}, true}, - {URL{Scheme: "http", Host: "play"}, false}, - {URL{Path: "path/to/play"}, false}, - } - - for i, testCase := range testCases { - result := testCase.url.IsEmpty() - - if result != testCase.expectedResult { - t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) - } - } -} - -func TestURLString(t *testing.T) { - testCases := []struct { - url URL - expectedStr string - }{ - {URL{}, ""}, - {URL{Scheme: "http", Host: "play"}, "http://play"}, - {URL{Scheme: "https", Host: "play:443"}, "https://play"}, - {URL{Scheme: "https", Host: "play.min.io:80"}, "https://play.min.io:80"}, - {URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, "https://147.75.201.93:9000/"}, - {URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, "https://s3.amazonaws.com/?location"}, - {URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, "http://myminio:10000/mybucket/myobject"}, - {URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, "ftp://myftp.server:10000/myuser"}, - {URL{Path: "path/to/play"}, "path/to/play"}, - } - - for i, testCase := range testCases { - str := testCase.url.String() - - if str != testCase.expectedStr { - t.Fatalf("test %v: string: expected: %v, got: %v", i+1, testCase.expectedStr, str) - } - } -} - -func TestURLMarshalJSON(t *testing.T) { - testCases := []struct { - url URL - expectedData []byte - expectErr bool - }{ - {URL{}, []byte(`""`), false}, - {URL{Scheme: "http", Host: "play"}, []byte(`"http://play"`), false}, - {URL{Scheme: "https", Host: "play.min.io:0"}, []byte(`"https://play.min.io:0"`), false}, - {URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, []byte(`"https://147.75.201.93:9000/"`), false}, - {URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, []byte(`"https://s3.amazonaws.com/?location"`), false}, - {URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, []byte(`"http://myminio:10000/mybucket/myobject"`), false}, - 
{URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, []byte(`"ftp://myftp.server:10000/myuser"`), false}, - } - - for i, testCase := range testCases { - data, err := testCase.url.MarshalJSON() - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(data, testCase.expectedData) { - t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) - } - } - } -} - -func TestURLUnmarshalJSON(t *testing.T) { - testCases := []struct { - data []byte - expectedURL *URL - expectErr bool - }{ - {[]byte(`""`), &URL{}, false}, - {[]byte(`"http://play"`), &URL{Scheme: "http", Host: "play"}, false}, - {[]byte(`"https://play.min.io:0"`), &URL{Scheme: "https", Host: "play.min.io:0"}, false}, - {[]byte(`"https://147.75.201.93:9000/"`), &URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, false}, - {[]byte(`"https://s3.amazonaws.com/?location"`), &URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, false}, - {[]byte(`"http://myminio:10000/mybucket/myobject//"`), &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject/"}, false}, - {[]byte(`"ftp://myftp.server:10000/myuser"`), &URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, false}, - {[]byte(`"http://webhook.server:10000/mywebhook/"`), &URL{Scheme: "http", Host: "webhook.server:10000", Path: "/mywebhook/"}, false}, - {[]byte(`"myserver:1000"`), nil, true}, - {[]byte(`"http://:1000/mybucket"`), nil, true}, - {[]byte(`"https://147.75.201.93:90000/"`), nil, true}, - {[]byte(`"http:/play"`), nil, true}, - } - - for i, testCase := range testCases { - var url URL - err := url.UnmarshalJSON(testCase.data) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(&url, testCase.expectedURL) { - t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedURL, url) - } - } - } -} - -func TestParseHTTPURL(t *testing.T) { - testCases := []struct { - s string - expectedURL *URL - expectErr bool - }{ - {"http://play", &URL{Scheme: "http", Host: "play"}, false}, - {"https://play.min.io:0", &URL{Scheme: "https", Host: "play.min.io:0"}, false}, - {"https://147.75.201.93:9000/", &URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, false}, - {"https://s3.amazonaws.com/?location", &URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, false}, - {"http://myminio:10000/mybucket//myobject/", &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject/"}, false}, - {"ftp://myftp.server:10000/myuser", nil, true}, - {"https://my.server:10000000/myuser", nil, true}, - {"myserver:1000", nil, true}, - {"http://:1000/mybucket", nil, true}, - {"https://147.75.201.93:90000/", nil, true}, - {"http:/play", nil, true}, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.s, func(t *testing.T) { - url, err := ParseHTTPURL(testCase.s) - expectErr := (err != nil) - if expectErr != testCase.expectErr { - t.Fatalf("error: expected: %v, got: %v", testCase.expectErr, expectErr) - } - if !testCase.expectErr { - if !reflect.DeepEqual(url, testCase.expectedURL) { - t.Fatalf("host: expected: %#v, got: %#v", testCase.expectedURL, url) - } - } - }) - } -} - -func TestParseURL(t *testing.T) { - 
testCases := []struct { - s string - expectedURL *URL - expectErr bool - }{ - {"http://play", &URL{Scheme: "http", Host: "play"}, false}, - {"https://play.min.io:0", &URL{Scheme: "https", Host: "play.min.io:0"}, false}, - {"https://147.75.201.93:9000/", &URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, false}, - {"https://s3.amazonaws.com/?location", &URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, false}, - {"http://myminio:10000/mybucket//myobject/", &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject/"}, false}, - {"ftp://myftp.server:10000/myuser", &URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, false}, - {"myserver:1000", nil, true}, - {"http://:1000/mybucket", nil, true}, - {"https://147.75.201.93:90000/", nil, true}, - {"http:/play", nil, true}, - } - - for i, testCase := range testCases { - url, err := ParseURL(testCase.s) - expectErr := (err != nil) - - if expectErr != testCase.expectErr { - t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) - } - - if !testCase.expectErr { - if !reflect.DeepEqual(url, testCase.expectedURL) { - t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedURL, url) - } - } - } -} diff --git a/pkg/pubsub/pubsub.go b/pkg/pubsub/pubsub.go deleted file mode 100644 index 2e082078..00000000 --- a/pkg/pubsub/pubsub.go +++ /dev/null @@ -1,84 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package pubsub - -import ( - "sync" -) - -// Sub - subscriber entity. -type Sub struct { - ch chan interface{} - filter func(entry interface{}) bool -} - -// PubSub holds publishers and subscribers -type PubSub struct { - subs []*Sub - sync.RWMutex -} - -// Publish message to the subscribers. -// Note that publish is always nob-blocking send so that we don't block on slow receivers. -// Hence receivers should use buffered channel so as not to miss the published events. -func (ps *PubSub) Publish(item interface{}) { - ps.RLock() - defer ps.RUnlock() - - for _, sub := range ps.subs { - if sub.filter == nil || sub.filter(item) { - select { - case sub.ch <- item: - default: - } - } - } -} - -// Subscribe - Adds a subscriber to pubsub system -func (ps *PubSub) Subscribe(subCh chan interface{}, doneCh <-chan struct{}, filter func(entry interface{}) bool) { - ps.Lock() - defer ps.Unlock() - - sub := &Sub{subCh, filter} - ps.subs = append(ps.subs, sub) - - go func() { - <-doneCh - - ps.Lock() - defer ps.Unlock() - - for i, s := range ps.subs { - if s == sub { - ps.subs = append(ps.subs[:i], ps.subs[i+1:]...) 
- } - } - }() -} - -// HasSubscribers returns true if pubsub system has subscribers -func (ps *PubSub) HasSubscribers() bool { - ps.RLock() - defer ps.RUnlock() - return len(ps.subs) > 0 -} - -// New inits a PubSub system -func New() *PubSub { - return &PubSub{} -} diff --git a/pkg/pubsub/pubsub_test.go b/pkg/pubsub/pubsub_test.go deleted file mode 100644 index f30a71a2..00000000 --- a/pkg/pubsub/pubsub_test.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package pubsub - -import ( - "fmt" - "testing" - "time" -) - -func TestSubscribe(t *testing.T) { - ps := New() - ch1 := make(chan interface{}, 1) - ch2 := make(chan interface{}, 1) - doneCh := make(chan struct{}) - defer close(doneCh) - ps.Subscribe(ch1, doneCh, nil) - ps.Subscribe(ch2, doneCh, nil) - ps.Lock() - defer ps.Unlock() - if len(ps.subs) != 2 { - t.Errorf("expected 2 subscribers") - } -} - -func TestUnsubscribe(t *testing.T) { - ps := New() - ch1 := make(chan interface{}, 1) - ch2 := make(chan interface{}, 1) - doneCh1 := make(chan struct{}) - doneCh2 := make(chan struct{}) - ps.Subscribe(ch1, doneCh1, nil) - ps.Subscribe(ch2, doneCh2, nil) - - close(doneCh1) - // Allow for the above statement to take effect. - time.Sleep(100 * time.Millisecond) - ps.Lock() - if len(ps.subs) != 1 { - t.Errorf("expected 1 subscriber") - } - ps.Unlock() - close(doneCh2) -} - -func TestPubSub(t *testing.T) { - ps := New() - ch1 := make(chan interface{}, 1) - doneCh1 := make(chan struct{}) - defer close(doneCh1) - ps.Subscribe(ch1, doneCh1, func(entry interface{}) bool { return true }) - val := "hello" - ps.Publish(val) - msg := <-ch1 - if msg != "hello" { - t.Errorf(fmt.Sprintf("expected %s , found %s", val, msg)) - } -} - -func TestMultiPubSub(t *testing.T) { - ps := New() - ch1 := make(chan interface{}, 1) - ch2 := make(chan interface{}, 1) - doneCh := make(chan struct{}) - defer close(doneCh) - ps.Subscribe(ch1, doneCh, func(entry interface{}) bool { return true }) - ps.Subscribe(ch2, doneCh, func(entry interface{}) bool { return true }) - val := "hello" - ps.Publish(val) - - msg1 := <-ch1 - msg2 := <-ch2 - if msg1 != "hello" && msg2 != "hello" { - t.Errorf(fmt.Sprintf("expected both subscribers to have%s , found %s and %s", val, msg1, msg2)) - } -} diff --git a/pkg/quick/encoding.go b/pkg/quick/encoding.go deleted file mode 100644 index 3ac0f25b..00000000 --- a/pkg/quick/encoding.go +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Quick - Quick key value store for config files and persistent state files - * - * Quick (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package quick - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "time" - - etcd "github.com/coreos/etcd/clientv3" - yaml "gopkg.in/yaml.v2" -) - -// ConfigEncoding is a generic interface which -// marshal/unmarshal configuration. -type ConfigEncoding interface { - Unmarshal([]byte, interface{}) error - Marshal(interface{}) ([]byte, error) -} - -// YAML encoding implements ConfigEncoding -type yamlEncoding struct{} - -func (y yamlEncoding) Unmarshal(b []byte, v interface{}) error { - return yaml.Unmarshal(b, v) -} - -func (y yamlEncoding) Marshal(v interface{}) ([]byte, error) { - return yaml.Marshal(v) -} - -// JSON encoding implements ConfigEncoding -type jsonEncoding struct{} - -func (j jsonEncoding) Unmarshal(b []byte, v interface{}) error { - err := json.Unmarshal(b, v) - if err != nil { - // Try to return a sophisticated json error message if possible - switch jerr := err.(type) { - case *json.SyntaxError: - return fmt.Errorf("Unable to parse JSON schema due to a syntax error at '%s'", - FormatJSONSyntaxError(bytes.NewReader(b), jerr.Offset)) - case *json.UnmarshalTypeError: - return fmt.Errorf("Unable to parse JSON, type '%v' cannot be converted into the Go '%v' type", - jerr.Value, jerr.Type) - } - return err - } - return nil -} - -func (j jsonEncoding) Marshal(v interface{}) ([]byte, error) { - return json.MarshalIndent(v, "", "\t") -} - -// Convert a file extension to the appropriate struct capable -// to marshal/unmarshal data -func ext2EncFormat(fileExtension string) ConfigEncoding { - // Lower the file extension - ext := strings.ToLower(fileExtension) - ext = strings.TrimPrefix(ext, ".") - // Return the appropriate encoder/decoder according - // to the extension - switch ext { - case "yml", "yaml": - // YAML - return yamlEncoding{} - default: - // JSON - return jsonEncoding{} - } -} - -// toMarshaller returns the right marshal function according -// to the given file extension -func toMarshaller(ext string) func(interface{}) ([]byte, error) { - return ext2EncFormat(ext).Marshal -} - -// toUnmarshaller returns the right marshal function according -// to the given file extension -func toUnmarshaller(ext string) func([]byte, interface{}) error { - return ext2EncFormat(ext).Unmarshal -} - -// saveFileConfig marshals with the right encoding format -// according to the filename extension, if no extension is -// provided, json will be selected. -func saveFileConfig(filename string, v interface{}) error { - // Fetch filename's extension - ext := filepath.Ext(filename) - // Marshal data - dataBytes, err := toMarshaller(ext)(v) - if err != nil { - return err - } - if runtime.GOOS == "windows" { - dataBytes = []byte(strings.Replace(string(dataBytes), "\n", "\r\n", -1)) - } - // Save data. 
- return writeFile(filename, dataBytes) - -} - -func saveFileConfigEtcd(filename string, clnt *etcd.Client, v interface{}) error { - // Fetch filename's extension - ext := filepath.Ext(filename) - // Marshal data - dataBytes, err := toMarshaller(ext)(v) - if err != nil { - return err - } - if runtime.GOOS == "windows" { - dataBytes = []byte(strings.Replace(string(dataBytes), "\n", "\r\n", -1)) - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - _, err = clnt.Put(ctx, filename, string(dataBytes)) - if err == context.DeadlineExceeded { - return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", clnt.Endpoints()) - } else if err != nil { - return fmt.Errorf("unexpected error %w returned by etcd setup, please check your endpoints %s", err, clnt.Endpoints()) - } - return nil -} - -func loadFileConfigEtcd(filename string, clnt *etcd.Client, v interface{}) error { - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - resp, err := clnt.Get(ctx, filename) - if err != nil { - if err == context.DeadlineExceeded { - return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", clnt.Endpoints()) - } - return fmt.Errorf("unexpected error %w returned by etcd setup, please check your endpoints %s", err, clnt.Endpoints()) - } - if resp.Count == 0 { - return os.ErrNotExist - } - - for _, ev := range resp.Kvs { - if string(ev.Key) == filename { - fileData := ev.Value - if runtime.GOOS == "windows" { - fileData = bytes.Replace(fileData, []byte("\r\n"), []byte("\n"), -1) - } - // Unmarshal file's content - return toUnmarshaller(filepath.Ext(filename))(fileData, v) - } - } - return os.ErrNotExist -} - -// loadFileConfig unmarshals the file's content with the right -// decoder format according to the filename extension. If no -// extension is provided, json will be selected by default. -func loadFileConfig(filename string, v interface{}) error { - fileData, err := ioutil.ReadFile(filename) - if err != nil { - return err - } - if runtime.GOOS == "windows" { - fileData = []byte(strings.Replace(string(fileData), "\r\n", "\n", -1)) - } - - // Unmarshal file's content - return toUnmarshaller(filepath.Ext(filename))(fileData, v) -} diff --git a/pkg/quick/errorutil.go b/pkg/quick/errorutil.go deleted file mode 100644 index 33bc82c4..00000000 --- a/pkg/quick/errorutil.go +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Quick - Quick key value store for config files and persistent state files - * - * Quick (C) 2015 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package quick - -import ( - "bufio" - "bytes" - "fmt" - "io" - - "github.com/cheggaaa/pb" -) - -const errorFmt = "%5d: %s <<<<" - -// FormatJSONSyntaxError generates a pretty printed json syntax error since -// golang doesn't provide an easy way to report the location of the error -func FormatJSONSyntaxError(data io.Reader, offset int64) (highlight string) { - var readLine bytes.Buffer - var errLine = 1 - var readBytes int64 - - bio := bufio.NewReader(data) - - // termWidth is set to a default one to use when we are - // not able to calculate terminal width via OS syscalls - termWidth := 25 - - // errorShift is the length of the minimum needed place for - // error msg accessories, like <--, etc.. We calculate it - // dynamically to avoid an eventual bug after modifying errorFmt - errorShift := len(fmt.Sprintf(errorFmt, 1, "")) - - if width, err := pb.GetTerminalWidth(); err == nil { - termWidth = width - } - - for { - b, err := bio.ReadByte() - if err != nil { - break - } - readBytes++ - if readBytes > offset { - break - } - if b == '\n' { - readLine.Reset() - errLine++ - continue - } else if b == '\t' { - readLine.WriteByte(' ') - } else if b == '\r' { - break - } - readLine.WriteByte(b) - } - - lineLen := readLine.Len() - idx := lineLen - termWidth + errorShift - if idx < 0 || idx > lineLen-1 { - idx = 0 - } - - return fmt.Sprintf(errorFmt, errLine, readLine.String()[idx:]) -} diff --git a/pkg/quick/quick.go b/pkg/quick/quick.go deleted file mode 100644 index d1b23f18..00000000 --- a/pkg/quick/quick.go +++ /dev/null @@ -1,237 +0,0 @@ -/* - * Quick - Quick key value store for config files and persistent state files - * - * Quick (C) 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package quick - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "reflect" - "sync" - - etcd "github.com/coreos/etcd/clientv3" - "github.com/fatih/structs" - "github.com/minio/minio/pkg/safe" -) - -// Config - generic config interface functions -type Config interface { - String() string - Version() string - Save(string) error - Load(string) error - Data() interface{} - Diff(Config) ([]structs.Field, error) - DeepDiff(Config) ([]structs.Field, error) -} - -// config - implements quick.Config interface -type config struct { - data interface{} - clnt *etcd.Client - lock *sync.RWMutex -} - -// Version returns the current config file format version -func (d config) Version() string { - st := structs.New(d.data) - f := st.Field("Version") - return f.Value().(string) -} - -// String converts JSON config to printable string -func (d config) String() string { - configBytes, _ := json.MarshalIndent(d.data, "", "\t") - return string(configBytes) -} - -// Save writes config data to a file. Data format -// is selected based on file extension or JSON if -// not provided. 
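FormatJSONSyntaxError, shown in errorutil.go above, is what turns a bare json.SyntaxError offset into the line-numbered excerpt that quick's JSON decoder reports. A small sketch of calling it directly, assuming the pre-removal import path github.com/minio/minio/pkg/quick; the exact output depends on terminal width:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/minio/minio/pkg/quick" // import path assumed from the deleted file's location
)

func main() {
	data := []byte(`{"version": "1", "broken": }`)

	var v map[string]interface{}
	if err := json.Unmarshal(data, &v); err != nil {
		if jerr, ok := err.(*json.SyntaxError); ok {
			// Prints the offending line prefixed with its number and
			// suffixed with "<<<<", per errorFmt above.
			fmt.Println(quick.FormatJSONSyntaxError(bytes.NewReader(data), jerr.Offset))
		}
	}
}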
-func (d config) Save(filename string) error { - d.lock.Lock() - defer d.lock.Unlock() - - if d.clnt != nil { - return saveFileConfigEtcd(filename, d.clnt, d.data) - } - - // Backup if given file exists - oldData, err := ioutil.ReadFile(filename) - if err != nil { - // Ignore if file does not exist. - if !os.IsNotExist(err) { - return err - } - } else { - // Save read data to the backup file. - backupFilename := filename + ".old" - if err = writeFile(backupFilename, oldData); err != nil { - return err - } - } - - // Save data. - return saveFileConfig(filename, d.data) -} - -// Load - loads config from file and merge with currently set values -// File content format is guessed from the file name extension, if not -// available, consider that we have JSON. -func (d config) Load(filename string) error { - d.lock.Lock() - defer d.lock.Unlock() - if d.clnt != nil { - return loadFileConfigEtcd(filename, d.clnt, d.data) - } - return loadFileConfig(filename, d.data) -} - -// Data - grab internal data map for reading -func (d config) Data() interface{} { - return d.data -} - -// Diff - list fields that are in A but not in B -func (d config) Diff(c Config) ([]structs.Field, error) { - var fields []structs.Field - - currFields := structs.Fields(d.Data()) - newFields := structs.Fields(c.Data()) - - var found bool - for _, currField := range currFields { - found = false - for _, newField := range newFields { - if reflect.DeepEqual(currField.Name(), newField.Name()) { - found = true - } - } - if !found { - fields = append(fields, *currField) - } - } - return fields, nil -} - -// DeepDiff - list fields in A that are missing or not equal to fields in B -func (d config) DeepDiff(c Config) ([]structs.Field, error) { - var fields []structs.Field - - currFields := structs.Fields(d.Data()) - newFields := structs.Fields(c.Data()) - - var found bool - for _, currField := range currFields { - found = false - for _, newField := range newFields { - if reflect.DeepEqual(currField.Value(), newField.Value()) { - found = true - } - } - if !found { - fields = append(fields, *currField) - } - } - return fields, nil -} - -// CheckData - checks the validity of config data. Data should be of -// type struct and contain a string type field called "Version". -func CheckData(data interface{}) error { - if !structs.IsStruct(data) { - return fmt.Errorf("interface must be struct type") - } - - st := structs.New(data) - f, ok := st.FieldOk("Version") - if !ok { - return fmt.Errorf("struct ‘%s’ must have field ‘Version’", st.Name()) - } - - if f.Kind() != reflect.String { - return fmt.Errorf("‘Version’ field in struct ‘%s’ must be a string type", st.Name()) - } - - return nil -} - -// writeFile writes data to a file named by filename. -// If the file does not exist, writeFile creates it; -// otherwise writeFile truncates it before writing. -func writeFile(filename string, data []byte) error { - safeFile, err := safe.CreateFile(filename) - if err != nil { - return err - } - _, err = safeFile.Write(data) - if err != nil { - return err - } - return safeFile.Close() -} - -// GetVersion - extracts the version information. 
-func GetVersion(filename string, clnt *etcd.Client) (version string, err error) { - var qc Config - qc, err = LoadConfig(filename, clnt, &struct { - Version string - }{}) - if err != nil { - return "", err - } - return qc.Version(), nil -} - -// LoadConfig - loads json config from filename for the a given struct data -func LoadConfig(filename string, clnt *etcd.Client, data interface{}) (qc Config, err error) { - qc, err = NewConfig(data, clnt) - if err != nil { - return nil, err - } - return qc, qc.Load(filename) -} - -// SaveConfig - saves given configuration data into given file as JSON. -func SaveConfig(data interface{}, filename string, clnt *etcd.Client) (err error) { - if err = CheckData(data); err != nil { - return err - } - var qc Config - qc, err = NewConfig(data, clnt) - if err != nil { - return err - } - return qc.Save(filename) -} - -// NewConfig loads config from etcd client if provided, otherwise loads from a local filename. -// fails when all else fails. -func NewConfig(data interface{}, clnt *etcd.Client) (cfg Config, err error) { - if err := CheckData(data); err != nil { - return nil, err - } - - d := new(config) - d.data = data - d.clnt = clnt - d.lock = new(sync.RWMutex) - return d, nil -} diff --git a/pkg/quick/quick_test.go b/pkg/quick/quick_test.go deleted file mode 100644 index 92844a5e..00000000 --- a/pkg/quick/quick_test.go +++ /dev/null @@ -1,506 +0,0 @@ -/* - * Quick - Quick key value store for config files and persistent state files - * - * Quick (C) 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
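Tying the exported quick API above together: the config struct must carry a string Version field (CheckData enforces this), the filename extension selects YAML or JSON, a nil etcd client keeps everything on the local filesystem, and Save also writes a .old backup of the previous contents. A minimal sketch, assuming the pre-removal import path github.com/minio/minio/pkg/quick:

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/pkg/quick" // import path assumed from the deleted file's location
)

type serverConfig struct {
	Version string
	Region  string
}

func main() {
	cfg := serverConfig{Version: "1", Region: "us-east-1"}

	// ".yaml"/".yml" select the YAML encoder, anything else falls back to JSON.
	if err := quick.SaveConfig(&cfg, "config.yaml", nil); err != nil { // nil etcd client: local file
		log.Fatal(err)
	}

	var loaded serverConfig
	if _, err := quick.LoadConfig("config.yaml", nil, &loaded); err != nil {
		log.Fatal(err)
	}

	version, err := quick.GetVersion("config.yaml", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(loaded.Region, "config version", version)
}

Diff and DeepDiff on the returned Config differ in what they compare: Diff flags fields whose names are missing from the other config, while DeepDiff flags fields whose values differ, which is why the tests further below expect different counts.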
- */ - -package quick - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "os" - "reflect" - "runtime" - "strings" - "testing" -) - -func TestReadVersion(t *testing.T) { - type myStruct struct { - Version string - } - saveMe := myStruct{"1"} - config, err := NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - err = config.Save("test.json") - if err != nil { - t.Fatal(err) - } - - version, err := GetVersion("test.json", nil) - if err != nil { - t.Fatal(err) - } - if version != "1" { - t.Fatalf("Expected version '1', got '%v'", version) - } -} - -func TestReadVersionErr(t *testing.T) { - type myStruct struct { - Version int - } - saveMe := myStruct{1} - _, err := NewConfig(&saveMe, nil) - if err == nil { - t.Fatal("Unexpected should fail in initialization for bad input") - } - - err = ioutil.WriteFile("test.json", []byte("{ \"version\":2,"), 0644) - if err != nil { - t.Fatal(err) - } - - _, err = GetVersion("test.json", nil) - if err == nil { - t.Fatal("Unexpected should fail to fetch version") - } - - err = ioutil.WriteFile("test.json", []byte("{ \"version\":2 }"), 0644) - if err != nil { - t.Fatal(err) - } - - _, err = GetVersion("test.json", nil) - if err == nil { - t.Fatal("Unexpected should fail to fetch version") - } -} - -func TestSaveFailOnDir(t *testing.T) { - defer os.RemoveAll("test-1.json") - err := os.MkdirAll("test-1.json", 0644) - if err != nil { - t.Fatal(err) - } - type myStruct struct { - Version string - } - saveMe := myStruct{"1"} - config, err := NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - err = config.Save("test-1.json") - if err == nil { - t.Fatal("Unexpected should fail to save if test-1.json is a directory") - } -} - -func TestCheckData(t *testing.T) { - err := CheckData(nil) - if err == nil { - t.Fatal("Unexpected should fail") - } - - type myStructBadNoVersion struct { - User string - Password string - Directories []string - } - saveMeBadNoVersion := myStructBadNoVersion{"guest", "nopassword", []string{"Work", "Documents", "Music"}} - err = CheckData(&saveMeBadNoVersion) - if err == nil { - t.Fatal("Unexpected should fail if Version is not set") - } - - type myStructBadVersionInt struct { - Version int - User string - Password string - } - saveMeBadVersionInt := myStructBadVersionInt{1, "guest", "nopassword"} - err = CheckData(&saveMeBadVersionInt) - if err == nil { - t.Fatal("Unexpected should fail if Version is integer") - } - - type myStructGood struct { - Version string - User string - Password string - Directories []string - } - - saveMeGood := myStructGood{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - err = CheckData(&saveMeGood) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadFile(t *testing.T) { - type myStruct struct { - Version string - User string - Password string - Directories []string - } - saveMe := myStruct{} - _, err := LoadConfig("test.json", nil, &saveMe) - if err == nil { - t.Fatal(err) - } - - file, err := os.Create("test.json") - if err != nil { - t.Fatal(err) - } - if err = file.Close(); err != nil { - t.Fatal(err) - } - _, err = LoadConfig("test.json", nil, &saveMe) - if err == nil { - t.Fatal("Unexpected should fail to load empty JSON") - } - config, err := NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - err = config.Load("test-non-exist.json") - if err == nil { - t.Fatal("Unexpected should fail to Load non-existent config") - } - - err = config.Load("test.json") - if err == nil { - t.Fatal("Unexpected should fail to load empty JSON") - } - - saveMe = 
myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - config, err = NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - err = config.Save("test.json") - if err != nil { - t.Fatal(err) - } - saveMe1 := myStruct{} - _, err = LoadConfig("test.json", nil, &saveMe1) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(saveMe1, saveMe) { - t.Fatalf("Expected %v, got %v", saveMe1, saveMe) - } - - saveMe2 := myStruct{} - err = json.Unmarshal([]byte(config.String()), &saveMe2) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(saveMe2, saveMe1) { - t.Fatalf("Expected %v, got %v", saveMe2, saveMe1) - } -} - -func TestYAMLFormat(t *testing.T) { - testYAML := "test.yaml" - defer os.RemoveAll(testYAML) - - type myStruct struct { - Version string - User string - Password string - Directories []string - } - - plainYAML := `version: "1" -user: guest -password: nopassword -directories: -- Work -- Documents -- Music -` - - if runtime.GOOS == "windows" { - plainYAML = strings.Replace(plainYAML, "\n", "\r\n", -1) - } - - saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - - // Save format using - config, err := NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - - err = config.Save(testYAML) - if err != nil { - t.Fatal(err) - } - - // Check if the saved structure in actually an YAML format - b, err := ioutil.ReadFile(testYAML) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal([]byte(plainYAML), b) { - t.Fatalf("Expected %v, got %v", plainYAML, string(b)) - } - - // Check if the loaded data is the same as the saved one - loadMe := myStruct{} - config, err = NewConfig(&loadMe, nil) - if err != nil { - t.Fatal(err) - } - - err = config.Load(testYAML) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(saveMe, loadMe) { - t.Fatalf("Expected %v, got %v", saveMe, loadMe) - } -} - -func TestJSONFormat(t *testing.T) { - testJSON := "test.json" - defer os.RemoveAll(testJSON) - - type myStruct struct { - Version string - User string - Password string - Directories []string - } - - plainJSON := `{ - "Version": "1", - "User": "guest", - "Password": "nopassword", - "Directories": [ - "Work", - "Documents", - "Music" - ] -}` - - if runtime.GOOS == "windows" { - plainJSON = strings.Replace(plainJSON, "\n", "\r\n", -1) - } - - saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - - // Save format using - config, err := NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - - err = config.Save(testJSON) - if err != nil { - t.Fatal(err) - } - - // Check if the saved structure in actually an JSON format - b, err := ioutil.ReadFile(testJSON) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal([]byte(plainJSON), b) { - t.Fatalf("Expected %v, got %v", plainJSON, string(b)) - } - - // Check if the loaded data is the same as the saved one - loadMe := myStruct{} - config, err = NewConfig(&loadMe, nil) - if err != nil { - t.Fatal(err) - } - err = config.Load(testJSON) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(saveMe, loadMe) { - t.Fatalf("Expected %v, got %v", saveMe, loadMe) - } -} - -func TestSaveLoad(t *testing.T) { - defer os.RemoveAll("test.json") - type myStruct struct { - Version string - User string - Password string - Directories []string - } - saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - config, err := NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - err = config.Save("test.json") - 
if err != nil { - t.Fatal(err) - } - - loadMe := myStruct{Version: "1"} - newConfig, err := NewConfig(&loadMe, nil) - if err != nil { - t.Fatal(err) - } - err = newConfig.Load("test.json") - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(config.Data(), newConfig.Data()) { - t.Fatalf("Expected %v, got %v", config.Data(), newConfig.Data()) - } - if !reflect.DeepEqual(config.Data(), &loadMe) { - t.Fatalf("Expected %v, got %v", config.Data(), &loadMe) - } - - mismatch := myStruct{"1.1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - if reflect.DeepEqual(config.Data(), &mismatch) { - t.Fatal("Expected to mismatch but succeeded instead") - } -} - -func TestSaveBackup(t *testing.T) { - defer os.RemoveAll("test.json") - defer os.RemoveAll("test.json.old") - type myStruct struct { - Version string - User string - Password string - Directories []string - } - saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - config, err := NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - err = config.Save("test.json") - if err != nil { - t.Fatal(err) - } - - loadMe := myStruct{Version: "1"} - newConfig, err := NewConfig(&loadMe, nil) - if err != nil { - t.Fatal(err) - } - err = newConfig.Load("test.json") - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(config.Data(), newConfig.Data()) { - t.Fatalf("Expected %v, got %v", config.Data(), newConfig.Data()) - } - if !reflect.DeepEqual(config.Data(), &loadMe) { - t.Fatalf("Expected %v, got %v", config.Data(), &loadMe) - } - - mismatch := myStruct{"1.1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - if reflect.DeepEqual(newConfig.Data(), &mismatch) { - t.Fatal("Expected to mismatch but succeeded instead") - } - - config, err = NewConfig(&mismatch, nil) - if err != nil { - t.Fatal(err) - } - err = config.Save("test.json") - if err != nil { - t.Fatal(err) - } -} - -func TestDiff(t *testing.T) { - type myStruct struct { - Version string - User string - Password string - Directories []string - } - saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - config, err := NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - - type myNewConfigStruct struct { - Version string - // User string - Password string - Directories []string - } - - mismatch := myNewConfigStruct{"1", "nopassword", []string{"Work", "documents", "Music"}} - newConfig, err := NewConfig(&mismatch, nil) - if err != nil { - t.Fatal(err) - } - - fields, err := config.Diff(newConfig) - if err != nil { - t.Fatal(err) - } - if len(fields) != 1 { - t.Fatalf("Expected len 1, got %v", len(fields)) - } - - // Uncomment for debugging - // for i, field := range fields { - // fmt.Printf("Diff[%d]: %s=%v\n", i, field.Name(), field.Value()) - // } -} - -func TestDeepDiff(t *testing.T) { - type myStruct struct { - Version string - User string - Password string - Directories []string - } - saveMe := myStruct{"1", "guest", "nopassword", []string{"Work", "Documents", "Music"}} - config, err := NewConfig(&saveMe, nil) - if err != nil { - t.Fatal(err) - } - - mismatch := myStruct{"1", "Guest", "nopassword", []string{"Work", "documents", "Music"}} - newConfig, err := NewConfig(&mismatch, nil) - if err != nil { - t.Fatal(err) - } - - fields, err := config.DeepDiff(newConfig) - if err != nil { - t.Fatal(err) - } - if len(fields) != 2 { - t.Fatalf("Expected len 2, got %v", len(fields)) - } - - // Uncomment for debugging - // for i, field := range fields { - // 
fmt.Printf("DeepDiff[%d]: %s=%v\n", i, field.Name(), field.Value()) - // } -} diff --git a/pkg/retry/retry.go b/pkg/retry/retry.go deleted file mode 100644 index f8423cba..00000000 --- a/pkg/retry/retry.go +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2020 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package retry - -import ( - "context" - "math" - "math/rand" - "time" -) - -// MaxJitter will randomize over the full exponential backoff time -const MaxJitter = 1.0 - -// NoJitter disables the use of jitter for randomizing the -// exponential backoff time -const NoJitter = 0.0 - -// defaultTimer implements Timer interface using time.Timer -type defaultTimer struct { - timer *time.Timer -} - -// C returns the timers channel which receives the current time when the timer fires. -func (t *defaultTimer) C() <-chan time.Time { - return t.timer.C -} - -// Start starts the timer to fire after the given duration -// don't use this code concurrently. -func (t *defaultTimer) Start(duration time.Duration) { - if t.timer == nil { - t.timer = time.NewTimer(duration) - } else { - t.timer.Reset(duration) - } -} - -// Stop is called when the timer is not used anymore and resources may be freed. -func (t *defaultTimer) Stop() { - if t.timer != nil { - t.timer.Stop() - } -} - -// NewTimerWithJitter creates a timer with exponentially increasing delays -// until the maximum retry attempts are reached. - this function is a fully -// configurable version, meant for only advanced use cases. For the most part -// one should use newRetryTimerSimple and newRetryTimer. -func NewTimerWithJitter(ctx context.Context, unit time.Duration, cap time.Duration, jitter float64) <-chan int { - attemptCh := make(chan int) - - // normalize jitter to the range [0, 1.0] - jitter = math.Max(NoJitter, math.Min(MaxJitter, jitter)) - - // computes the exponential backoff duration according to - // https://www.awsarchitectureblog.com/2015/03/backoff.html - exponentialBackoffWait := func(attempt int) time.Duration { - // 1<<uint(attempt) below could overflow, so limit the value of attempt - maxAttempt := 30 - if attempt > maxAttempt { - attempt = maxAttempt - } - //sleep = random_between(0, min(cap, base * 2 ** attempt)) - sleep := unit * time.Duration(1<<uint(attempt)) - if sleep > cap { - sleep = cap - } - if jitter != NoJitter { - sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter) - } - return sleep - } - - go func() { - nextBackoff := 0 - t := &defaultTimer{} - - defer func() { - t.Stop() - }() - - defer close(attemptCh) - - // Channel used to signal after the expiry of backoff wait seconds. - for { - select { - case attemptCh <- nextBackoff: - nextBackoff++ - case <-ctx.Done(): - return - } - - t.Start(exponentialBackoffWait(nextBackoff)) - - select { - case <-ctx.Done(): - return - case <-t.C(): - } - } - }() - - // Start reading.. - return attemptCh -} - -// Default retry constants. -const ( - defaultRetryUnit = 50 * time.Millisecond // 50 millisecond. - defaultRetryCap = 500 * time.Millisecond // 500 millisecond. 
-) - -// NewTimer creates a timer with exponentially increasing delays -// until the maximum retry attempts are reached. - this function is a -// simpler version with all default values. -func NewTimer(ctx context.Context) <-chan int { - return NewTimerWithJitter(ctx, defaultRetryUnit, defaultRetryCap, MaxJitter) -} diff --git a/pkg/retry/retry_test.go b/pkg/retry/retry_test.go deleted file mode 100644 index bb67a344..00000000 --- a/pkg/retry/retry_test.go +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2020 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package retry - -import ( - "context" - "testing" - "time" -) - -// Tests for retry timer. -func TestRetryTimerSimple(t *testing.T) { - retryCtx, cancel := context.WithCancel(context.Background()) - attemptCh := NewTimer(retryCtx) - i := <-attemptCh - if i != 0 { - cancel() - t.Fatalf("Invalid attempt counter returned should be 0, found %d instead", i) - } - i = <-attemptCh - if i <= 0 { - cancel() - t.Fatalf("Invalid attempt counter returned should be greater than 0, found %d instead", i) - } - cancel() - _, ok := <-attemptCh - if ok { - t.Fatal("Attempt counter should be closed") - } -} - -// Test retry time with no jitter. -func TestRetryTimerWithNoJitter(t *testing.T) { - retryCtx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // No jitter - attemptCh := NewTimerWithJitter(retryCtx, time.Millisecond, 5*time.Millisecond, NoJitter) - i := <-attemptCh - if i != 0 { - cancel() - t.Fatalf("Invalid attempt counter returned should be 0, found %d instead", i) - } - // Loop through the maximum possible attempt. - for i = range attemptCh { - if i == 30 { - cancel() - } - } - _, ok := <-attemptCh - if ok { - t.Fatal("Attempt counter should be closed") - } -} - -// Test retry time with Jitter greater than MaxJitter. -func TestRetryTimerWithJitter(t *testing.T) { - retryCtx, cancel := context.WithCancel(context.Background()) - // Jitter will be set back to 1.0 - attemptCh := NewTimerWithJitter(retryCtx, time.Second, 30*time.Second, 2.0) - i := <-attemptCh - if i != 0 { - cancel() - t.Fatalf("Invalid attempt counter returned should be 0, found %d instead", i) - } - cancel() - _, ok := <-attemptCh - if ok { - t.Fatal("Attempt counter should be closed") - } -} diff --git a/pkg/s3select/csv/args.go b/pkg/s3select/csv/args.go deleted file mode 100644 index 0b60a03f..00000000 --- a/pkg/s3select/csv/args.go +++ /dev/null @@ -1,200 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
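The retry package above exposes its backoff as a channel of attempt numbers: the first value arrives immediately, each later receive is delayed by roughly min(cap, unit * 2^attempt) reduced by a random jitter fraction, and the channel closes once the context is cancelled or times out. A usage sketch, assuming the pre-removal import path github.com/minio/minio/pkg/retry:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/minio/minio/pkg/retry" // import path assumed from the deleted file's location
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// NewTimer uses the package defaults: 50ms unit, 500ms cap, full jitter.
	for attempt := range retry.NewTimer(ctx) {
		fmt.Println("attempt", attempt)
		if attempt == 3 {
			cancel() // stop retrying; the producer goroutine then closes the channel
		}
	}
}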
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package csv - -import ( - "encoding/xml" - "errors" - "fmt" - "io" - "strings" - "unicode/utf8" -) - -const ( - none = "none" - use = "use" - - defaultRecordDelimiter = "\n" - defaultFieldDelimiter = "," - defaultQuoteCharacter = `"` - defaultQuoteEscapeCharacter = `"` - defaultCommentCharacter = "#" - - asneeded = "asneeded" -) - -// ReaderArgs - represents elements inside in request XML. -type ReaderArgs struct { - FileHeaderInfo string `xml:"FileHeaderInfo"` - RecordDelimiter string `xml:"RecordDelimiter"` - FieldDelimiter string `xml:"FieldDelimiter"` - QuoteCharacter string `xml:"QuoteCharacter"` - QuoteEscapeCharacter string `xml:"QuoteEscapeCharacter"` - CommentCharacter string `xml:"Comments"` - AllowQuotedRecordDelimiter bool `xml:"AllowQuotedRecordDelimiter"` - unmarshaled bool -} - -// IsEmpty - returns whether reader args is empty or not. -func (args *ReaderArgs) IsEmpty() bool { - return !args.unmarshaled -} - -// UnmarshalXML - decodes XML data. -func (args *ReaderArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) { - args.FileHeaderInfo = none - args.RecordDelimiter = defaultRecordDelimiter - args.FieldDelimiter = defaultFieldDelimiter - args.QuoteCharacter = defaultQuoteCharacter - args.QuoteEscapeCharacter = defaultQuoteEscapeCharacter - args.CommentCharacter = defaultCommentCharacter - args.AllowQuotedRecordDelimiter = false - - for { - // Read tokens from the XML document in a stream. - t, err := d.Token() - if err != nil { - if err == io.EOF { - break - } - return err - } - - switch se := t.(type) { - case xml.StartElement: - tagName := se.Name.Local - switch tagName { - case "AllowQuotedRecordDelimiter": - var b bool - if err = d.DecodeElement(&b, &se); err != nil { - return err - } - args.AllowQuotedRecordDelimiter = b - default: - var s string - if err = d.DecodeElement(&s, &se); err != nil { - return err - } - switch tagName { - case "FileHeaderInfo": - args.FileHeaderInfo = strings.ToLower(s) - case "RecordDelimiter": - args.RecordDelimiter = s - case "FieldDelimiter": - args.FieldDelimiter = s - case "QuoteCharacter": - if utf8.RuneCountInString(s) > 1 { - return fmt.Errorf("unsupported QuoteCharacter '%v'", s) - } - args.QuoteCharacter = s - case "QuoteEscapeCharacter": - switch utf8.RuneCountInString(s) { - case 0: - args.QuoteEscapeCharacter = defaultQuoteEscapeCharacter - case 1: - args.QuoteEscapeCharacter = s - default: - return fmt.Errorf("unsupported QuoteEscapeCharacter '%v'", s) - } - case "Comments": - args.CommentCharacter = s - default: - return errors.New("unrecognized option") - } - } - } - } - - args.unmarshaled = true - return nil -} - -// WriterArgs - represents elements inside in request XML. -type WriterArgs struct { - QuoteFields string `xml:"QuoteFields"` - RecordDelimiter string `xml:"RecordDelimiter"` - FieldDelimiter string `xml:"FieldDelimiter"` - QuoteCharacter string `xml:"QuoteCharacter"` - QuoteEscapeCharacter string `xml:"QuoteEscapeCharacter"` - unmarshaled bool -} - -// IsEmpty - returns whether writer args is empty or not. -func (args *WriterArgs) IsEmpty() bool { - return !args.unmarshaled -} - -// UnmarshalXML - decodes XML data. 
-func (args *WriterArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - - args.QuoteFields = asneeded - args.RecordDelimiter = defaultRecordDelimiter - args.FieldDelimiter = defaultFieldDelimiter - args.QuoteCharacter = defaultQuoteCharacter - args.QuoteEscapeCharacter = defaultQuoteCharacter - - for { - // Read tokens from the XML document in a stream. - t, err := d.Token() - if err != nil { - if err == io.EOF { - break - } - return err - } - - switch se := t.(type) { - case xml.StartElement: - var s string - if err = d.DecodeElement(&s, &se); err != nil { - return err - } - switch se.Name.Local { - case "QuoteFields": - args.QuoteFields = strings.ToLower(s) - case "RecordDelimiter": - args.RecordDelimiter = s - case "FieldDelimiter": - args.FieldDelimiter = s - case "QuoteCharacter": - switch utf8.RuneCountInString(s) { - case 0: - args.QuoteCharacter = "\x00" - case 1: - args.QuoteCharacter = s - default: - return fmt.Errorf("unsupported QuoteCharacter '%v'", s) - } - case "QuoteEscapeCharacter": - switch utf8.RuneCountInString(s) { - case 0: - args.QuoteEscapeCharacter = defaultQuoteEscapeCharacter - case 1: - args.QuoteEscapeCharacter = s - default: - return fmt.Errorf("unsupported QuoteCharacter '%v'", s) - } - default: - return errors.New("unrecognized option") - } - } - } - - args.unmarshaled = true - return nil -} diff --git a/pkg/s3select/csv/errors.go b/pkg/s3select/csv/errors.go deleted file mode 100644 index b01a448d..00000000 --- a/pkg/s3select/csv/errors.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package csv - -type s3Error struct { - code string - message string - statusCode int - cause error -} - -func (err *s3Error) Cause() error { - return err.cause -} - -func (err *s3Error) ErrorCode() string { - return err.code -} - -func (err *s3Error) ErrorMessage() string { - return err.message -} - -func (err *s3Error) HTTPStatusCode() int { - return err.statusCode -} - -func (err *s3Error) Error() string { - return err.message -} - -func errCSVParsingError(err error) *s3Error { - return &s3Error{ - code: "CSVParsingError", - message: "Encountered an error parsing the CSV file. Check the file and try again.", - statusCode: 400, - cause: err, - } -} diff --git a/pkg/s3select/csv/reader.go b/pkg/s3select/csv/reader.go deleted file mode 100644 index 84b5deeb..00000000 --- a/pkg/s3select/csv/reader.go +++ /dev/null @@ -1,314 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
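ReaderArgs above mirrors the CSVInput element of an S3 Select request; any child element left out keeps the package defaults declared at the top of args.go (newline records, comma fields, double-quote quoting, "#" comments). A sketch of decoding such an element on its own, with a hypothetical payload and the pre-removal import path github.com/minio/minio/pkg/s3select/csv assumed:

package main

import (
	"encoding/xml"
	"fmt"
	"log"

	"github.com/minio/minio/pkg/s3select/csv" // import path assumed from the deleted file's location
)

func main() {
	// Hypothetical CSVInput fragment as it could appear in a SelectObjectContent request.
	payload := []byte(`<CSVInput>
		<FileHeaderInfo>USE</FileHeaderInfo>
		<FieldDelimiter>;</FieldDelimiter>
	</CSVInput>`)

	var args csv.ReaderArgs
	if err := xml.Unmarshal(payload, &args); err != nil {
		log.Fatal(err)
	}

	// FileHeaderInfo is lower-cased, FieldDelimiter is overridden,
	// everything else keeps its default.
	fmt.Println(args.FileHeaderInfo, args.FieldDelimiter, args.RecordDelimiter == "\n", args.CommentCharacter)
	// Output: use ; true #
}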
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package csv - -import ( - "bufio" - "bytes" - "fmt" - "io" - "runtime" - "sync" - - csv "github.com/minio/minio/pkg/csvparser" - "github.com/minio/minio/pkg/s3select/sql" -) - -// Reader - CSV record reader for S3Select. -type Reader struct { - args *ReaderArgs - readCloser io.ReadCloser // raw input - buf *bufio.Reader // input to the splitter - columnNames []string // names of columns - nameIndexMap map[string]int64 // name to column index - current [][]string // current block of results to be returned - recordsRead int // number of records read in current slice - input chan *queueItem // input for workers - queue chan *queueItem // output from workers in order - err error // global error state, only touched by Reader.Read - bufferPool sync.Pool // pool of []byte objects for input - csvDstPool sync.Pool // pool of [][]string used for output - close chan struct{} // used for shutting down the splitter before end of stream - readerWg sync.WaitGroup // used to keep track of async reader. -} - -// queueItem is an item in the queue. -type queueItem struct { - input []byte // raw input sent to the worker - dst chan [][]string // result of block decode - err error // any error encountered will be set here -} - -// Read - reads single record. -// Once Read is called the previous record should no longer be referenced. -func (r *Reader) Read(dst sql.Record) (sql.Record, error) { - // If we have have any records left, return these before any error. - for len(r.current) <= r.recordsRead { - if r.err != nil { - return nil, r.err - } - // Move to next block - item, ok := <-r.queue - if !ok { - r.err = io.EOF - return nil, r.err - } - //lint:ignore SA6002 Using pointer would allocate more since we would have to copy slice header before taking a pointer. - r.csvDstPool.Put(r.current) - r.current = <-item.dst - r.err = item.err - r.recordsRead = 0 - } - csvRecord := r.current[r.recordsRead] - r.recordsRead++ - - // If no column names are set, use _(index) - if r.columnNames == nil { - r.columnNames = make([]string, len(csvRecord)) - for i := range csvRecord { - r.columnNames[i] = fmt.Sprintf("_%v", i+1) - } - } - - // If no index map, add that. - if r.nameIndexMap == nil { - r.nameIndexMap = make(map[string]int64) - for i := range r.columnNames { - r.nameIndexMap[r.columnNames[i]] = int64(i) - } - } - dstRec, ok := dst.(*Record) - if !ok { - dstRec = &Record{} - } - dstRec.columnNames = r.columnNames - dstRec.csvRecord = csvRecord - dstRec.nameIndexMap = r.nameIndexMap - - return dstRec, nil -} - -// Close - closes underlying reader. -func (r *Reader) Close() error { - if r.close != nil { - close(r.close) - r.readerWg.Wait() - r.close = nil - } - r.recordsRead = len(r.current) - if r.err == nil { - r.err = io.EOF - } - return r.readCloser.Close() -} - -// nextSplit will attempt to skip a number of bytes and -// return the buffer until the next newline occurs. -// The last block will be sent along with an io.EOF. -func (r *Reader) nextSplit(skip int, dst []byte) ([]byte, error) { - if cap(dst) < skip { - dst = make([]byte, 0, skip+1024) - } - dst = dst[:skip] - if skip > 0 { - n, err := io.ReadFull(r.buf, dst) - if err != nil && err != io.ErrUnexpectedEOF { - // If an EOF happens after reading some but not all the bytes, - // ReadFull returns ErrUnexpectedEOF. - return dst[:n], err - } - dst = dst[:n] - if err == io.ErrUnexpectedEOF { - return dst, io.EOF - } - } - // Read until next line. 
- in, err := r.buf.ReadBytes('\n') - dst = append(dst, in...) - return dst, err -} - -// csvSplitSize is the size of each block. -// Blocks will read this much and find the first following newline. -// 128KB appears to be a very reasonable default. -const csvSplitSize = 128 << 10 - -// startReaders will read the header if needed and spin up a parser -// and a number of workers based on GOMAXPROCS. -// If an error is returned no goroutines have been started and r.err will have been set. -func (r *Reader) startReaders(newReader func(io.Reader) *csv.Reader) error { - if r.args.FileHeaderInfo != none { - // Read column names - // Get one line. - b, err := r.nextSplit(0, nil) - if err != nil { - r.err = err - return err - } - reader := newReader(bytes.NewReader(b)) - record, err := reader.Read() - if err != nil { - r.err = err - if err != io.EOF { - r.err = errCSVParsingError(err) - return errCSVParsingError(err) - } - return err - } - - if r.args.FileHeaderInfo == use { - // Copy column names since records will be reused. - columns := append(make([]string, 0, len(record)), record...) - r.columnNames = columns - } - } - - r.bufferPool.New = func() interface{} { - return make([]byte, csvSplitSize+1024) - } - - // Create queue - r.queue = make(chan *queueItem, runtime.GOMAXPROCS(0)) - r.input = make(chan *queueItem, runtime.GOMAXPROCS(0)) - r.readerWg.Add(1) - - // Start splitter - go func() { - defer close(r.input) - defer close(r.queue) - defer r.readerWg.Done() - for { - next, err := r.nextSplit(csvSplitSize, r.bufferPool.Get().([]byte)) - q := queueItem{ - input: next, - dst: make(chan [][]string, 1), - err: err, - } - select { - case <-r.close: - return - case r.queue <- &q: - } - - select { - case <-r.close: - return - case r.input <- &q: - } - if err != nil { - // Exit on any error. - return - } - } - }() - - // Start parsers - for i := 0; i < runtime.GOMAXPROCS(0); i++ { - go func() { - for in := range r.input { - if len(in.input) == 0 { - in.dst <- nil - continue - } - dst, ok := r.csvDstPool.Get().([][]string) - if !ok { - dst = make([][]string, 0, 1000) - } - - cr := newReader(bytes.NewBuffer(in.input)) - all := dst[:0] - err := func() error { - // Read all records until EOF or another error. - for { - record, err := cr.Read() - if err == io.EOF { - return nil - } - if err != nil { - return errCSVParsingError(err) - } - var recDst []string - if len(dst) > len(all) { - recDst = dst[len(all)] - } - if cap(recDst) < len(record) { - recDst = make([]string, len(record)) - } - recDst = recDst[:len(record)] - copy(recDst, record) - all = append(all, recDst) - } - }() - if err != nil { - in.err = err - } - // We don't need the input any more. - //lint:ignore SA6002 Using pointer would allocate more since we would have to copy slice header before taking a pointer. - r.bufferPool.Put(in.input) - in.input = nil - in.dst <- all - } - }() - } - return nil - -} - -// NewReader - creates new CSV reader using readCloser. 
-func NewReader(readCloser io.ReadCloser, args *ReaderArgs) (*Reader, error) { - if args == nil || args.IsEmpty() { - panic(fmt.Errorf("empty args passed %v", args)) - } - csvIn := io.Reader(readCloser) - if args.RecordDelimiter != "\n" { - csvIn = &recordTransform{ - reader: readCloser, - recordDelimiter: []byte(args.RecordDelimiter), - oneByte: make([]byte, len(args.RecordDelimiter)-1), - } - } - - r := &Reader{ - args: args, - buf: bufio.NewReaderSize(csvIn, csvSplitSize*2), - readCloser: readCloser, - close: make(chan struct{}), - } - - // Assume args are validated by ReaderArgs.UnmarshalXML() - newCsvReader := func(r io.Reader) *csv.Reader { - ret := csv.NewReader(r) - ret.Comma = []rune(args.FieldDelimiter)[0] - ret.Comment = []rune(args.CommentCharacter)[0] - ret.Quote = []rune{} - if len([]rune(args.QuoteCharacter)) > 0 { - // Add the first rune of args.QuoteChracter - ret.Quote = append(ret.Quote, []rune(args.QuoteCharacter)[0]) - } - ret.QuoteEscape = []rune(args.QuoteEscapeCharacter)[0] - ret.FieldsPerRecord = -1 - // If LazyQuotes is true, a quote may appear in an unquoted field and a - // non-doubled quote may appear in a quoted field. - ret.LazyQuotes = true - // We do not trim leading space to keep consistent with s3. - ret.TrimLeadingSpace = false - ret.ReuseRecord = true - return ret - } - - return r, r.startReaders(newCsvReader) -} diff --git a/pkg/s3select/csv/reader_test.go b/pkg/s3select/csv/reader_test.go deleted file mode 100644 index 71ae7813..00000000 --- a/pkg/s3select/csv/reader_test.go +++ /dev/null @@ -1,650 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
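NewReader above hides the record-delimiter transform, the 128 KiB splitter and the GOMAXPROCS decoding workers behind a plain Read loop; the tests that follow drive it the same way. A condensed usage sketch, with the pre-removal import paths assumed; note that from outside the package a ReaderArgs has to come from XML unmarshalling, since NewReader panics on a zero-value args struct:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"strings"

	"github.com/minio/minio/pkg/s3select/csv" // import paths assumed from the deleted files' locations
	"github.com/minio/minio/pkg/s3select/sql"
)

func main() {
	// Build ReaderArgs via XML so its internal "unmarshaled" flag gets set.
	var args csv.ReaderArgs
	if err := xml.Unmarshal([]byte(`<CSVInput><FileHeaderInfo>USE</FileHeaderInfo></CSVInput>`), &args); err != nil {
		log.Fatal(err)
	}

	input := "year,make\n2020,minio\n2021,minio\n"
	r, err := csv.NewReader(ioutil.NopCloser(strings.NewReader(input)), &args)
	if err != nil {
		log.Fatal(err)
	}
	defer r.Close()

	opts := sql.WriteCSVOpts{FieldDelimiter: ',', Quote: '"', QuoteEscape: '"'}
	var rec sql.Record
	for {
		rec, err = r.Read(rec)
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		var out strings.Builder
		rec.WriteCSV(&out, opts) // the writer terminates each record with a newline
		fmt.Print(out.String())
	}
}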
- */ - -package csv - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "path/filepath" - "reflect" - "strings" - "testing" - - "github.com/klauspost/compress/zip" - "github.com/minio/minio/pkg/s3select/sql" -) - -func TestRead(t *testing.T) { - cases := []struct { - content string - recordDelimiter string - fieldDelimiter string - }{ - {"1,2,3\na,b,c\n", "\n", ","}, - {"1,2,3\ta,b,c\t", "\t", ","}, - {"1,2,3\r\na,b,c\r\n", "\r\n", ","}, - } - - for i, c := range cases { - var err error - var record sql.Record - var result bytes.Buffer - - r, _ := NewReader(ioutil.NopCloser(strings.NewReader(c.content)), &ReaderArgs{ - FileHeaderInfo: none, - RecordDelimiter: c.recordDelimiter, - FieldDelimiter: c.fieldDelimiter, - QuoteCharacter: defaultQuoteCharacter, - QuoteEscapeCharacter: defaultQuoteEscapeCharacter, - CommentCharacter: defaultCommentCharacter, - AllowQuotedRecordDelimiter: false, - unmarshaled: true, - }) - - for { - record, err = r.Read(record) - if err != nil { - break - } - opts := sql.WriteCSVOpts{ - FieldDelimiter: []rune(c.fieldDelimiter)[0], - Quote: '"', - QuoteEscape: '"', - AlwaysQuote: false, - } - record.WriteCSV(&result, opts) - result.Truncate(result.Len() - 1) - result.WriteString(c.recordDelimiter) - } - r.Close() - if err != io.EOF { - t.Fatalf("Case %d failed with %s", i, err) - } - - if result.String() != c.content { - t.Errorf("Case %d failed: expected %v result %v", i, c.content, result.String()) - } - } -} - -type tester interface { - Fatal(...interface{}) -} - -func openTestFile(t tester, file string) []byte { - f, err := ioutil.ReadFile(filepath.Join("testdata/testdata.zip")) - if err != nil { - t.Fatal(err) - } - z, err := zip.NewReader(bytes.NewReader(f), int64(len(f))) - if err != nil { - t.Fatal(err) - } - for _, f := range z.File { - if f.Name == file { - rc, err := f.Open() - if err != nil { - t.Fatal(err) - } - defer rc.Close() - b, err := ioutil.ReadAll(rc) - if err != nil { - t.Fatal(err) - } - return b - } - } - t.Fatal(file, "not found in testdata/testdata.zip") - return nil -} - -func TestReadExtended(t *testing.T) { - cases := []struct { - file string - recordDelimiter string - fieldDelimiter string - header bool - wantColumns []string - wantTenFields string - totalFields int - }{ - { - file: "nyc-taxi-data-100k.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - header: true, - wantColumns: []string{"trip_id", "vendor_id", "pickup_datetime", "dropoff_datetime", "store_and_fwd_flag", "rate_code_id", "pickup_longitude", "pickup_latitude", "dropoff_longitude", "dropoff_latitude", "passenger_count", "trip_distance", "fare_amount", "extra", "mta_tax", "tip_amount", "tolls_amount", "ehail_fee", "improvement_surcharge", "total_amount", "payment_type", "trip_type", "pickup", "dropoff", "cab_type", "precipitation", "snow_depth", "snowfall", "max_temp", "min_temp", "wind", "pickup_nyct2010_gid", "pickup_ctlabel", "pickup_borocode", "pickup_boroname", "pickup_ct2010", "pickup_boroct2010", "pickup_cdeligibil", "pickup_ntacode", "pickup_ntaname", "pickup_puma", "dropoff_nyct2010_gid", "dropoff_ctlabel", "dropoff_borocode", "dropoff_boroname", "dropoff_ct2010", "dropoff_boroct2010", "dropoff_cdeligibil", "dropoff_ntacode", "dropoff_ntaname", "dropoff_puma"}, - wantTenFields: `3389224,2,2014-03-26 00:26:15,2014-03-26 00:28:38,N,1,-73.950431823730469,40.792251586914063,-73.938949584960937,40.794425964355469,1,0.84,4.5,0.5,0.5,1,0,,,6.5,1,1,75,74,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem 
South,3804,1828,180,1,Manhattan,018000,1018000,E,MN34,East Harlem North,3804 -3389225,2,2014-03-31 09:42:15,2014-03-31 10:01:17,N,1,-73.950340270996094,40.792228698730469,-73.941970825195313,40.842235565185547,1,4.47,17.5,0,0.5,0,0,,,18,2,1,75,244,green,0.16,0.0,0.0,56,36,8.28,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,911,251,1,Manhattan,025100,1025100,E,MN36,Washington Heights South,3801 -3389226,2,2014-03-26 17:13:28,2014-03-26 17:19:07,N,1,-73.949493408203125,40.793506622314453,-73.943374633789063,40.786155700683594,1,0.82,5.5,1,0.5,0,0,,,7,1,1,75,75,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1387,164,1,Manhattan,016400,1016400,E,MN33,East Harlem South,3804 -3389227,2,2014-03-14 21:07:19,2014-03-14 21:11:41,N,1,-73.950538635253906,40.792228698730469,-73.940811157226563,40.809253692626953,1,1.40,6,0.5,0.5,0,0,,,7,2,1,75,42,green,0.00,0.0,0.0,46,22,5.59,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1184,208,1,Manhattan,020800,1020800,E,MN03,Central Harlem North-Polo Grounds,3803 -3389228,1,2014-03-28 13:52:56,2014-03-28 14:29:01,N,1,-73.950569152832031,40.792312622070313,-73.868507385253906,40.688491821289063,2,16.10,46,0,0.5,0,5.33,,,51.83,2,,75,63,green,0.04,0.0,0.0,62,37,5.37,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1544,1182.02,3,Brooklyn,118202,3118202,E,BK83,Cypress Hills-City Line,4008 -3389229,2,2014-03-07 09:46:32,2014-03-07 09:55:01,N,1,-73.952301025390625,40.789798736572266,-73.935806274414062,40.794448852539063,1,1.67,8,0,0.5,2,0,,,10.5,1,1,75,74,green,0.00,3.9,0.0,37,26,7.83,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1553,178,1,Manhattan,017800,1017800,E,MN34,East Harlem North,3804 -3389230,2,2014-03-17 18:23:05,2014-03-17 18:28:38,N,1,-73.952346801757813,40.789844512939453,-73.946319580078125,40.783851623535156,5,0.95,5.5,1,0.5,0.65,0,,,7.65,1,1,75,263,green,0.00,0.0,0.0,35,23,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,32,156.01,1,Manhattan,015601,1015601,I,MN32,Yorkville,3805 -3389231,1,2014-03-19 19:09:36,2014-03-19 19:12:20,N,1,-73.952377319335938,40.789779663085938,-73.947494506835938,40.796474456787109,1,0.50,4,1,0.5,1,0,,,6.5,1,,75,75,green,0.92,0.0,0.0,46,32,7.16,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1401,174.02,1,Manhattan,017402,1017402,E,MN33,East Harlem South,3804 -3389232,2,2014-03-20 19:06:28,2014-03-20 19:21:35,N,1,-73.952583312988281,40.789516448974609,-73.985870361328125,40.776973724365234,2,3.04,13,1,0.5,2.8,0,,,17.3,1,1,75,143,green,0.00,0.0,0.0,54,40,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1742,155,1,Manhattan,015500,1015500,I,MN14,Lincoln Square,3806 -3389233,2,2014-03-29 09:38:12,2014-03-29 09:44:16,N,1,-73.952728271484375,40.789501190185547,-73.950935363769531,40.775600433349609,1,1.10,6.5,0,0.5,1.3,0,,,8.3,1,1,75,263,green,1.81,0.0,0.0,59,43,10.74,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,2048,138,1,Manhattan,013800,1013800,I,MN32,Yorkville,3805 -`, - totalFields: 308*2 + 1, - }, { - file: "nyc-taxi-data-tabs-100k.csv", - recordDelimiter: "\n", - fieldDelimiter: "\t", - header: true, - wantColumns: []string{"trip_id", "vendor_id", "pickup_datetime", "dropoff_datetime", "store_and_fwd_flag", "rate_code_id", "pickup_longitude", "pickup_latitude", "dropoff_longitude", "dropoff_latitude", "passenger_count", "trip_distance", "fare_amount", "extra", "mta_tax", "tip_amount", "tolls_amount", "ehail_fee", 
"improvement_surcharge", "total_amount", "payment_type", "trip_type", "pickup", "dropoff", "cab_type", "precipitation", "snow_depth", "snowfall", "max_temp", "min_temp", "wind", "pickup_nyct2010_gid", "pickup_ctlabel", "pickup_borocode", "pickup_boroname", "pickup_ct2010", "pickup_boroct2010", "pickup_cdeligibil", "pickup_ntacode", "pickup_ntaname", "pickup_puma", "dropoff_nyct2010_gid", "dropoff_ctlabel", "dropoff_borocode", "dropoff_boroname", "dropoff_ct2010", "dropoff_boroct2010", "dropoff_cdeligibil", "dropoff_ntacode", "dropoff_ntaname", "dropoff_puma"}, - wantTenFields: `3389224,2,2014-03-26 00:26:15,2014-03-26 00:28:38,N,1,-73.950431823730469,40.792251586914063,-73.938949584960937,40.794425964355469,1,0.84,4.5,0.5,0.5,1,0,,,6.5,1,1,75,74,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1828,180,1,Manhattan,018000,1018000,E,MN34,East Harlem North,3804 -3389225,2,2014-03-31 09:42:15,2014-03-31 10:01:17,N,1,-73.950340270996094,40.792228698730469,-73.941970825195313,40.842235565185547,1,4.47,17.5,0,0.5,0,0,,,18,2,1,75,244,green,0.16,0.0,0.0,56,36,8.28,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,911,251,1,Manhattan,025100,1025100,E,MN36,Washington Heights South,3801 -3389226,2,2014-03-26 17:13:28,2014-03-26 17:19:07,N,1,-73.949493408203125,40.793506622314453,-73.943374633789063,40.786155700683594,1,0.82,5.5,1,0.5,0,0,,,7,1,1,75,75,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1387,164,1,Manhattan,016400,1016400,E,MN33,East Harlem South,3804 -3389227,2,2014-03-14 21:07:19,2014-03-14 21:11:41,N,1,-73.950538635253906,40.792228698730469,-73.940811157226563,40.809253692626953,1,1.40,6,0.5,0.5,0,0,,,7,2,1,75,42,green,0.00,0.0,0.0,46,22,5.59,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1184,208,1,Manhattan,020800,1020800,E,MN03,Central Harlem North-Polo Grounds,3803 -3389228,1,2014-03-28 13:52:56,2014-03-28 14:29:01,N,1,-73.950569152832031,40.792312622070313,-73.868507385253906,40.688491821289063,2,16.10,46,0,0.5,0,5.33,,,51.83,2,,75,63,green,0.04,0.0,0.0,62,37,5.37,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1544,1182.02,3,Brooklyn,118202,3118202,E,BK83,Cypress Hills-City Line,4008 -3389229,2,2014-03-07 09:46:32,2014-03-07 09:55:01,N,1,-73.952301025390625,40.789798736572266,-73.935806274414062,40.794448852539063,1,1.67,8,0,0.5,2,0,,,10.5,1,1,75,74,green,0.00,3.9,0.0,37,26,7.83,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1553,178,1,Manhattan,017800,1017800,E,MN34,East Harlem North,3804 -3389230,2,2014-03-17 18:23:05,2014-03-17 18:28:38,N,1,-73.952346801757813,40.789844512939453,-73.946319580078125,40.783851623535156,5,0.95,5.5,1,0.5,0.65,0,,,7.65,1,1,75,263,green,0.00,0.0,0.0,35,23,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,32,156.01,1,Manhattan,015601,1015601,I,MN32,Yorkville,3805 -3389231,1,2014-03-19 19:09:36,2014-03-19 19:12:20,N,1,-73.952377319335938,40.789779663085938,-73.947494506835938,40.796474456787109,1,0.50,4,1,0.5,1,0,,,6.5,1,,75,75,green,0.92,0.0,0.0,46,32,7.16,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1401,174.02,1,Manhattan,017402,1017402,E,MN33,East Harlem South,3804 -3389232,2,2014-03-20 19:06:28,2014-03-20 19:21:35,N,1,-73.952583312988281,40.789516448974609,-73.985870361328125,40.776973724365234,2,3.04,13,1,0.5,2.8,0,,,17.3,1,1,75,143,green,0.00,0.0,0.0,54,40,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem 
South,3804,1742,155,1,Manhattan,015500,1015500,I,MN14,Lincoln Square,3806 -3389233,2,2014-03-29 09:38:12,2014-03-29 09:44:16,N,1,-73.952728271484375,40.789501190185547,-73.950935363769531,40.775600433349609,1,1.10,6.5,0,0.5,1.3,0,,,8.3,1,1,75,263,green,1.81,0.0,0.0,59,43,10.74,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,2048,138,1,Manhattan,013800,1013800,I,MN32,Yorkville,3805 -`, - totalFields: 308*2 + 1, - }, { - file: "nyc-taxi-data-100k-single-delim.csv", - recordDelimiter: "^", - fieldDelimiter: ",", - header: true, - wantColumns: []string{"trip_id", "vendor_id", "pickup_datetime", "dropoff_datetime", "store_and_fwd_flag", "rate_code_id", "pickup_longitude", "pickup_latitude", "dropoff_longitude", "dropoff_latitude", "passenger_count", "trip_distance", "fare_amount", "extra", "mta_tax", "tip_amount", "tolls_amount", "ehail_fee", "improvement_surcharge", "total_amount", "payment_type", "trip_type", "pickup", "dropoff", "cab_type", "precipitation", "snow_depth", "snowfall", "max_temp", "min_temp", "wind", "pickup_nyct2010_gid", "pickup_ctlabel", "pickup_borocode", "pickup_boroname", "pickup_ct2010", "pickup_boroct2010", "pickup_cdeligibil", "pickup_ntacode", "pickup_ntaname", "pickup_puma", "dropoff_nyct2010_gid", "dropoff_ctlabel", "dropoff_borocode", "dropoff_boroname", "dropoff_ct2010", "dropoff_boroct2010", "dropoff_cdeligibil", "dropoff_ntacode", "dropoff_ntaname", "dropoff_puma"}, - wantTenFields: `3389224,2,2014-03-26 00:26:15,2014-03-26 00:28:38,N,1,-73.950431823730469,40.792251586914063,-73.938949584960937,40.794425964355469,1,0.84,4.5,0.5,0.5,1,0,,,6.5,1,1,75,74,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1828,180,1,Manhattan,018000,1018000,E,MN34,East Harlem North,3804 -3389225,2,2014-03-31 09:42:15,2014-03-31 10:01:17,N,1,-73.950340270996094,40.792228698730469,-73.941970825195313,40.842235565185547,1,4.47,17.5,0,0.5,0,0,,,18,2,1,75,244,green,0.16,0.0,0.0,56,36,8.28,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,911,251,1,Manhattan,025100,1025100,E,MN36,Washington Heights South,3801 -3389226,2,2014-03-26 17:13:28,2014-03-26 17:19:07,N,1,-73.949493408203125,40.793506622314453,-73.943374633789063,40.786155700683594,1,0.82,5.5,1,0.5,0,0,,,7,1,1,75,75,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1387,164,1,Manhattan,016400,1016400,E,MN33,East Harlem South,3804 -3389227,2,2014-03-14 21:07:19,2014-03-14 21:11:41,N,1,-73.950538635253906,40.792228698730469,-73.940811157226563,40.809253692626953,1,1.40,6,0.5,0.5,0,0,,,7,2,1,75,42,green,0.00,0.0,0.0,46,22,5.59,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1184,208,1,Manhattan,020800,1020800,E,MN03,Central Harlem North-Polo Grounds,3803 -3389228,1,2014-03-28 13:52:56,2014-03-28 14:29:01,N,1,-73.950569152832031,40.792312622070313,-73.868507385253906,40.688491821289063,2,16.10,46,0,0.5,0,5.33,,,51.83,2,,75,63,green,0.04,0.0,0.0,62,37,5.37,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1544,1182.02,3,Brooklyn,118202,3118202,E,BK83,Cypress Hills-City Line,4008 -3389229,2,2014-03-07 09:46:32,2014-03-07 09:55:01,N,1,-73.952301025390625,40.789798736572266,-73.935806274414062,40.794448852539063,1,1.67,8,0,0.5,2,0,,,10.5,1,1,75,74,green,0.00,3.9,0.0,37,26,7.83,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1553,178,1,Manhattan,017800,1017800,E,MN34,East Harlem North,3804 -3389230,2,2014-03-17 18:23:05,2014-03-17 
18:28:38,N,1,-73.952346801757813,40.789844512939453,-73.946319580078125,40.783851623535156,5,0.95,5.5,1,0.5,0.65,0,,,7.65,1,1,75,263,green,0.00,0.0,0.0,35,23,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,32,156.01,1,Manhattan,015601,1015601,I,MN32,Yorkville,3805 -3389231,1,2014-03-19 19:09:36,2014-03-19 19:12:20,N,1,-73.952377319335938,40.789779663085938,-73.947494506835938,40.796474456787109,1,0.50,4,1,0.5,1,0,,,6.5,1,,75,75,green,0.92,0.0,0.0,46,32,7.16,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1401,174.02,1,Manhattan,017402,1017402,E,MN33,East Harlem South,3804 -3389232,2,2014-03-20 19:06:28,2014-03-20 19:21:35,N,1,-73.952583312988281,40.789516448974609,-73.985870361328125,40.776973724365234,2,3.04,13,1,0.5,2.8,0,,,17.3,1,1,75,143,green,0.00,0.0,0.0,54,40,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1742,155,1,Manhattan,015500,1015500,I,MN14,Lincoln Square,3806 -3389233,2,2014-03-29 09:38:12,2014-03-29 09:44:16,N,1,-73.952728271484375,40.789501190185547,-73.950935363769531,40.775600433349609,1,1.10,6.5,0,0.5,1.3,0,,,8.3,1,1,75,263,green,1.81,0.0,0.0,59,43,10.74,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,2048,138,1,Manhattan,013800,1013800,I,MN32,Yorkville,3805 -`, - totalFields: 308*2 + 1, - }, { - file: "nyc-taxi-data-100k-multi-delim.csv", - recordDelimiter: "^Y", - fieldDelimiter: ",", - header: true, - wantColumns: []string{"trip_id", "vendor_id", "pickup_datetime", "dropoff_datetime", "store_and_fwd_flag", "rate_code_id", "pickup_longitude", "pickup_latitude", "dropoff_longitude", "dropoff_latitude", "passenger_count", "trip_distance", "fare_amount", "extra", "mta_tax", "tip_amount", "tolls_amount", "ehail_fee", "improvement_surcharge", "total_amount", "payment_type", "trip_type", "pickup", "dropoff", "cab_type", "precipitation", "snow_depth", "snowfall", "max_temp", "min_temp", "wind", "pickup_nyct2010_gid", "pickup_ctlabel", "pickup_borocode", "pickup_boroname", "pickup_ct2010", "pickup_boroct2010", "pickup_cdeligibil", "pickup_ntacode", "pickup_ntaname", "pickup_puma", "dropoff_nyct2010_gid", "dropoff_ctlabel", "dropoff_borocode", "dropoff_boroname", "dropoff_ct2010", "dropoff_boroct2010", "dropoff_cdeligibil", "dropoff_ntacode", "dropoff_ntaname", "dropoff_puma"}, - wantTenFields: `3389224,2,2014-03-26 00:26:15,2014-03-26 00:28:38,N,1,-73.950431823730469,40.792251586914063,-73.938949584960937,40.794425964355469,1,0.84,4.5,0.5,0.5,1,0,,,6.5,1,1,75,74,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1828,180,1,Manhattan,018000,1018000,E,MN34,East Harlem North,3804 -3389225,2,2014-03-31 09:42:15,2014-03-31 10:01:17,N,1,-73.950340270996094,40.792228698730469,-73.941970825195313,40.842235565185547,1,4.47,17.5,0,0.5,0,0,,,18,2,1,75,244,green,0.16,0.0,0.0,56,36,8.28,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,911,251,1,Manhattan,025100,1025100,E,MN36,Washington Heights South,3801 -3389226,2,2014-03-26 17:13:28,2014-03-26 17:19:07,N,1,-73.949493408203125,40.793506622314453,-73.943374633789063,40.786155700683594,1,0.82,5.5,1,0.5,0,0,,,7,1,1,75,75,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1387,164,1,Manhattan,016400,1016400,E,MN33,East Harlem South,3804 -3389227,2,2014-03-14 21:07:19,2014-03-14 
21:11:41,N,1,-73.950538635253906,40.792228698730469,-73.940811157226563,40.809253692626953,1,1.40,6,0.5,0.5,0,0,,,7,2,1,75,42,green,0.00,0.0,0.0,46,22,5.59,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1184,208,1,Manhattan,020800,1020800,E,MN03,Central Harlem North-Polo Grounds,3803 -3389228,1,2014-03-28 13:52:56,2014-03-28 14:29:01,N,1,-73.950569152832031,40.792312622070313,-73.868507385253906,40.688491821289063,2,16.10,46,0,0.5,0,5.33,,,51.83,2,,75,63,green,0.04,0.0,0.0,62,37,5.37,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1544,1182.02,3,Brooklyn,118202,3118202,E,BK83,Cypress Hills-City Line,4008 -3389229,2,2014-03-07 09:46:32,2014-03-07 09:55:01,N,1,-73.952301025390625,40.789798736572266,-73.935806274414062,40.794448852539063,1,1.67,8,0,0.5,2,0,,,10.5,1,1,75,74,green,0.00,3.9,0.0,37,26,7.83,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1553,178,1,Manhattan,017800,1017800,E,MN34,East Harlem North,3804 -3389230,2,2014-03-17 18:23:05,2014-03-17 18:28:38,N,1,-73.952346801757813,40.789844512939453,-73.946319580078125,40.783851623535156,5,0.95,5.5,1,0.5,0.65,0,,,7.65,1,1,75,263,green,0.00,0.0,0.0,35,23,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,32,156.01,1,Manhattan,015601,1015601,I,MN32,Yorkville,3805 -3389231,1,2014-03-19 19:09:36,2014-03-19 19:12:20,N,1,-73.952377319335938,40.789779663085938,-73.947494506835938,40.796474456787109,1,0.50,4,1,0.5,1,0,,,6.5,1,,75,75,green,0.92,0.0,0.0,46,32,7.16,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1401,174.02,1,Manhattan,017402,1017402,E,MN33,East Harlem South,3804 -3389232,2,2014-03-20 19:06:28,2014-03-20 19:21:35,N,1,-73.952583312988281,40.789516448974609,-73.985870361328125,40.776973724365234,2,3.04,13,1,0.5,2.8,0,,,17.3,1,1,75,143,green,0.00,0.0,0.0,54,40,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1742,155,1,Manhattan,015500,1015500,I,MN14,Lincoln Square,3806 -3389233,2,2014-03-29 09:38:12,2014-03-29 09:44:16,N,1,-73.952728271484375,40.789501190185547,-73.950935363769531,40.775600433349609,1,1.10,6.5,0,0.5,1.3,0,,,8.3,1,1,75,263,green,1.81,0.0,0.0,59,43,10.74,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,2048,138,1,Manhattan,013800,1013800,I,MN32,Yorkville,3805 -`, - totalFields: 308*2 + 1, - }, { - file: "nyc-taxi-data-noheader-100k.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - header: false, - wantColumns: []string{"_1", "_2", "_3", "_4", "_5", "_6", "_7", "_8", "_9", "_10", "_11", "_12", "_13", "_14", "_15", "_16", "_17", "_18", "_19", "_20", "_21", "_22", "_23", "_24", "_25", "_26", "_27", "_28", "_29", "_30", "_31", "_32", "_33", "_34", "_35", "_36", "_37", "_38", "_39", "_40", "_41", "_42", "_43", "_44", "_45", "_46", "_47", "_48", "_49", "_50", "_51"}, - wantTenFields: `3389224,2,2014-03-26 00:26:15,2014-03-26 00:28:38,N,1,-73.950431823730469,40.792251586914063,-73.938949584960937,40.794425964355469,1,0.84,4.5,0.5,0.5,1,0,,,6.5,1,1,75,74,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1828,180,1,Manhattan,018000,1018000,E,MN34,East Harlem North,3804 -3389225,2,2014-03-31 09:42:15,2014-03-31 10:01:17,N,1,-73.950340270996094,40.792228698730469,-73.941970825195313,40.842235565185547,1,4.47,17.5,0,0.5,0,0,,,18,2,1,75,244,green,0.16,0.0,0.0,56,36,8.28,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,911,251,1,Manhattan,025100,1025100,E,MN36,Washington Heights South,3801 -3389226,2,2014-03-26 17:13:28,2014-03-26 
17:19:07,N,1,-73.949493408203125,40.793506622314453,-73.943374633789063,40.786155700683594,1,0.82,5.5,1,0.5,0,0,,,7,1,1,75,75,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1387,164,1,Manhattan,016400,1016400,E,MN33,East Harlem South,3804 -3389227,2,2014-03-14 21:07:19,2014-03-14 21:11:41,N,1,-73.950538635253906,40.792228698730469,-73.940811157226563,40.809253692626953,1,1.40,6,0.5,0.5,0,0,,,7,2,1,75,42,green,0.00,0.0,0.0,46,22,5.59,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1184,208,1,Manhattan,020800,1020800,E,MN03,Central Harlem North-Polo Grounds,3803 -3389228,1,2014-03-28 13:52:56,2014-03-28 14:29:01,N,1,-73.950569152832031,40.792312622070313,-73.868507385253906,40.688491821289063,2,16.10,46,0,0.5,0,5.33,,,51.83,2,,75,63,green,0.04,0.0,0.0,62,37,5.37,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1544,1182.02,3,Brooklyn,118202,3118202,E,BK83,Cypress Hills-City Line,4008 -3389229,2,2014-03-07 09:46:32,2014-03-07 09:55:01,N,1,-73.952301025390625,40.789798736572266,-73.935806274414062,40.794448852539063,1,1.67,8,0,0.5,2,0,,,10.5,1,1,75,74,green,0.00,3.9,0.0,37,26,7.83,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1553,178,1,Manhattan,017800,1017800,E,MN34,East Harlem North,3804 -3389230,2,2014-03-17 18:23:05,2014-03-17 18:28:38,N,1,-73.952346801757813,40.789844512939453,-73.946319580078125,40.783851623535156,5,0.95,5.5,1,0.5,0.65,0,,,7.65,1,1,75,263,green,0.00,0.0,0.0,35,23,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,32,156.01,1,Manhattan,015601,1015601,I,MN32,Yorkville,3805 -3389231,1,2014-03-19 19:09:36,2014-03-19 19:12:20,N,1,-73.952377319335938,40.789779663085938,-73.947494506835938,40.796474456787109,1,0.50,4,1,0.5,1,0,,,6.5,1,,75,75,green,0.92,0.0,0.0,46,32,7.16,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1401,174.02,1,Manhattan,017402,1017402,E,MN33,East Harlem South,3804 -3389232,2,2014-03-20 19:06:28,2014-03-20 19:21:35,N,1,-73.952583312988281,40.789516448974609,-73.985870361328125,40.776973724365234,2,3.04,13,1,0.5,2.8,0,,,17.3,1,1,75,143,green,0.00,0.0,0.0,54,40,8.05,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1742,155,1,Manhattan,015500,1015500,I,MN14,Lincoln Square,3806 -3389233,2,2014-03-29 09:38:12,2014-03-29 09:44:16,N,1,-73.952728271484375,40.789501190185547,-73.950935363769531,40.775600433349609,1,1.10,6.5,0,0.5,1.3,0,,,8.3,1,1,75,263,green,1.81,0.0,0.0,59,43,10.74,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,2048,138,1,Manhattan,013800,1013800,I,MN32,Yorkville,3805 -`, - totalFields: 308 * 2, - }, - } - - for i, c := range cases { - t.Run(c.file, func(t *testing.T) { - - var err error - var record sql.Record - var result bytes.Buffer - input := openTestFile(t, c.file) - // Get above block size. - input = append(input, input...) 
- args := ReaderArgs{ - FileHeaderInfo: use, - RecordDelimiter: c.recordDelimiter, - FieldDelimiter: c.fieldDelimiter, - QuoteCharacter: defaultQuoteCharacter, - QuoteEscapeCharacter: defaultQuoteEscapeCharacter, - CommentCharacter: defaultCommentCharacter, - AllowQuotedRecordDelimiter: false, - unmarshaled: true, - } - if !c.header { - args.FileHeaderInfo = none - } - r, _ := NewReader(ioutil.NopCloser(bytes.NewReader(input)), &args) - fields := 0 - for { - record, err = r.Read(record) - if err != nil { - break - } - if fields < 10 { - opts := sql.WriteCSVOpts{ - FieldDelimiter: ',', - Quote: '"', - QuoteEscape: '"', - AlwaysQuote: false, - } - // Write with fixed delimiters, newlines. - err := record.WriteCSV(&result, opts) - if err != nil { - t.Error(err) - } - } - fields++ - } - r.Close() - if err != io.EOF { - t.Fatalf("Case %d failed with %s", i, err) - } - if !reflect.DeepEqual(r.columnNames, c.wantColumns) { - t.Errorf("Case %d failed: expected %#v, got result %#v", i, c.wantColumns, r.columnNames) - } - if result.String() != c.wantTenFields { - t.Errorf("Case %d failed: expected %v, got result %v", i, c.wantTenFields, result.String()) - } - if fields != c.totalFields { - t.Errorf("Case %d failed: expected %v results %v", i, c.totalFields, fields) - } - }) - } -} - -type errReader struct { - err error -} - -func (e errReader) Read(p []byte) (n int, err error) { - return 0, e.err -} - -func TestReadFailures(t *testing.T) { - customErr := errors.New("unable to read file :(") - cases := []struct { - file string - recordDelimiter string - fieldDelimiter string - sendErr error - header bool - wantColumns []string - wantFields string - wantErr error - }{ - { - file: "truncated-records.csv", - recordDelimiter: "^Y", - fieldDelimiter: ",", - header: true, - wantColumns: []string{"trip_id", "vendor_id", "pickup_datetime", "dropoff_datetime", "store_and_fwd_flag", "rate_code_id", "pickup_longitude", "pickup_latitude", "dropoff_longitude", "dropoff_latitude", "passenger_count", "trip_distance", "fare_amount", "extra", "mta_tax", "tip_amount", "tolls_amount", "ehail_fee", "improvement_surcharge", "total_amount", "payment_type", "trip_type", "pickup", "dropoff", "cab_type", "precipitation", "snow_depth", "snowfall", "max_temp", "min_temp", "wind", "pickup_nyct2010_gid", "pickup_ctlabel", "pickup_borocode", "pickup_boroname", "pickup_ct2010", "pickup_boroct2010", "pickup_cdeligibil", "pickup_ntacode", "pickup_ntaname", "pickup_puma", "dropoff_nyct2010_gid", "dropoff_ctlabel", "dropoff_borocode", "dropoff_boroname", "dropoff_ct2010", "dropoff_boroct2010", "dropoff_cdeligibil", "dropoff_ntacode", "dropoff_ntaname", "dropoff_puma"}, - wantFields: `3389224,2,2014-03-26 00:26:15,2014-03-26 00:28:38,N,1,-73.950431823730469,40.792251586914063,-73.938949584960937,40.794425964355469,1,0.84,4.5,0.5,0.5,1,0,,,6.5,1,1,75,74,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1828,180,1,Manhattan,018000,1018000,E,MN34,East Harlem North,3804 -3389225,2,2014-03-31 09:42:15,2014-03-31 10:01:17,N,1,-73.950340270996094,40.792228698730469,-73.941970825195313,40.842235565185547,1,4.47,17.5,0,0.5,0,0,,,18,2,1,75,244,green,0.16,0.0,0.0,56,36,8.28,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,911,251,1,Manhattan,025100 -`, - wantErr: io.EOF, - }, - { - file: "truncated-records.csv", - recordDelimiter: "^Y", - fieldDelimiter: ",", - sendErr: customErr, - header: true, - wantColumns: []string{"trip_id", "vendor_id", "pickup_datetime", "dropoff_datetime", 
"store_and_fwd_flag", "rate_code_id", "pickup_longitude", "pickup_latitude", "dropoff_longitude", "dropoff_latitude", "passenger_count", "trip_distance", "fare_amount", "extra", "mta_tax", "tip_amount", "tolls_amount", "ehail_fee", "improvement_surcharge", "total_amount", "payment_type", "trip_type", "pickup", "dropoff", "cab_type", "precipitation", "snow_depth", "snowfall", "max_temp", "min_temp", "wind", "pickup_nyct2010_gid", "pickup_ctlabel", "pickup_borocode", "pickup_boroname", "pickup_ct2010", "pickup_boroct2010", "pickup_cdeligibil", "pickup_ntacode", "pickup_ntaname", "pickup_puma", "dropoff_nyct2010_gid", "dropoff_ctlabel", "dropoff_borocode", "dropoff_boroname", "dropoff_ct2010", "dropoff_boroct2010", "dropoff_cdeligibil", "dropoff_ntacode", "dropoff_ntaname", "dropoff_puma"}, - wantFields: `3389224,2,2014-03-26 00:26:15,2014-03-26 00:28:38,N,1,-73.950431823730469,40.792251586914063,-73.938949584960937,40.794425964355469,1,0.84,4.5,0.5,0.5,1,0,,,6.5,1,1,75,74,green,0.00,0.0,0.0,36,24,11.86,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,1828,180,1,Manhattan,018000,1018000,E,MN34,East Harlem North,3804 -3389225,2,2014-03-31 09:42:15,2014-03-31 10:01:17,N,1,-73.950340270996094,40.792228698730469,-73.941970825195313,40.842235565185547,1,4.47,17.5,0,0.5,0,0,,,18,2,1,75,244,green,0.16,0.0,0.0,56,36,8.28,1267,168,1,Manhattan,016800,1016800,E,MN33,East Harlem South,3804,911,251,1,Manhattan,025100 -`, - wantErr: customErr, - }, - { - // This works since LazyQuotes is true: - file: "invalid-badbarequote.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - sendErr: nil, - header: true, - wantColumns: []string{"header1", "header2", "header3"}, - wantFields: "ok1,ok2,ok3\n" + `"a ""word""",b` + "\n", - wantErr: io.EOF, - }, - { - // This works since LazyQuotes is true: - file: "invalid-baddoubleq.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - sendErr: nil, - header: true, - wantColumns: []string{"header1", "header2", "header3"}, - wantFields: "ok1,ok2,ok3\n" + `"a""""b",c` + "\n", - wantErr: io.EOF, - }, - { - // This works since LazyQuotes is true: - file: "invalid-badextraq.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - sendErr: nil, - header: true, - wantColumns: []string{"header1", "header2", "header3"}, - wantFields: "ok1,ok2,ok3\n" + `a word,"b"""` + "\n", - wantErr: io.EOF, - }, - { - // This works since LazyQuotes is true: - file: "invalid-badstartline.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - sendErr: nil, - header: true, - wantColumns: []string{"header1", "header2", "header3"}, - wantFields: "ok1,ok2,ok3\n" + `a,"b` + "\n" + `c""d,e` + "\n\"\n", - wantErr: io.EOF, - }, - { - // This works since LazyQuotes is true: - file: "invalid-badstartline2.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - sendErr: nil, - header: true, - wantColumns: []string{"header1", "header2", "header3"}, - wantFields: "ok1,ok2,ok3\n" + `a,b` + "\n" + `"d` + "\n\ne\"\n", - wantErr: io.EOF, - }, - { - // This works since LazyQuotes is true: - file: "invalid-badtrailingq.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - sendErr: nil, - header: true, - wantColumns: []string{"header1", "header2", "header3"}, - wantFields: "ok1,ok2,ok3\n" + `a word,"b"""` + "\n", - wantErr: io.EOF, - }, - { - // This works since LazyQuotes is true: - file: "invalid-crlfquoted.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - sendErr: nil, - header: true, - wantColumns: []string{"header1", "header2", "header3"}, - wantFields: "ok1,ok2,ok3\n" + `"foo""bar"` + 
"\n", - wantErr: io.EOF, - }, - { - // This works since LazyQuotes is true: - file: "invalid-csv.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - sendErr: nil, - header: true, - wantColumns: []string{"header1", "header2", "header3"}, - wantFields: "ok1,ok2,ok3\n" + `"a""""b",c` + "\n", - wantErr: io.EOF, - }, - { - // This works since LazyQuotes is true, but output is very weird. - file: "invalid-oddquote.csv", - recordDelimiter: "\n", - fieldDelimiter: ",", - sendErr: nil, - header: true, - wantColumns: []string{"header1", "header2", "header3"}, - wantFields: "ok1,ok2,ok3\n" + `""""""",b,c` + "\n\"\n", - wantErr: io.EOF, - }, - { - // Test when file ends with a half separator - file: "endswithhalfsep.csv", - recordDelimiter: "%!", - fieldDelimiter: ",", - sendErr: nil, - header: false, - wantColumns: []string{"_1", "_2", "_3"}, - wantFields: "a,b,c\na2,b2,c2%\n", - wantErr: io.EOF, - }, - } - - for i, c := range cases { - t.Run(c.file, func(t *testing.T) { - - var err error - var record sql.Record - var result bytes.Buffer - input := openTestFile(t, c.file) - args := ReaderArgs{ - FileHeaderInfo: use, - RecordDelimiter: c.recordDelimiter, - FieldDelimiter: c.fieldDelimiter, - QuoteCharacter: defaultQuoteCharacter, - QuoteEscapeCharacter: defaultQuoteEscapeCharacter, - CommentCharacter: defaultCommentCharacter, - AllowQuotedRecordDelimiter: false, - unmarshaled: true, - } - if !c.header { - args.FileHeaderInfo = none - } - inr := io.Reader(bytes.NewReader(input)) - if c.sendErr != nil { - inr = io.MultiReader(inr, errReader{c.sendErr}) - } - r, _ := NewReader(ioutil.NopCloser(inr), &args) - fields := 0 - for { - record, err = r.Read(record) - if err != nil { - break - } - - opts := sql.WriteCSVOpts{ - FieldDelimiter: ',', - Quote: '"', - QuoteEscape: '"', - AlwaysQuote: false, - } - // Write with fixed delimiters, newlines. 
- err := record.WriteCSV(&result, opts) - if err != nil { - t.Error(err) - } - fields++ - } - r.Close() - if err != c.wantErr { - t.Fatalf("Case %d failed with %s", i, err) - } - if !reflect.DeepEqual(r.columnNames, c.wantColumns) { - t.Errorf("Case %d failed: expected \n%#v, got result \n%#v", i, c.wantColumns, r.columnNames) - } - if result.String() != c.wantFields { - t.Errorf("Case %d failed: expected \n%v\nGot result \n%v", i, c.wantFields, result.String()) - } - }) - } -} - -func BenchmarkReaderBasic(b *testing.B) { - args := ReaderArgs{ - FileHeaderInfo: use, - RecordDelimiter: "\n", - FieldDelimiter: ",", - QuoteCharacter: defaultQuoteCharacter, - QuoteEscapeCharacter: defaultQuoteEscapeCharacter, - CommentCharacter: defaultCommentCharacter, - AllowQuotedRecordDelimiter: false, - unmarshaled: true, - } - f := openTestFile(b, "nyc-taxi-data-100k.csv") - r, err := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args) - if err != nil { - b.Fatalf("Reading init failed with %s", err) - } - defer r.Close() - b.ReportAllocs() - b.ResetTimer() - b.SetBytes(int64(len(f))) - var record sql.Record - for i := 0; i < b.N; i++ { - r, err = NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args) - if err != nil { - b.Fatalf("Reading init failed with %s", err) - } - for err == nil { - record, err = r.Read(record) - if err != nil && err != io.EOF { - b.Fatalf("Reading failed with %s", err) - } - } - r.Close() - } -} - -func BenchmarkReaderHuge(b *testing.B) { - args := ReaderArgs{ - FileHeaderInfo: use, - RecordDelimiter: "\n", - FieldDelimiter: ",", - QuoteCharacter: defaultQuoteCharacter, - QuoteEscapeCharacter: defaultQuoteEscapeCharacter, - CommentCharacter: defaultCommentCharacter, - AllowQuotedRecordDelimiter: false, - unmarshaled: true, - } - for n := 0; n < 11; n++ { - f := openTestFile(b, "nyc-taxi-data-100k.csv") - want := 309 - for i := 0; i < n; i++ { - f = append(f, f...) 
- want *= 2 - } - b.Run(fmt.Sprint(len(f)/(1<<10), "K"), func(b *testing.B) { - b.ReportAllocs() - b.SetBytes(int64(len(f))) - b.ResetTimer() - var record sql.Record - for i := 0; i < b.N; i++ { - r, err := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args) - if err != nil { - b.Fatalf("Reading init failed with %s", err) - } - - got := 0 - for err == nil { - record, err = r.Read(record) - if err != nil && err != io.EOF { - b.Fatalf("Reading failed with %s", err) - } - got++ - } - r.Close() - if got != want { - b.Errorf("want %d records, got %d", want, got) - } - } - }) - } -} - -func BenchmarkReaderReplace(b *testing.B) { - args := ReaderArgs{ - FileHeaderInfo: use, - RecordDelimiter: "^", - FieldDelimiter: ",", - QuoteCharacter: defaultQuoteCharacter, - QuoteEscapeCharacter: defaultQuoteEscapeCharacter, - CommentCharacter: defaultCommentCharacter, - AllowQuotedRecordDelimiter: false, - unmarshaled: true, - } - f := openTestFile(b, "nyc-taxi-data-100k-single-delim.csv") - r, err := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args) - if err != nil { - b.Fatalf("Reading init failed with %s", err) - } - defer r.Close() - b.ReportAllocs() - b.ResetTimer() - b.SetBytes(int64(len(f))) - var record sql.Record - for i := 0; i < b.N; i++ { - r, err = NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args) - if err != nil { - b.Fatalf("Reading init failed with %s", err) - } - - for err == nil { - record, err = r.Read(record) - if err != nil && err != io.EOF { - b.Fatalf("Reading failed with %s", err) - } - } - r.Close() - } -} - -func BenchmarkReaderReplaceTwo(b *testing.B) { - args := ReaderArgs{ - FileHeaderInfo: use, - RecordDelimiter: "^Y", - FieldDelimiter: ",", - QuoteCharacter: defaultQuoteCharacter, - QuoteEscapeCharacter: defaultQuoteEscapeCharacter, - CommentCharacter: defaultCommentCharacter, - AllowQuotedRecordDelimiter: false, - unmarshaled: true, - } - f := openTestFile(b, "nyc-taxi-data-100k-multi-delim.csv") - r, err := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args) - if err != nil { - b.Fatalf("Reading init failed with %s", err) - } - defer r.Close() - b.ReportAllocs() - b.ResetTimer() - b.SetBytes(int64(len(f))) - var record sql.Record - for i := 0; i < b.N; i++ { - r, err = NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args) - if err != nil { - b.Fatalf("Reading init failed with %s", err) - } - - for err == nil { - record, err = r.Read(record) - if err != nil && err != io.EOF { - b.Fatalf("Reading failed with %s", err) - } - } - r.Close() - } -} diff --git a/pkg/s3select/csv/record.go b/pkg/s3select/csv/record.go deleted file mode 100644 index c9f0f842..00000000 --- a/pkg/s3select/csv/record.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package csv - -import ( - "encoding/json" - "errors" - "fmt" - "io" - - "github.com/bcicen/jstream" - csv "github.com/minio/minio/pkg/csvparser" - "github.com/minio/minio/pkg/s3select/sql" -) - -// Record - is a CSV record. 
-type Record struct { - columnNames []string - csvRecord []string - nameIndexMap map[string]int64 -} - -// Get - gets the value for a column name. CSV fields do not have any -// defined type (other than the default string). So this function -// always returns fields using sql.FromBytes so that the type -// specified/implied by the query can be used, or can be automatically -// converted based on the query. -func (r *Record) Get(name string) (*sql.Value, error) { - index, found := r.nameIndexMap[name] - if !found { - return nil, fmt.Errorf("column %v not found", name) - } - - if index >= int64(len(r.csvRecord)) { - // No value found for column 'name', hence return null - // value - return sql.FromNull(), nil - } - - return sql.FromBytes([]byte(r.csvRecord[index])), nil -} - -// Set - sets the value for a column name. -func (r *Record) Set(name string, value *sql.Value) (sql.Record, error) { - r.columnNames = append(r.columnNames, name) - r.csvRecord = append(r.csvRecord, value.CSVString()) - return r, nil -} - -// Reset data in record. -func (r *Record) Reset() { - if len(r.columnNames) > 0 { - r.columnNames = r.columnNames[:0] - } - if len(r.csvRecord) > 0 { - r.csvRecord = r.csvRecord[:0] - } - for k := range r.nameIndexMap { - delete(r.nameIndexMap, k) - } -} - -// Clone the record. -func (r *Record) Clone(dst sql.Record) sql.Record { - other, ok := dst.(*Record) - if !ok { - other = &Record{} - } - if len(other.columnNames) > 0 { - other.columnNames = other.columnNames[:0] - } - if len(other.csvRecord) > 0 { - other.csvRecord = other.csvRecord[:0] - } - other.columnNames = append(other.columnNames, r.columnNames...) - other.csvRecord = append(other.csvRecord, r.csvRecord...) - return other -} - -// WriteCSV - encodes to CSV data. -func (r *Record) WriteCSV(writer io.Writer, opts sql.WriteCSVOpts) error { - w := csv.NewWriter(writer) - w.Comma = opts.FieldDelimiter - w.AlwaysQuote = opts.AlwaysQuote - w.Quote = opts.Quote - w.QuoteEscape = opts.QuoteEscape - if err := w.Write(r.csvRecord); err != nil { - return err - } - w.Flush() - if err := w.Error(); err != nil { - return err - } - - return nil -} - -// WriteJSON - encodes to JSON data. -func (r *Record) WriteJSON(writer io.Writer) error { - var kvs jstream.KVS = make([]jstream.KV, len(r.columnNames)) - for i := 0; i < len(r.columnNames); i++ { - kvs[i] = jstream.KV{Key: r.columnNames[i], Value: r.csvRecord[i]} - } - return json.NewEncoder(writer).Encode(kvs) -} - -// Raw - returns the underlying data with format info. -func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) { - return sql.SelectFmtCSV, r -} - -// Replace - is not supported for CSV -func (r *Record) Replace(_ interface{}) error { - return errors.New("Replace is not supported for CSV") -} - -// NewRecord - creates new CSV record. -func NewRecord() *Record { - return &Record{} -} diff --git a/pkg/s3select/csv/recordtransform.go b/pkg/s3select/csv/recordtransform.go deleted file mode 100644 index 18b5ea48..00000000 --- a/pkg/s3select/csv/recordtransform.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package csv - -import ( - "bytes" - "io" -) - -// recordTransform will convert records to always have newline records. -type recordTransform struct { - reader io.Reader - // recordDelimiter can be up to 2 characters. - recordDelimiter []byte - oneByte []byte - useOneByte bool -} - -func (rr *recordTransform) Read(p []byte) (n int, err error) { - if rr.useOneByte { - p[0] = rr.oneByte[0] - rr.useOneByte = false - n, err = rr.reader.Read(p[1:]) - n++ - } else { - n, err = rr.reader.Read(p) - } - - if err != nil { - return n, err - } - - // Do nothing if record-delimiter is already newline. - if string(rr.recordDelimiter) == "\n" { - return n, nil - } - - // Change record delimiters to newline. - if len(rr.recordDelimiter) == 1 { - for idx := 0; idx < len(p); { - i := bytes.Index(p[idx:], rr.recordDelimiter) - if i < 0 { - break - } - idx += i - p[idx] = '\n' - } - return n, nil - } - - // 2 characters... - for idx := 0; idx < len(p); { - i := bytes.Index(p[idx:], rr.recordDelimiter) - if i < 0 { - break - } - idx += i - - p[idx] = '\n' - p = append(p[:idx+1], p[idx+2:]...) - n-- - } - - if p[n-1] != rr.recordDelimiter[0] { - return n, nil - } - - if _, err = rr.reader.Read(rr.oneByte); err != nil { - return n, err - } - - if rr.oneByte[0] == rr.recordDelimiter[1] { - p[n-1] = '\n' - return n, nil - } - - rr.useOneByte = true - return n, nil -} diff --git a/pkg/s3select/csv/testdata/testdata.zip b/pkg/s3select/csv/testdata/testdata.zip deleted file mode 100644 index e4519334..00000000 Binary files a/pkg/s3select/csv/testdata/testdata.zip and /dev/null differ diff --git a/pkg/s3select/errors.go b/pkg/s3select/errors.go deleted file mode 100644 index feb1d17c..00000000 --- a/pkg/s3select/errors.go +++ /dev/null @@ -1,144 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3select - -// SelectError - represents s3 select error specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html#RESTObjectSELECTContent-responses-special-errors. 
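Note on recordTransform.Read above: it rewrites custom record delimiters to '\n' in place so the line-oriented splitter and csv.Reader never see them, and the oneByte/useOneByte bookkeeping exists because a two-byte delimiter such as "^Y" can be split across two Read calls. The following is a standalone illustration of that corner case, not part of the original code; it just applies a naive bytes.ReplaceAll to two arbitrary chunks to show what goes wrong without the one-byte look-ahead.

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        delim := []byte("^Y") // two-byte record delimiter, as in the "^Y" test files
        data := []byte("a,b,c^Yd,e,f^Y")

        // Naive per-chunk replacement: when a read boundary falls between '^' and 'Y',
        // neither chunk contains the whole delimiter and that record break is lost.
        chunk1, chunk2 := data[:6], data[6:] // "a,b,c^" and "Yd,e,f^Y"
        fmt.Printf("%q\n", bytes.ReplaceAll(chunk1, delim, []byte("\n"))) // "a,b,c^"
        fmt.Printf("%q\n", bytes.ReplaceAll(chunk2, delim, []byte("\n"))) // "Yd,e,f\n"
        // The first delimiter is never rewritten; carrying one byte of look-ahead
        // between reads, as recordTransform does, closes exactly this gap.
    }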
-type SelectError interface { - Cause() error - ErrorCode() string - ErrorMessage() string - HTTPStatusCode() int - Error() string -} - -type s3Error struct { - code string - message string - statusCode int - cause error -} - -func (err *s3Error) Cause() error { - return err.cause -} - -func (err *s3Error) ErrorCode() string { - return err.code -} - -func (err *s3Error) ErrorMessage() string { - return err.message -} - -func (err *s3Error) HTTPStatusCode() int { - return err.statusCode -} - -func (err *s3Error) Error() string { - return err.message -} - -func errMalformedXML(err error) *s3Error { - return &s3Error{ - code: "MalformedXML", - message: "The XML provided was not well-formed or did not validate against our published schema. Check the service documentation and try again: " + err.Error(), - statusCode: 400, - cause: err, - } -} - -func errInvalidCompressionFormat(err error) *s3Error { - return &s3Error{ - code: "InvalidCompressionFormat", - message: "The file is not in a supported compression format. Only GZIP and BZIP2 are supported.", - statusCode: 400, - cause: err, - } -} - -func errInvalidBZIP2CompressionFormat(err error) *s3Error { - return &s3Error{ - code: "InvalidCompressionFormat", - message: "BZIP2 is not applicable to the queried object. Please correct the request and try again.", - statusCode: 400, - cause: err, - } -} - -func errInvalidGZIPCompressionFormat(err error) *s3Error { - return &s3Error{ - code: "InvalidCompressionFormat", - message: "GZIP is not applicable to the queried object. Please correct the request and try again.", - statusCode: 400, - cause: err, - } -} - -func errInvalidDataSource(err error) *s3Error { - return &s3Error{ - code: "InvalidDataSource", - message: "Invalid data source type. Only CSV, JSON, and Parquet are supported.", - statusCode: 400, - cause: err, - } -} - -func errInvalidRequestParameter(err error) *s3Error { - return &s3Error{ - code: "InvalidRequestParameter", - message: "The value of a parameter in SelectRequest element is invalid. Check the service API documentation and try again.", - statusCode: 400, - cause: err, - } -} - -func errObjectSerializationConflict(err error) *s3Error { - return &s3Error{ - code: "ObjectSerializationConflict", - message: "InputSerialization specifies more than one format (CSV, JSON, or Parquet), or OutputSerialization specifies more than one format (CSV or JSON). InputSerialization and OutputSerialization can only specify one format each.", - statusCode: 400, - cause: err, - } -} - -func errInvalidExpressionType(err error) *s3Error { - return &s3Error{ - code: "InvalidExpressionType", - message: "The ExpressionType is invalid. Only SQL expressions are supported.", - statusCode: 400, - cause: err, - } -} - -func errMissingRequiredParameter(err error) *s3Error { - return &s3Error{ - code: "MissingRequiredParameter", - message: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", - statusCode: 400, - cause: err, - } -} - -func errTruncatedInput(err error) *s3Error { - return &s3Error{ - code: "TruncatedInput", - message: "Object decompression failed. Check that the object is properly compressed using the format specified in the request.", - statusCode: 400, - cause: err, - } -} diff --git a/pkg/s3select/genmessage.go b/pkg/s3select/genmessage.go deleted file mode 100644 index 4b3ead33..00000000 --- a/pkg/s3select/genmessage.go +++ /dev/null @@ -1,182 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3select - -import ( - "bytes" - "encoding/binary" - "fmt" - "hash/crc32" -) - -func genRecordsHeader() { - buf := new(bytes.Buffer) - - buf.WriteByte(13) - buf.WriteString(":message-type") - buf.WriteByte(7) - buf.Write([]byte{0, 5}) - buf.WriteString("event") - - buf.WriteByte(13) - buf.WriteString(":content-type") - buf.WriteByte(7) - buf.Write([]byte{0, 24}) - buf.WriteString("application/octet-stream") - - buf.WriteByte(11) - buf.WriteString(":event-type") - buf.WriteByte(7) - buf.Write([]byte{0, 7}) - buf.WriteString("Records") - - fmt.Println(buf.Bytes()) -} - -// Continuation Message -// ==================== -// Header specification -// -------------------- -// Continuation messages contain two headers, as follows: -// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-cont.png -// -// Payload specification -// --------------------- -// Continuation messages have no payload. -func genContinuationMessage() { - buf := new(bytes.Buffer) - - buf.WriteByte(13) - buf.WriteString(":message-type") - buf.WriteByte(7) - buf.Write([]byte{0, 5}) - buf.WriteString("event") - - buf.WriteByte(11) - buf.WriteString(":event-type") - buf.WriteByte(7) - buf.Write([]byte{0, 4}) - buf.WriteString("Cont") - - header := buf.Bytes() - headerLength := len(header) - payloadLength := 0 - totalLength := totalByteLength(headerLength, payloadLength) - - buf = new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, uint32(totalLength)) - binary.Write(buf, binary.BigEndian, uint32(headerLength)) - prelude := buf.Bytes() - binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(prelude)) - buf.Write(header) - message := buf.Bytes() - binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(message)) - - fmt.Println(buf.Bytes()) -} - -func genProgressHeader() { - buf := new(bytes.Buffer) - - buf.WriteByte(13) - buf.WriteString(":message-type") - buf.WriteByte(7) - buf.Write([]byte{0, 5}) - buf.WriteString("event") - - buf.WriteByte(13) - buf.WriteString(":content-type") - buf.WriteByte(7) - buf.Write([]byte{0, 8}) - buf.WriteString("text/xml") - - buf.WriteByte(11) - buf.WriteString(":event-type") - buf.WriteByte(7) - buf.Write([]byte{0, 8}) - buf.WriteString("Progress") - - fmt.Println(buf.Bytes()) -} - -func genStatsHeader() { - buf := new(bytes.Buffer) - - buf.WriteByte(13) - buf.WriteString(":message-type") - buf.WriteByte(7) - buf.Write([]byte{0, 5}) - buf.WriteString("event") - - buf.WriteByte(13) - buf.WriteString(":content-type") - buf.WriteByte(7) - buf.Write([]byte{0, 8}) - buf.WriteString("text/xml") - - buf.WriteByte(11) - buf.WriteString(":event-type") - buf.WriteByte(7) - buf.Write([]byte{0, 5}) - buf.WriteString("Stats") - - fmt.Println(buf.Bytes()) -} - -// End Message -// =========== -// Header specification -// -------------------- -// End messages contain two headers, as follows: -// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-end.png -// -// Payload specification -// 
--------------------- -// End messages have no payload. -func genEndMessage() { - buf := new(bytes.Buffer) - - buf.WriteByte(13) - buf.WriteString(":message-type") - buf.WriteByte(7) - buf.Write([]byte{0, 5}) - buf.WriteString("event") - - buf.WriteByte(11) - buf.WriteString(":event-type") - buf.WriteByte(7) - buf.Write([]byte{0, 3}) - buf.WriteString("End") - - header := buf.Bytes() - headerLength := len(header) - payloadLength := 0 - totalLength := totalByteLength(headerLength, payloadLength) - - buf = new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, uint32(totalLength)) - binary.Write(buf, binary.BigEndian, uint32(headerLength)) - prelude := buf.Bytes() - binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(prelude)) - buf.Write(header) - message := buf.Bytes() - binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(message)) - - fmt.Println(buf.Bytes()) -} diff --git a/pkg/s3select/json/args.go b/pkg/s3select/json/args.go deleted file mode 100644 index 729e9bc7..00000000 --- a/pkg/s3select/json/args.go +++ /dev/null @@ -1,95 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package json - -import ( - "encoding/xml" - "fmt" - "strings" -) - -const ( - document = "document" - lines = "lines" - - defaultRecordDelimiter = "\n" -) - -// ReaderArgs - represents elements inside in request XML. -type ReaderArgs struct { - ContentType string `xml:"Type"` - unmarshaled bool -} - -// IsEmpty - returns whether reader args is empty or not. -func (args *ReaderArgs) IsEmpty() bool { - return !args.unmarshaled -} - -// UnmarshalXML - decodes XML data. -func (args *ReaderArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). - type subReaderArgs ReaderArgs - parsedArgs := subReaderArgs{} - if err := d.DecodeElement(&parsedArgs, &start); err != nil { - return err - } - - parsedArgs.ContentType = strings.ToLower(parsedArgs.ContentType) - switch parsedArgs.ContentType { - case document, lines: - default: - return errInvalidJSONType(fmt.Errorf("invalid ContentType '%v'", parsedArgs.ContentType)) - } - - *args = ReaderArgs(parsedArgs) - args.unmarshaled = true - return nil -} - -// WriterArgs - represents elements inside in request XML. -type WriterArgs struct { - RecordDelimiter string `xml:"RecordDelimiter"` - unmarshaled bool -} - -// IsEmpty - returns whether writer args is empty or not. -func (args *WriterArgs) IsEmpty() bool { - return !args.unmarshaled -} - -// UnmarshalXML - decodes XML data. -func (args *WriterArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). 
- type subWriterArgs WriterArgs - parsedArgs := subWriterArgs{} - if err := d.DecodeElement(&parsedArgs, &start); err != nil { - return err - } - - switch len(parsedArgs.RecordDelimiter) { - case 0: - parsedArgs.RecordDelimiter = defaultRecordDelimiter - case 1, 2: - default: - return fmt.Errorf("invalid RecordDelimiter '%v'", parsedArgs.RecordDelimiter) - } - - *args = WriterArgs(parsedArgs) - args.unmarshaled = true - return nil -} diff --git a/pkg/s3select/json/errors.go b/pkg/s3select/json/errors.go deleted file mode 100644 index ef0d5982..00000000 --- a/pkg/s3select/json/errors.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package json - -type s3Error struct { - code string - message string - statusCode int - cause error -} - -func (err *s3Error) Cause() error { - return err.cause -} - -func (err *s3Error) ErrorCode() string { - return err.code -} - -func (err *s3Error) ErrorMessage() string { - return err.message -} - -func (err *s3Error) HTTPStatusCode() int { - return err.statusCode -} - -func (err *s3Error) Error() string { - return err.message -} - -func errInvalidJSONType(err error) *s3Error { - return &s3Error{ - code: "InvalidJsonType", - message: "The JsonType is invalid. Only DOCUMENT and LINES are supported.", - statusCode: 400, - cause: err, - } -} - -func errJSONParsingError(err error) *s3Error { - return &s3Error{ - code: "JSONParsingError", - message: "Encountered an error parsing the JSON file. Check the file and try again.", - statusCode: 400, - cause: err, - } -} diff --git a/pkg/s3select/json/preader.go b/pkg/s3select/json/preader.go deleted file mode 100644 index bb779800..00000000 --- a/pkg/s3select/json/preader.go +++ /dev/null @@ -1,227 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package json - -import ( - "bufio" - "bytes" - "io" - "runtime" - "sync" - - "github.com/bcicen/jstream" - "github.com/minio/minio/pkg/s3select/sql" -) - -// PReader - JSON record reader for S3Select. -// Operates concurrently on line-delimited JSON. 
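Note on the UnmarshalXML implementations above: both ReaderArgs and WriterArgs decode into a locally defined type (type subReaderArgs ReaderArgs) before copying back. A defined type does not inherit the original type's methods, so DecodeElement falls back to plain struct decoding instead of re-entering UnmarshalXML and recursing forever. A minimal sketch of the idiom follows; the Args element and its layout are hypothetical stand-ins for the real ones.

    package main

    import (
        "encoding/xml"
        "fmt"
        "strings"
    )

    // Args stands in for ReaderArgs/WriterArgs; the element layout is assumed.
    type Args struct {
        RecordDelimiter string `xml:"RecordDelimiter"`
        unmarshaled     bool
    }

    func (a *Args) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
        // plainArgs has the same fields but none of Args' methods, so the decoder
        // uses default struct decoding here instead of calling UnmarshalXML again.
        type plainArgs Args
        var parsed plainArgs
        if err := d.DecodeElement(&parsed, &start); err != nil {
            return err
        }
        if parsed.RecordDelimiter == "" {
            parsed.RecordDelimiter = "\n" // fill in a default, as the real code does
        }
        *a = Args(parsed)
        a.unmarshaled = true
        return nil
    }

    func main() {
        var a Args
        doc := "<JSON><RecordDelimiter>,</RecordDelimiter></JSON>"
        if err := xml.NewDecoder(strings.NewReader(doc)).Decode(&a); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", a) // {RecordDelimiter:, unmarshaled:true}
    }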
-type PReader struct { - args *ReaderArgs - readCloser io.ReadCloser // raw input - buf *bufio.Reader // input to the splitter - current []jstream.KVS // current block of results to be returned - recordsRead int // number of records read in current slice - input chan *queueItem // input for workers - queue chan *queueItem // output from workers in order - err error // global error state, only touched by Reader.Read - bufferPool sync.Pool // pool of []byte objects for input - kvDstPool sync.Pool // pool of []jstream.KV used for output - close chan struct{} // used for shutting down the splitter before end of stream - readerWg sync.WaitGroup // used to keep track of async reader. -} - -// queueItem is an item in the queue. -type queueItem struct { - input []byte // raw input sent to the worker - dst chan []jstream.KVS // result of block decode - err error // any error encountered will be set here -} - -// Read - reads single record. -// Once Read is called the previous record should no longer be referenced. -func (r *PReader) Read(dst sql.Record) (sql.Record, error) { - // If we have have any records left, return these before any error. - for len(r.current) <= r.recordsRead { - if r.err != nil { - return nil, r.err - } - // Move to next block - item, ok := <-r.queue - if !ok { - r.err = io.EOF - return nil, r.err - } - //lint:ignore SA6002 Using pointer would allocate more since we would have to copy slice header before taking a pointer. - r.kvDstPool.Put(r.current) - r.current = <-item.dst - r.err = item.err - r.recordsRead = 0 - } - kvRecord := r.current[r.recordsRead] - r.recordsRead++ - - dstRec, ok := dst.(*Record) - if !ok { - dstRec = &Record{} - } - dstRec.KVS = kvRecord - dstRec.SelectFormat = sql.SelectFmtJSON - return dstRec, nil -} - -// Close - closes underlying reader. -func (r *PReader) Close() error { - if r.close != nil { - close(r.close) - r.readerWg.Wait() - r.close = nil - } - r.recordsRead = len(r.current) - if r.err == nil { - r.err = io.EOF - } - return r.readCloser.Close() -} - -// nextSplit will attempt to skip a number of bytes and -// return the buffer until the next newline occurs. -// The last block will be sent along with an io.EOF. -func (r *PReader) nextSplit(skip int, dst []byte) ([]byte, error) { - if cap(dst) < skip { - dst = make([]byte, 0, skip+1024) - } - dst = dst[:skip] - if skip > 0 { - n, err := io.ReadFull(r.buf, dst) - if err != nil && err != io.ErrUnexpectedEOF { - // If an EOF happens after reading some but not all the bytes, - // ReadFull returns ErrUnexpectedEOF. - return dst[:n], err - } - dst = dst[:n] - if err == io.ErrUnexpectedEOF { - return dst, io.EOF - } - } - // Read until next line. - in, err := r.buf.ReadBytes('\n') - dst = append(dst, in...) - return dst, err -} - -// jsonSplitSize is the size of each block. -// Blocks will read this much and find the first following newline. -// 128KB appears to be a very reasonable default. -const jsonSplitSize = 128 << 10 - -// startReaders will read the header if needed and spin up a parser -// and a number of workers based on GOMAXPROCS. -// If an error is returned no goroutines have been started and r.err will have been set. 
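The nextSplit logic above is the heart of the parallel reader: read a fixed-size chunk, then extend it to the next newline so a record never straddles two worker blocks. A standalone sketch of the same idea, under the assumption of well-formed line-delimited input (names here are illustrative):

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// splitBlocks reads fixed-size chunks and extends each one to the next
// newline, so no line-delimited record is ever cut across two blocks.
func splitBlocks(r *bufio.Reader, blockSize int) ([][]byte, error) {
	var blocks [][]byte
	for {
		block := make([]byte, blockSize)
		n, err := io.ReadFull(r, block)
		if n == 0 {
			if err == io.EOF || err == io.ErrUnexpectedEOF {
				return blocks, nil
			}
			return blocks, err
		}
		block = block[:n]

		// Extend the chunk to the end of the current line.
		rest, rerr := r.ReadBytes('\n')
		block = append(block, rest...)
		blocks = append(blocks, block)

		if rerr != nil && rerr != io.EOF {
			return blocks, rerr
		}
		if err != nil || rerr == io.EOF {
			return blocks, nil // input exhausted
		}
	}
}

func main() {
	in := strings.NewReader(`{"a":1}` + "\n" + `{"b":2}` + "\n" + `{"c":3}` + "\n")
	blocks, _ := splitBlocks(bufio.NewReader(in), 5)
	for _, b := range blocks {
		fmt.Printf("%q\n", b)
	}
	// "{\"a\":1}\n"
	// "{\"b\":2}\n"
	// "{\"c\":3}\n"
}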
-func (r *PReader) startReaders() { - r.bufferPool.New = func() interface{} { - return make([]byte, jsonSplitSize+1024) - } - - // Create queue - r.queue = make(chan *queueItem, runtime.GOMAXPROCS(0)) - r.input = make(chan *queueItem, runtime.GOMAXPROCS(0)) - r.readerWg.Add(1) - - // Start splitter - go func() { - defer close(r.input) - defer close(r.queue) - defer r.readerWg.Done() - for { - next, err := r.nextSplit(jsonSplitSize, r.bufferPool.Get().([]byte)) - q := queueItem{ - input: next, - dst: make(chan []jstream.KVS, 1), - err: err, - } - select { - case <-r.close: - return - case r.queue <- &q: - } - - select { - case <-r.close: - return - case r.input <- &q: - } - if err != nil { - // Exit on any error. - return - } - } - }() - - // Start parsers - for i := 0; i < runtime.GOMAXPROCS(0); i++ { - go func() { - for in := range r.input { - if len(in.input) == 0 { - in.dst <- nil - continue - } - dst, ok := r.kvDstPool.Get().([]jstream.KVS) - if !ok { - dst = make([]jstream.KVS, 0, 1000) - } - - d := jstream.NewDecoder(bytes.NewBuffer(in.input), 0).ObjectAsKVS() - stream := d.Stream() - all := dst[:0] - for mv := range stream { - var kvs jstream.KVS - if mv.ValueType == jstream.Object { - // This is a JSON object type (that preserves key - // order) - kvs = mv.Value.(jstream.KVS) - } else { - // To be AWS S3 compatible Select for JSON needs to - // output non-object JSON as single column value - // i.e. a map with `_1` as key and value as the - // non-object. - kvs = jstream.KVS{jstream.KV{Key: "_1", Value: mv.Value}} - } - all = append(all, kvs) - } - // We don't need the input any more. - //lint:ignore SA6002 Using pointer would allocate more since we would have to copy slice header before taking a pointer. - r.bufferPool.Put(in.input) - in.input = nil - in.err = d.Err() - in.dst <- all - } - }() - } -} - -// NewPReader - creates new parallel JSON reader using readCloser. -// Should only be used for LINES types. -func NewPReader(readCloser io.ReadCloser, args *ReaderArgs) *PReader { - r := &PReader{ - args: args, - buf: bufio.NewReaderSize(readCloser, jsonSplitSize*2), - readCloser: readCloser, - close: make(chan struct{}), - } - r.startReaders() - return r -} diff --git a/pkg/s3select/json/preader_test.go b/pkg/s3select/json/preader_test.go deleted file mode 100644 index 7d345e6e..00000000 --- a/pkg/s3select/json/preader_test.go +++ /dev/null @@ -1,106 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
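Usage sketch for the parallel reader, mirroring how the tests further down drive it: wrap line-delimited JSON in a ReadCloser, call Read until io.EOF, and pass the previous record back in as the reusable destination.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/minio/minio/pkg/s3select/json"
	"github.com/minio/minio/pkg/s3select/sql"
)

func main() {
	input := `{"name":"Michael","age":31}` + "\n" + `{"name":"Andy","age":30}` + "\n"
	r := json.NewPReader(ioutil.NopCloser(strings.NewReader(input)), &json.ReaderArgs{})
	defer r.Close()

	var rec sql.Record
	var err error
	for {
		if rec, err = r.Read(rec); err != nil {
			break
		}
		fmt.Printf("%v\n", rec) // each result is a *json.Record carrying a jstream.KVS
	}
	if err != io.EOF {
		fmt.Println("read failed:", err)
	}
}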
- */ - -package json - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/minio/minio/pkg/s3select/sql" -) - -func TestNewPReader(t *testing.T) { - files, err := ioutil.ReadDir("testdata") - if err != nil { - t.Fatal(err) - } - for _, file := range files { - t.Run(file.Name(), func(t *testing.T) { - f, err := os.Open(filepath.Join("testdata", file.Name())) - if err != nil { - t.Fatal(err) - } - r := NewPReader(f, &ReaderArgs{}) - var record sql.Record - for { - record, err = r.Read(record) - if err != nil { - break - } - } - r.Close() - if err != io.EOF { - t.Fatalf("Reading failed with %s, %s", err, file.Name()) - } - }) - - t.Run(file.Name()+"-close", func(t *testing.T) { - f, err := os.Open(filepath.Join("testdata", file.Name())) - if err != nil { - t.Fatal(err) - } - r := NewPReader(f, &ReaderArgs{}) - r.Close() - var record sql.Record - for { - record, err = r.Read(record) - if err != nil { - break - } - } - if err != io.EOF { - t.Fatalf("Reading failed with %s, %s", err, file.Name()) - } - }) - } -} - -func BenchmarkPReader(b *testing.B) { - files, err := ioutil.ReadDir("testdata") - if err != nil { - b.Fatal(err) - } - for _, file := range files { - b.Run(file.Name(), func(b *testing.B) { - f, err := ioutil.ReadFile(filepath.Join("testdata", file.Name())) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(f))) - b.ReportAllocs() - b.ResetTimer() - var record sql.Record - for i := 0; i < b.N; i++ { - r := NewPReader(ioutil.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{}) - for { - record, err = r.Read(record) - if err != nil { - break - } - } - r.Close() - if err != io.EOF { - b.Fatalf("Reading failed with %s, %s", err, file.Name()) - } - } - }) - } -} diff --git a/pkg/s3select/json/reader.go b/pkg/s3select/json/reader.go deleted file mode 100644 index cd672db7..00000000 --- a/pkg/s3select/json/reader.go +++ /dev/null @@ -1,133 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package json - -import ( - "errors" - "io" - "sync" - - "github.com/minio/minio/pkg/s3select/sql" - - "github.com/bcicen/jstream" -) - -// Reader - JSON record reader for S3Select. -type Reader struct { - args *ReaderArgs - decoder *jstream.Decoder - valueCh chan *jstream.MetaValue - readCloser io.ReadCloser -} - -// Read - reads single record. -func (r *Reader) Read(dst sql.Record) (sql.Record, error) { - v, ok := <-r.valueCh - if !ok { - if err := r.decoder.Err(); err != nil { - return nil, errJSONParsingError(err) - } - return nil, io.EOF - } - - var kvs jstream.KVS - if v.ValueType == jstream.Object { - // This is a JSON object type (that preserves key - // order) - kvs = v.Value.(jstream.KVS) - } else { - // To be AWS S3 compatible Select for JSON needs to - // output non-object JSON as single column value - // i.e. a map with `_1` as key and value as the - // non-object. 
- kvs = jstream.KVS{jstream.KV{Key: "_1", Value: v.Value}} - } - - dstRec, ok := dst.(*Record) - if !ok { - dstRec = &Record{} - } - dstRec.KVS = kvs - dstRec.SelectFormat = sql.SelectFmtJSON - return dstRec, nil -} - -// Close - closes underlying reader. -func (r *Reader) Close() error { - // Close the input. - err := r.readCloser.Close() - for range r.valueCh { - // Drain values so we don't leak a goroutine. - // Since we have closed the input, it should fail rather quickly. - } - return err -} - -// NewReader - creates new JSON reader using readCloser. -func NewReader(readCloser io.ReadCloser, args *ReaderArgs) *Reader { - readCloser = &syncReadCloser{rc: readCloser} - d := jstream.NewDecoder(readCloser, 0).ObjectAsKVS() - return &Reader{ - args: args, - decoder: d, - valueCh: d.Stream(), - readCloser: readCloser, - } -} - -// syncReadCloser will wrap a readcloser and make it safe to call Close -// while reads are running. -// All read errors are also postponed until Close is called and -// io.EOF is returned instead. -type syncReadCloser struct { - rc io.ReadCloser - errMu sync.Mutex - err error -} - -func (pr *syncReadCloser) Read(p []byte) (n int, err error) { - // This ensures that Close will block until Read has completed. - // This allows another goroutine to close the reader. - pr.errMu.Lock() - defer pr.errMu.Unlock() - if pr.err != nil { - return 0, io.EOF - } - n, pr.err = pr.rc.Read(p) - if pr.err != nil { - // Translate any error into io.EOF, so we don't crash: - // https://github.com/bcicen/jstream/blob/master/scanner.go#L48 - return n, io.EOF - } - - return n, nil -} - -var errClosed = errors.New("read after close") - -func (pr *syncReadCloser) Close() error { - pr.errMu.Lock() - defer pr.errMu.Unlock() - if pr.err == errClosed { - return nil - } - if pr.err != nil { - return pr.err - } - pr.err = errClosed - return pr.rc.Close() -} diff --git a/pkg/s3select/json/reader_test.go b/pkg/s3select/json/reader_test.go deleted file mode 100644 index c8970a91..00000000 --- a/pkg/s3select/json/reader_test.go +++ /dev/null @@ -1,106 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
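Usage sketch for the single-threaded Reader: a non-object top-level value comes back as a one-column record keyed "_1", matching the comment above. The output formatting noted in the final comment is approximate.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"

	"github.com/minio/minio/pkg/s3select/json"
	"github.com/minio/minio/pkg/s3select/sql"
)

func main() {
	// A bare top-level string, i.e. non-object JSON.
	r := json.NewReader(ioutil.NopCloser(strings.NewReader(`"hello"`)), &json.ReaderArgs{})
	defer r.Close()

	var rec sql.Record
	var err error
	for {
		if rec, err = r.Read(rec); err != nil {
			break
		}
		fmt.Printf("%+v\n", rec) // a single KV with key "_1" and value "hello"
	}
	if err != io.EOF {
		fmt.Println("read failed:", err)
	}
}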
- */ - -package json - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/minio/minio/pkg/s3select/sql" -) - -func TestNewReader(t *testing.T) { - files, err := ioutil.ReadDir("testdata") - if err != nil { - t.Fatal(err) - } - for _, file := range files { - t.Run(file.Name(), func(t *testing.T) { - f, err := os.Open(filepath.Join("testdata", file.Name())) - if err != nil { - t.Fatal(err) - } - r := NewReader(f, &ReaderArgs{}) - var record sql.Record - for { - record, err = r.Read(record) - if err != nil { - break - } - } - r.Close() - if err != io.EOF { - t.Fatalf("Reading failed with %s, %s", err, file.Name()) - } - }) - - t.Run(file.Name()+"-close", func(t *testing.T) { - f, err := os.Open(filepath.Join("testdata", file.Name())) - if err != nil { - t.Fatal(err) - } - r := NewReader(f, &ReaderArgs{}) - r.Close() - var record sql.Record - for { - record, err = r.Read(record) - if err != nil { - break - } - } - if err != io.EOF { - t.Fatalf("Reading failed with %s, %s", err, file.Name()) - } - }) - } -} - -func BenchmarkReader(b *testing.B) { - files, err := ioutil.ReadDir("testdata") - if err != nil { - b.Fatal(err) - } - for _, file := range files { - b.Run(file.Name(), func(b *testing.B) { - f, err := ioutil.ReadFile(filepath.Join("testdata", file.Name())) - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(f))) - b.ReportAllocs() - b.ResetTimer() - var record sql.Record - for i := 0; i < b.N; i++ { - r := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{}) - for { - record, err = r.Read(record) - if err != nil { - break - } - } - r.Close() - if err != io.EOF { - b.Fatalf("Reading failed with %s, %s", err, file.Name()) - } - } - }) - } -} diff --git a/pkg/s3select/json/record.go b/pkg/s3select/json/record.go deleted file mode 100644 index d599466f..00000000 --- a/pkg/s3select/json/record.go +++ /dev/null @@ -1,209 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package json - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "math" - "strconv" - "strings" - - "github.com/bcicen/jstream" - csv "github.com/minio/minio/pkg/csvparser" - "github.com/minio/minio/pkg/s3select/sql" -) - -// RawJSON is a byte-slice that contains valid JSON -type RawJSON []byte - -// MarshalJSON instance for []byte that assumes that byte-slice is -// already serialized JSON -func (b RawJSON) MarshalJSON() ([]byte, error) { - return b, nil -} - -// Record - is JSON record. -type Record struct { - // Used in Set(), Marshal*() - KVS jstream.KVS - - SelectFormat sql.SelectObjectFormat -} - -// Get - gets the value for a column name. -func (r *Record) Get(name string) (*sql.Value, error) { - // Get is implemented directly in the sql package. - return nil, errors.New("not implemented here") -} - -// Reset the record. -func (r *Record) Reset() { - if len(r.KVS) > 0 { - r.KVS = r.KVS[:0] - } -} - -// Clone the record and if possible use the destination provided. 
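A tiny illustration of the RawJSON idea defined above: because MarshalJSON returns the bytes unchanged, a value that is already serialized JSON is embedded verbatim rather than quoted as a string. The type is redeclared locally so the sketch stands alone.

package main

import (
	"encoding/json"
	"fmt"
)

// RawJSON mirrors the type above: a byte slice assumed to already be valid JSON.
type RawJSON []byte

func (b RawJSON) MarshalJSON() ([]byte, error) { return b, nil }

func main() {
	out, _ := json.Marshal(map[string]interface{}{
		"raw":    RawJSON(`{"nested":true}`),
		"quoted": `{"nested":true}`,
	})
	fmt.Println(string(out))
	// {"quoted":"{\"nested\":true}","raw":{"nested":true}}
}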
-func (r *Record) Clone(dst sql.Record) sql.Record { - other, ok := dst.(*Record) - if !ok { - other = &Record{} - } - if len(other.KVS) > 0 { - other.KVS = other.KVS[:0] - } - other.KVS = append(other.KVS, r.KVS...) - return other -} - -// Set - sets the value for a column name. -func (r *Record) Set(name string, value *sql.Value) (sql.Record, error) { - var v interface{} - if b, ok := value.ToBool(); ok { - v = b - } else if f, ok := value.ToFloat(); ok { - v = f - } else if i, ok := value.ToInt(); ok { - v = i - } else if t, ok := value.ToTimestamp(); ok { - v = sql.FormatSQLTimestamp(t) - } else if s, ok := value.ToString(); ok { - v = s - } else if value.IsNull() { - v = nil - } else if b, ok := value.ToBytes(); ok { - // This can either be raw json or a CSV value. - // Only treat objects and arrays as JSON. - if len(b) > 0 && (b[0] == '{' || b[0] == '[') { - v = RawJSON(b) - } else { - v = string(b) - } - } else if arr, ok := value.ToArray(); ok { - v = arr - } else { - return nil, fmt.Errorf("unsupported sql value %v and type %v", value, value.GetTypeString()) - } - - name = strings.Replace(name, "*", "__ALL__", -1) - r.KVS = append(r.KVS, jstream.KV{Key: name, Value: v}) - return r, nil -} - -// WriteCSV - encodes to CSV data. -func (r *Record) WriteCSV(writer io.Writer, opts sql.WriteCSVOpts) error { - var csvRecord []string - for _, kv := range r.KVS { - var columnValue string - switch val := kv.Value.(type) { - case float64: - columnValue = jsonFloat(val) - case string: - columnValue = val - case bool, int64: - columnValue = fmt.Sprintf("%v", val) - case nil: - columnValue = "" - case RawJSON: - columnValue = string([]byte(val)) - case []interface{}: - b, err := json.Marshal(val) - if err != nil { - return err - } - columnValue = string(b) - default: - return fmt.Errorf("Cannot marshal unhandled type: %T", kv.Value) - } - csvRecord = append(csvRecord, columnValue) - } - - w := csv.NewWriter(writer) - w.Comma = opts.FieldDelimiter - w.Quote = opts.Quote - w.AlwaysQuote = opts.AlwaysQuote - w.QuoteEscape = opts.QuoteEscape - if err := w.Write(csvRecord); err != nil { - return err - } - w.Flush() - if err := w.Error(); err != nil { - return err - } - - return nil -} - -// Raw - returns the underlying representation. -func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) { - return r.SelectFormat, r.KVS -} - -// WriteJSON - encodes to JSON data. -func (r *Record) WriteJSON(writer io.Writer) error { - return json.NewEncoder(writer).Encode(r.KVS) -} - -// Replace the underlying buffer of json data. -func (r *Record) Replace(k interface{}) error { - v, ok := k.(jstream.KVS) - if !ok { - return fmt.Errorf("cannot replace internal data in json record with type %T", k) - } - r.KVS = v - return nil -} - -// NewRecord - creates new empty JSON record. -func NewRecord(f sql.SelectObjectFormat) *Record { - return &Record{ - KVS: jstream.KVS{}, - SelectFormat: f, - } -} - -// jsonFloat converts a float to string similar to Go stdlib formats json floats. -func jsonFloat(f float64) string { - var tmp [32]byte - dst := tmp[:0] - - // Convert as if by ES6 number to string conversion. - // This matches most other JSON generators. - // See golang.org/issue/6384 and golang.org/issue/14135. - // Like fmt %g, but the exponent cutoffs are different - // and exponents themselves are not padded to two digits. 
- abs := math.Abs(f) - fmt := byte('f') - if abs != 0 { - if abs < 1e-6 || abs >= 1e21 { - fmt = 'e' - } - } - dst = strconv.AppendFloat(dst, f, fmt, -1, 64) - if fmt == 'e' { - // clean up e-09 to e-9 - n := len(dst) - if n >= 4 && dst[n-4] == 'e' && dst[n-3] == '-' && dst[n-2] == '0' { - dst[n-2] = dst[n-1] - dst = dst[:n-1] - } - } - return string(dst) -} diff --git a/pkg/s3select/json/testdata/10.json b/pkg/s3select/json/testdata/10.json deleted file mode 100644 index 57bf5e80..00000000 --- a/pkg/s3select/json/testdata/10.json +++ /dev/null @@ -1,12 +0,0 @@ -[ - { - "key_1": "value", - "key_2": "value" - } -] -[ - { - "key_1": "value2", - "key_2": "value3" - } -] diff --git a/pkg/s3select/json/testdata/11.json b/pkg/s3select/json/testdata/11.json deleted file mode 100644 index e63d62d8..00000000 --- a/pkg/s3select/json/testdata/11.json +++ /dev/null @@ -1,8 +0,0 @@ -"a" -1 -3.145 -["a"] -{} -{ - "a": 1 -} diff --git a/pkg/s3select/json/testdata/12.json b/pkg/s3select/json/testdata/12.json deleted file mode 100644 index 8c175ec2..00000000 --- a/pkg/s3select/json/testdata/12.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "a": 1 -}{ - "b": 2 -} diff --git a/pkg/s3select/json/testdata/2.json b/pkg/s3select/json/testdata/2.json deleted file mode 100644 index 45fed099..00000000 --- a/pkg/s3select/json/testdata/2.json +++ /dev/null @@ -1 +0,0 @@ -{"text": "hello world\\n2nd line"} diff --git a/pkg/s3select/json/testdata/3.json b/pkg/s3select/json/testdata/3.json deleted file mode 100644 index 949390e2..00000000 --- a/pkg/s3select/json/testdata/3.json +++ /dev/null @@ -1 +0,0 @@ -{"hello":"wor{l}d"} diff --git a/pkg/s3select/json/testdata/4.json b/pkg/s3select/json/testdata/4.json deleted file mode 100644 index ef2b6547..00000000 --- a/pkg/s3select/json/testdata/4.json +++ /dev/null @@ -1,26 +0,0 @@ -{ - "id": "0001", - "type": "donut", - "name": "Cake", - "ppu": 0.55, - "batters": - { - "batter": - [ - { "id": "1001", "type": "Regular" }, - { "id": "1002", "type": "Chocolate" }, - { "id": "1003", "type": "Blueberry" }, - { "id": "1004", "type": "Devil's Food" } - ] - }, - "topping": - [ - { "id": "5001", "type": "None" }, - { "id": "5002", "type": "Glazed" }, - { "id": "5005", "type": "Sugar" }, - { "id": "5007", "type": "Powdered Sugar" }, - { "id": "5006", "type": "Chocolate with Sprinkles" }, - { "id": "5003", "type": "Chocolate" }, - { "id": "5004", "type": "Maple" } - ] -} diff --git a/pkg/s3select/json/testdata/5.json b/pkg/s3select/json/testdata/5.json deleted file mode 100644 index ef69872d..00000000 --- a/pkg/s3select/json/testdata/5.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "foo": { - "bar": "baz" - } -} diff --git a/pkg/s3select/json/testdata/6.json b/pkg/s3select/json/testdata/6.json deleted file mode 100644 index 31e3e433..00000000 --- a/pkg/s3select/json/testdata/6.json +++ /dev/null @@ -1 +0,0 @@ -{ "name": "John", "age":28, "hobby": { "name": "chess", "type": "boardgame" }} diff --git a/pkg/s3select/json/testdata/7.json b/pkg/s3select/json/testdata/7.json deleted file mode 100644 index 60e603e3..00000000 --- a/pkg/s3select/json/testdata/7.json +++ /dev/null @@ -1,3 +0,0 @@ -{"name":"Michael", "age": 31} -{"name":"Andy", "age": 30} -{"name":"Justin", "age": 19} diff --git a/pkg/s3select/json/testdata/8.json b/pkg/s3select/json/testdata/8.json deleted file mode 100644 index cc9350e5..00000000 --- a/pkg/s3select/json/testdata/8.json +++ /dev/null @@ -1,2 +0,0 @@ -{"a":"}" -} diff --git a/pkg/s3select/json/testdata/9.json b/pkg/s3select/json/testdata/9.json deleted file mode 100644 index 
f9ab7046..00000000 --- a/pkg/s3select/json/testdata/9.json +++ /dev/null @@ -1,6 +0,0 @@ -[ - { - "key_1": "value", - "key_2": "value" - } -] diff --git a/pkg/s3select/message.go b/pkg/s3select/message.go deleted file mode 100644 index 1733e905..00000000 --- a/pkg/s3select/message.go +++ /dev/null @@ -1,432 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3select - -import ( - "bytes" - "encoding/binary" - "fmt" - "hash/crc32" - "net/http" - "strconv" - "sync/atomic" - "time" -) - -// A message is in the format specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-frame-overview.png -// hence the calculation is made accordingly. -func totalByteLength(headerLength, payloadLength int) int { - return 4 + 4 + 4 + headerLength + payloadLength + 4 -} - -func genMessage(header, payload []byte) []byte { - headerLength := len(header) - payloadLength := len(payload) - totalLength := totalByteLength(headerLength, payloadLength) - - buf := new(bytes.Buffer) - binary.Write(buf, binary.BigEndian, uint32(totalLength)) - binary.Write(buf, binary.BigEndian, uint32(headerLength)) - prelude := buf.Bytes() - binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(prelude)) - buf.Write(header) - if payload != nil { - buf.Write(payload) - } - message := buf.Bytes() - binary.Write(buf, binary.BigEndian, crc32.ChecksumIEEE(message)) - - return buf.Bytes() -} - -// Refer genRecordsHeader(). -var recordsHeader = []byte{ - 13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't', - 13, ':', 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 24, 'a', 'p', 'p', 'l', 'i', 'c', 'a', 't', 'i', 'o', 'n', '/', 'o', 'c', 't', 'e', 't', '-', 's', 't', 'r', 'e', 'a', 'm', - 11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 7, 'R', 'e', 'c', 'o', 'r', 'd', 's', -} - -const ( - // Chosen for compatibility with AWS JAVA SDK - // It has a a buffer size of 128K: - // https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-s3/src/main/java/com/amazonaws/services/s3/internal/eventstreaming/MessageDecoder.java#L26 - // but we must make sure there is always space to add 256 bytes: - // https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-s3/src/main/java/com/amazonaws/services/s3/model/SelectObjectContentEventStream.java#L197 - maxRecordMessageLength = (128 << 10) - 256 -) - -var ( - bufLength = payloadLenForMsgLen(maxRecordMessageLength) -) - -// newRecordsMessage - creates new Records Message which can contain a single record, partial records, -// or multiple records. Depending on the size of the result, a response can contain one or more of these messages. 
-// -// Header specification -// Records messages contain three headers, as follows: -// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-record.png -// -// Payload specification -// Records message payloads can contain a single record, partial records, or multiple records. -func newRecordsMessage(payload []byte) []byte { - return genMessage(recordsHeader, payload) -} - -// payloadLenForMsgLen computes the length of the payload in a record -// message given the total length of the message. -func payloadLenForMsgLen(messageLength int) int { - headerLength := len(recordsHeader) - payloadLength := messageLength - 4 - 4 - 4 - headerLength - 4 - return payloadLength -} - -// continuationMessage - S3 periodically sends this message to keep the TCP connection open. -// These messages appear in responses at random. The client must detect the message type and process accordingly. -// -// Header specification: -// Continuation messages contain two headers, as follows: -// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-cont.png -// -// Payload specification: -// Continuation messages have no payload. -var continuationMessage = []byte{ - 0, 0, 0, 57, // total byte-length. - 0, 0, 0, 41, // headers byte-length. - 139, 161, 157, 242, // prelude crc. - 13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't', // headers. - 11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 4, 'C', 'o', 'n', 't', // headers. - 156, 134, 74, 13, // message crc. -} - -// Refer genProgressHeader(). -var progressHeader = []byte{ - 13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't', - 13, ':', 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 8, 't', 'e', 'x', 't', '/', 'x', 'm', 'l', - 11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 8, 'P', 'r', 'o', 'g', 'r', 'e', 's', 's', -} - -// newProgressMessage - creates new Progress Message. S3 periodically sends this message, if requested. -// It contains information about the progress of a query that has started but has not yet completed. -// -// Header specification: -// Progress messages contain three headers, as follows: -// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-progress.png -// -// Payload specification: -// Progress message payload is an XML document containing information about the progress of a request. -// * BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed). -// * BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed). -// * BytesReturned => Current number of bytes of records payload data returned by S3. -// -// For uncompressed files, BytesScanned and BytesProcessed are equal. -// -// Example: -// -// -// -// 512 -// 1024 -// 1024 -// -// -func newProgressMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte { - payload := []byte(`` + - strconv.FormatInt(bytesScanned, 10) + `` + - strconv.FormatInt(bytesProcessed, 10) + `` + - strconv.FormatInt(bytesReturned, 10) + ``) - return genMessage(progressHeader, payload) -} - -// Refer genStatsHeader(). 
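A quick cross-check of the framing arithmetic against the hard-coded continuation frame above: its 41 bytes of headers and empty payload give a 57-byte message, which is exactly the first prelude word of continuationMessage. Note the parameterised payloadLen here differs from payloadLenForMsgLen above, which bakes in len(recordsHeader); it is only for illustration.

package main

import "fmt"

// totalByteLength mirrors the helper above: prelude (4+4), prelude CRC (4),
// headers, payload, message CRC (4).
func totalByteLength(headerLength, payloadLength int) int {
	return 4 + 4 + 4 + headerLength + payloadLength + 4
}

// payloadLen inverts it for a given header length.
func payloadLen(messageLength, headerLength int) int {
	return messageLength - 4 - 4 - 4 - headerLength - 4
}

func main() {
	fmt.Println(totalByteLength(41, 0)) // 57, matching continuationMessage's total byte-length
	fmt.Println(payloadLen(57, 41))     // 0, a continuation frame carries no payload
}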
-var statsHeader = []byte{ - 13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't', - 13, ':', 'c', 'o', 'n', 't', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 8, 't', 'e', 'x', 't', '/', 'x', 'm', 'l', - 11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'S', 't', 'a', 't', 's', -} - -// newStatsMessage - creates new Stats Message. S3 sends this message at the end of the request. -// It contains statistics about the query. -// -// Header specification: -// Stats messages contain three headers, as follows: -// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-stats.png -// -// Payload specification: -// Stats message payload is an XML document containing information about a request's stats when processing is complete. -// * BytesScanned => Number of bytes that have been processed before being uncompressed (if the file is compressed). -// * BytesProcessed => Number of bytes that have been processed after being uncompressed (if the file is compressed). -// * BytesReturned => Total number of bytes of records payload data returned by S3. -// -// For uncompressed files, BytesScanned and BytesProcessed are equal. -// -// Example: -// -// -// -// 512 -// 1024 -// 1024 -// -func newStatsMessage(bytesScanned, bytesProcessed, bytesReturned int64) []byte { - payload := []byte(`` + - strconv.FormatInt(bytesScanned, 10) + `` + - strconv.FormatInt(bytesProcessed, 10) + `` + - strconv.FormatInt(bytesReturned, 10) + ``) - return genMessage(statsHeader, payload) -} - -// endMessage - indicates that the request is complete, and no more messages will be sent. -// You should not assume that the request is complete until the client receives an End message. -// -// Header specification: -// End messages contain two headers, as follows: -// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-end.png -// -// Payload specification: -// End messages have no payload. -var endMessage = []byte{ - 0, 0, 0, 56, // total byte-length. - 0, 0, 0, 40, // headers byte-length. - 193, 198, 132, 212, // prelude crc. - 13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'v', 'e', 'n', 't', // headers. - 11, ':', 'e', 'v', 'e', 'n', 't', '-', 't', 'y', 'p', 'e', 7, 0, 3, 'E', 'n', 'd', // headers. - 207, 151, 211, 146, // message crc. -} - -// newErrorMessage - creates new Request Level Error Message. S3 sends this message if the request failed for any reason. -// It contains the error code and error message for the failure. If S3 sends a RequestLevelError message, -// it doesn't send an End message. -// -// Header specification: -// Request-level error messages contain three headers, as follows: -// https://docs.aws.amazon.com/AmazonS3/latest/API/images/s3select-frame-diagram-error.png -// -// Payload specification: -// Request-level error messages have no payload. 
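The Progress and Stats payloads are small XML documents assembled from the three counters documented above; newProgressMessage and newStatsMessage concatenate the formatted counters between the corresponding XML elements. A representative Stats payload has roughly this shape (reconstructed from the documented field names, so the XML prolog and exact element layout are assumptions):

// Shape of the Stats payload; Progress uses the same elements under a Progress root.
const exampleStatsPayload = `<?xml version="1.0" encoding="UTF-8"?>` +
	`<Stats>` +
	`<BytesScanned>512</BytesScanned>` +
	`<BytesProcessed>1024</BytesProcessed>` +
	`<BytesReturned>1024</BytesReturned>` +
	`</Stats>`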
-func newErrorMessage(errorCode, errorMessage []byte) []byte { - buf := new(bytes.Buffer) - - buf.Write([]byte{13, ':', 'm', 'e', 's', 's', 'a', 'g', 'e', '-', 't', 'y', 'p', 'e', 7, 0, 5, 'e', 'r', 'r', 'o', 'r'}) - - buf.Write([]byte{14, ':', 'e', 'r', 'r', 'o', 'r', '-', 'm', 'e', 's', 's', 'a', 'g', 'e', 7}) - binary.Write(buf, binary.BigEndian, uint16(len(errorMessage))) - buf.Write(errorMessage) - - buf.Write([]byte{11, ':', 'e', 'r', 'r', 'o', 'r', '-', 'c', 'o', 'd', 'e', 7}) - binary.Write(buf, binary.BigEndian, uint16(len(errorCode))) - buf.Write(errorCode) - - return genMessage(buf.Bytes(), nil) -} - -// NewErrorMessage - creates new Request Level Error Message specified in -// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html. -func NewErrorMessage(errorCode, errorMessage string) []byte { - return newErrorMessage([]byte(errorCode), []byte(errorMessage)) -} - -type messageWriter struct { - writer http.ResponseWriter - getProgressFunc func() (int64, int64) - bytesReturned int64 - - payloadBuffer []byte - payloadBufferIndex int - payloadCh chan *bytes.Buffer - - finBytesScanned, finBytesProcessed int64 - - errCh chan []byte - doneCh chan struct{} -} - -func (writer *messageWriter) write(data []byte) bool { - if _, err := writer.writer.Write(data); err != nil { - return false - } - - writer.writer.(http.Flusher).Flush() - return true -} - -func (writer *messageWriter) start() { - keepAliveTicker := time.NewTicker(1 * time.Second) - var progressTicker *time.Ticker - var progressTickerC <-chan time.Time - if writer.getProgressFunc != nil { - progressTicker = time.NewTicker(1 * time.Minute) - progressTickerC = progressTicker.C - } - recordStagingTicker := time.NewTicker(500 * time.Millisecond) - - // Exit conditions: - // - // 1. If a writer.write() returns false, select loop below exits and - // closes `doneCh` to indicate to caller to also exit. - // - // 2. If caller (Evaluate()) has an error, it sends an error - // message and waits for this go-routine to quit in - // FinishWithError() - // - // 3. If caller is done, it waits for this go-routine to exit - // in Finish() - - quitFlag := false - for !quitFlag { - select { - case data := <-writer.errCh: - quitFlag = true - // Flush collected records before sending error message - if !writer.flushRecords() { - break - } - writer.write(data) - - case payload, ok := <-writer.payloadCh: - if !ok { - // payloadCh is closed by caller to - // indicate finish with success - quitFlag = true - - if !writer.flushRecords() { - break - } - // Write Stats message, then End message - bytesReturned := atomic.LoadInt64(&writer.bytesReturned) - if !writer.write(newStatsMessage(writer.finBytesScanned, writer.finBytesProcessed, bytesReturned)) { - break - } - writer.write(endMessage) - } else { - for payload.Len() > 0 { - copiedLen := copy(writer.payloadBuffer[writer.payloadBufferIndex:], payload.Bytes()) - writer.payloadBufferIndex += copiedLen - payload.Next(copiedLen) - - // If buffer is filled, flush it now! 
- freeSpace := bufLength - writer.payloadBufferIndex - if freeSpace == 0 { - if !writer.flushRecords() { - quitFlag = true - break - } - } - } - - bufPool.Put(payload) - } - - case <-recordStagingTicker.C: - if !writer.flushRecords() { - quitFlag = true - } - - case <-keepAliveTicker.C: - if !writer.write(continuationMessage) { - quitFlag = true - } - - case <-progressTickerC: - bytesScanned, bytesProcessed := writer.getProgressFunc() - bytesReturned := atomic.LoadInt64(&writer.bytesReturned) - if !writer.write(newProgressMessage(bytesScanned, bytesProcessed, bytesReturned)) { - quitFlag = true - } - } - } - close(writer.doneCh) - - recordStagingTicker.Stop() - keepAliveTicker.Stop() - if progressTicker != nil { - progressTicker.Stop() - } - - // Whatever drain the payloadCh to prevent from memory leaking. - for len(writer.payloadCh) > 0 { - payload := <-writer.payloadCh - bufPool.Put(payload) - } -} - -// Sends a single whole record. -func (writer *messageWriter) SendRecord(payload *bytes.Buffer) error { - select { - case writer.payloadCh <- payload: - return nil - case <-writer.doneCh: - return fmt.Errorf("messageWriter is done") - } -} - -func (writer *messageWriter) flushRecords() bool { - if writer.payloadBufferIndex == 0 { - return true - } - result := writer.write(newRecordsMessage(writer.payloadBuffer[0:writer.payloadBufferIndex])) - if result { - atomic.AddInt64(&writer.bytesReturned, int64(writer.payloadBufferIndex)) - writer.payloadBufferIndex = 0 - } - return result -} - -// Finish is the last call to the message writer - it sends any -// remaining record payload, then sends statistics and finally the end -// message. -func (writer *messageWriter) Finish(bytesScanned, bytesProcessed int64) error { - select { - case <-writer.doneCh: - return fmt.Errorf("messageWriter is done") - default: - writer.finBytesScanned = bytesScanned - writer.finBytesProcessed = bytesProcessed - close(writer.payloadCh) - // Wait until the `start` go-routine is done. - <-writer.doneCh - return nil - } -} - -func (writer *messageWriter) FinishWithError(errorCode, errorMessage string) error { - select { - case <-writer.doneCh: - return fmt.Errorf("messageWriter is done") - case writer.errCh <- newErrorMessage([]byte(errorCode), []byte(errorMessage)): - // Wait until the `start` go-routine is done. - <-writer.doneCh - return nil - } -} - -// newMessageWriter creates a message writer that writes to the HTTP -// response writer -func newMessageWriter(w http.ResponseWriter, getProgressFunc func() (bytesScanned, bytesProcessed int64)) *messageWriter { - writer := &messageWriter{ - writer: w, - getProgressFunc: getProgressFunc, - - payloadBuffer: make([]byte, bufLength), - payloadCh: make(chan *bytes.Buffer, 1), - - errCh: make(chan []byte), - doneCh: make(chan struct{}), - } - go writer.start() - return writer -} diff --git a/pkg/s3select/parquet/args.go b/pkg/s3select/parquet/args.go deleted file mode 100644 index 8fd7c0c7..00000000 --- a/pkg/s3select/parquet/args.go +++ /dev/null @@ -1,42 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package parquet - -import "encoding/xml" - -// ReaderArgs - represents elements inside in request XML. -type ReaderArgs struct { - unmarshaled bool -} - -// IsEmpty - returns whether reader args is empty or not. -func (args *ReaderArgs) IsEmpty() bool { - return !args.unmarshaled -} - -// UnmarshalXML - decodes XML data. -func (args *ReaderArgs) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). - type subReaderArgs ReaderArgs - parsedArgs := subReaderArgs{} - if err := d.DecodeElement(&parsedArgs, &start); err != nil { - return err - } - - args.unmarshaled = true - return nil -} diff --git a/pkg/s3select/parquet/errors.go b/pkg/s3select/parquet/errors.go deleted file mode 100644 index 69365e9d..00000000 --- a/pkg/s3select/parquet/errors.go +++ /dev/null @@ -1,53 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package parquet - -type s3Error struct { - code string - message string - statusCode int - cause error -} - -func (err *s3Error) Cause() error { - return err.cause -} - -func (err *s3Error) ErrorCode() string { - return err.code -} - -func (err *s3Error) ErrorMessage() string { - return err.message -} - -func (err *s3Error) HTTPStatusCode() int { - return err.statusCode -} - -func (err *s3Error) Error() string { - return err.message -} - -func errParquetParsingError(err error) *s3Error { - return &s3Error{ - code: "ParquetParsingError", - message: "Error parsing Parquet file. Please check the file and try again.", - statusCode: 400, - cause: err, - } -} diff --git a/pkg/s3select/parquet/reader.go b/pkg/s3select/parquet/reader.go deleted file mode 100644 index c63967bc..00000000 --- a/pkg/s3select/parquet/reader.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package parquet - -import ( - "io" - - "github.com/bcicen/jstream" - jsonfmt "github.com/minio/minio/pkg/s3select/json" - "github.com/minio/minio/pkg/s3select/sql" - parquetgo "github.com/minio/parquet-go" - parquetgen "github.com/minio/parquet-go/gen-go/parquet" -) - -// Reader - Parquet record reader for S3Select. -type Reader struct { - args *ReaderArgs - reader *parquetgo.Reader -} - -// Read - reads single record. 
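NewReader further down in this file takes a callback that returns a reader for an arbitrary byte range of the object, since Parquet needs random access to its footer and column chunks. A minimal file-backed callback might look like the sketch below; the path is hypothetical, and treating length < 0 as "to end of file" is an assumption consistent with how the CSV and JSON paths call getReader(0, -1) later in this diff.

package main

import (
	"io"
	"os"
)

// fileRangeReader returns a callback of the shape NewReader expects: given an
// offset and length it hands back a ReadCloser over that byte range of a local
// file.
func fileRangeReader(path string) func(offset, length int64) (io.ReadCloser, error) {
	return func(offset, length int64) (io.ReadCloser, error) {
		f, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		if length < 0 { // assumed convention: negative length means "to end of file"
			st, err := f.Stat()
			if err != nil {
				f.Close()
				return nil, err
			}
			length = st.Size() - offset
		}
		// A SectionReader serves the reads; closing the file releases it.
		return struct {
			io.Reader
			io.Closer
		}{io.NewSectionReader(f, offset, length), f}, nil
	}
}

func main() {
	getReader := fileRangeReader("example.parquet") // hypothetical local object copy
	rc, err := getReader(0, -1)
	if err != nil {
		panic(err)
	}
	rc.Close() // in real use, hand getReader to parquet.NewReader instead
}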
-func (r *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) { - parquetRecord, err := r.reader.Read() - if err != nil { - if err != io.EOF { - return nil, errParquetParsingError(err) - } - - return nil, err - } - - kvs := jstream.KVS{} - f := func(name string, v parquetgo.Value) bool { - if v.Value == nil { - kvs = append(kvs, jstream.KV{Key: name, Value: nil}) - return true - } - - var value interface{} - switch v.Type { - case parquetgen.Type_BOOLEAN: - value = v.Value.(bool) - case parquetgen.Type_INT32: - value = int64(v.Value.(int32)) - case parquetgen.Type_INT64: - value = int64(v.Value.(int64)) - case parquetgen.Type_FLOAT: - value = float64(v.Value.(float32)) - case parquetgen.Type_DOUBLE: - value = v.Value.(float64) - case parquetgen.Type_INT96, parquetgen.Type_BYTE_ARRAY, parquetgen.Type_FIXED_LEN_BYTE_ARRAY: - value = string(v.Value.([]byte)) - default: - rerr = errParquetParsingError(nil) - return false - } - - kvs = append(kvs, jstream.KV{Key: name, Value: value}) - return true - } - - // Apply our range - parquetRecord.Range(f) - - // Reuse destination if we can. - dstRec, ok := dst.(*jsonfmt.Record) - if !ok { - dstRec = &jsonfmt.Record{} - } - dstRec.SelectFormat = sql.SelectFmtParquet - dstRec.KVS = kvs - return dstRec, nil -} - -// Close - closes underlying readers. -func (r *Reader) Close() error { - return r.reader.Close() -} - -// NewReader - creates new Parquet reader using readerFunc callback. -func NewReader(getReaderFunc func(offset, length int64) (io.ReadCloser, error), args *ReaderArgs) (*Reader, error) { - reader, err := parquetgo.NewReader(getReaderFunc, nil) - if err != nil { - if err != io.EOF { - return nil, errParquetParsingError(err) - } - - return nil, err - } - - return &Reader{ - args: args, - reader: reader, - }, nil -} diff --git a/pkg/s3select/progress.go b/pkg/s3select/progress.go deleted file mode 100644 index df68163f..00000000 --- a/pkg/s3select/progress.go +++ /dev/null @@ -1,121 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3select - -import ( - "compress/bzip2" - "errors" - "fmt" - "io" - "sync" - "sync/atomic" - - gzip "github.com/klauspost/pgzip" -) - -type countUpReader struct { - reader io.Reader - bytesRead int64 -} - -func (r *countUpReader) Read(p []byte) (n int, err error) { - n, err = r.reader.Read(p) - atomic.AddInt64(&r.bytesRead, int64(n)) - return n, err -} - -func (r *countUpReader) BytesRead() int64 { - if r == nil { - return 0 - } - return atomic.LoadInt64(&r.bytesRead) -} - -func newCountUpReader(reader io.Reader) *countUpReader { - return &countUpReader{ - reader: reader, - } -} - -type progressReader struct { - rc io.ReadCloser - scannedReader *countUpReader - processedReader *countUpReader - - closedMu sync.Mutex - closed bool -} - -func (pr *progressReader) Read(p []byte) (n int, err error) { - // This ensures that Close will block until Read has completed. - // This allows another goroutine to close the reader. 
- pr.closedMu.Lock() - defer pr.closedMu.Unlock() - if pr.closed { - return 0, errors.New("progressReader: read after Close") - } - return pr.processedReader.Read(p) -} - -func (pr *progressReader) Close() error { - if pr.rc == nil { - return nil - } - pr.closedMu.Lock() - defer pr.closedMu.Unlock() - if pr.closed { - return nil - } - pr.closed = true - return pr.rc.Close() -} - -func (pr *progressReader) Stats() (bytesScanned, bytesProcessed int64) { - if pr == nil { - return 0, 0 - } - return pr.scannedReader.BytesRead(), pr.processedReader.BytesRead() -} - -func newProgressReader(rc io.ReadCloser, compType CompressionType) (*progressReader, error) { - scannedReader := newCountUpReader(rc) - var r io.Reader - var err error - - switch compType { - case noneType: - r = scannedReader - case gzipType: - r, err = gzip.NewReader(scannedReader) - if err != nil { - if errors.Is(err, gzip.ErrHeader) || errors.Is(err, gzip.ErrChecksum) { - return nil, errInvalidGZIPCompressionFormat(err) - } - return nil, errTruncatedInput(err) - } - case bzip2Type: - r = bzip2.NewReader(scannedReader) - default: - return nil, errInvalidCompressionFormat(fmt.Errorf("unknown compression type '%v'", compType)) - } - - return &progressReader{ - rc: rc, - scannedReader: scannedReader, - processedReader: newCountUpReader(r), - }, nil -} diff --git a/pkg/s3select/select.go b/pkg/s3select/select.go deleted file mode 100644 index f76e10de..00000000 --- a/pkg/s3select/select.go +++ /dev/null @@ -1,544 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3select - -import ( - "bufio" - "bytes" - "compress/bzip2" - "encoding/xml" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "strings" - "sync" - - "github.com/minio/minio/pkg/s3select/csv" - "github.com/minio/minio/pkg/s3select/json" - "github.com/minio/minio/pkg/s3select/parquet" - "github.com/minio/minio/pkg/s3select/simdj" - "github.com/minio/minio/pkg/s3select/sql" - "github.com/minio/simdjson-go" -) - -type recordReader interface { - // Read a record. - // dst is optional but will be used if valid. - Read(dst sql.Record) (sql.Record, error) - Close() error -} - -const ( - csvFormat = "csv" - jsonFormat = "json" - parquetFormat = "parquet" -) - -// CompressionType - represents value inside in request XML. -type CompressionType string - -const ( - noneType CompressionType = "none" - gzipType CompressionType = "gzip" - bzip2Type CompressionType = "bzip2" -) - -const ( - maxRecordSize = 1 << 20 // 1 MiB -) - -var bufPool = sync.Pool{ - New: func() interface{} { - // make a buffer with a reasonable capacity. - return bytes.NewBuffer(make([]byte, 0, maxRecordSize)) - }, -} - -var bufioWriterPool = sync.Pool{ - New: func() interface{} { - // ioutil.Discard is just used to create the writer. Actual destination - // writer is set later by Reset() before using it. - return bufio.NewWriter(ioutil.Discard) - }, -} - -// UnmarshalXML - decodes XML data. 
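A hedged sketch of the scanned-versus-processed distinction newProgressReader tracks: bytes are counted once before decompression (scanned) and once after (processed). Standard-library compress/gzip stands in here for the pgzip reader used above.

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"io/ioutil"
)

// countReader counts the bytes flowing through it, like countUpReader above.
type countReader struct {
	r io.Reader
	n int64
}

func (c *countReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	c.n += int64(n)
	return n, err
}

func main() {
	// Compress some repetitive line-delimited input.
	var compressed bytes.Buffer
	zw := gzip.NewWriter(&compressed)
	zw.Write(bytes.Repeat([]byte(`{"a":1}`+"\n"), 1000))
	zw.Close()

	scanned := &countReader{r: &compressed}
	zr, err := gzip.NewReader(scanned)
	if err != nil {
		panic(err)
	}
	processed := &countReader{r: zr}
	io.Copy(ioutil.Discard, processed)

	fmt.Println("bytes scanned:  ", scanned.n)   // compressed size read from the source
	fmt.Println("bytes processed:", processed.n) // 8000, the decompressed size
}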
-func (c *CompressionType) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var s string - if err := d.DecodeElement(&s, &start); err != nil { - return errMalformedXML(err) - } - - parsedType := CompressionType(strings.ToLower(s)) - if s == "" { - parsedType = noneType - } - - switch parsedType { - case noneType, gzipType, bzip2Type: - default: - return errInvalidCompressionFormat(fmt.Errorf("invalid compression format '%v'", s)) - } - - *c = parsedType - return nil -} - -// InputSerialization - represents elements inside in request XML. -type InputSerialization struct { - CompressionType CompressionType `xml:"CompressionType"` - CSVArgs csv.ReaderArgs `xml:"CSV"` - JSONArgs json.ReaderArgs `xml:"JSON"` - ParquetArgs parquet.ReaderArgs `xml:"Parquet"` - unmarshaled bool - format string -} - -// IsEmpty - returns whether input serialization is empty or not. -func (input *InputSerialization) IsEmpty() bool { - return !input.unmarshaled -} - -// UnmarshalXML - decodes XML data. -func (input *InputSerialization) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). - type subInputSerialization InputSerialization - parsedInput := subInputSerialization{} - if err := d.DecodeElement(&parsedInput, &start); err != nil { - return errMalformedXML(err) - } - - // If no compression is specified, set to noneType - if parsedInput.CompressionType == CompressionType("") { - parsedInput.CompressionType = noneType - } - - found := 0 - if !parsedInput.CSVArgs.IsEmpty() { - parsedInput.format = csvFormat - found++ - } - if !parsedInput.JSONArgs.IsEmpty() { - parsedInput.format = jsonFormat - found++ - } - if !parsedInput.ParquetArgs.IsEmpty() { - if parsedInput.CompressionType != "" && parsedInput.CompressionType != noneType { - return errInvalidRequestParameter(fmt.Errorf("CompressionType must be NONE for Parquet format")) - } - - parsedInput.format = parquetFormat - found++ - } - - if found != 1 { - return errInvalidDataSource(nil) - } - - *input = InputSerialization(parsedInput) - input.unmarshaled = true - return nil -} - -// OutputSerialization - represents elements inside in request XML. -type OutputSerialization struct { - CSVArgs csv.WriterArgs `xml:"CSV"` - JSONArgs json.WriterArgs `xml:"JSON"` - unmarshaled bool - format string -} - -// IsEmpty - returns whether output serialization is empty or not. -func (output *OutputSerialization) IsEmpty() bool { - return !output.unmarshaled -} - -// UnmarshalXML - decodes XML data. -func (output *OutputSerialization) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // Make subtype to avoid recursive UnmarshalXML(). - type subOutputSerialization OutputSerialization - parsedOutput := subOutputSerialization{} - if err := d.DecodeElement(&parsedOutput, &start); err != nil { - return errMalformedXML(err) - } - - found := 0 - if !parsedOutput.CSVArgs.IsEmpty() { - parsedOutput.format = csvFormat - found++ - } - if !parsedOutput.JSONArgs.IsEmpty() { - parsedOutput.format = jsonFormat - found++ - } - if found != 1 { - return errObjectSerializationConflict(fmt.Errorf("either CSV or JSON should be present in OutputSerialization")) - } - - *output = OutputSerialization(parsedOutput) - output.unmarshaled = true - return nil -} - -// RequestProgress - represents elements inside in request XML. -type RequestProgress struct { - Enabled bool `xml:"Enabled"` -} - -// S3Select - filters the contents on a simple structured query language (SQL) statement. 
It -// represents elements inside in request XML specified in detail at -// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html. -type S3Select struct { - XMLName xml.Name `xml:"SelectRequest"` - Expression string `xml:"Expression"` - ExpressionType string `xml:"ExpressionType"` - Input InputSerialization `xml:"InputSerialization"` - Output OutputSerialization `xml:"OutputSerialization"` - Progress RequestProgress `xml:"RequestProgress"` - - statement *sql.SelectStatement - progressReader *progressReader - recordReader recordReader -} - -var ( - legacyXMLName = "SelectObjectContentRequest" -) - -// UnmarshalXML - decodes XML data. -func (s3Select *S3Select) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - // S3 also supports the older SelectObjectContentRequest tag, - // though it is no longer found in documentation. This is - // checked and renamed below to allow older clients to also - // work. - if start.Name.Local == legacyXMLName { - start.Name = xml.Name{Space: "", Local: "SelectRequest"} - } - - // Make subtype to avoid recursive UnmarshalXML(). - type subS3Select S3Select - parsedS3Select := subS3Select{} - if err := d.DecodeElement(&parsedS3Select, &start); err != nil { - if _, ok := err.(*s3Error); ok { - return err - } - - return errMalformedXML(err) - } - - parsedS3Select.ExpressionType = strings.ToLower(parsedS3Select.ExpressionType) - if parsedS3Select.ExpressionType != "sql" { - return errInvalidExpressionType(fmt.Errorf("invalid expression type '%v'", parsedS3Select.ExpressionType)) - } - - if parsedS3Select.Input.IsEmpty() { - return errMissingRequiredParameter(fmt.Errorf("InputSerialization must be provided")) - } - - if parsedS3Select.Output.IsEmpty() { - return errMissingRequiredParameter(fmt.Errorf("OutputSerialization must be provided")) - } - - statement, err := sql.ParseSelectStatement(parsedS3Select.Expression) - if err != nil { - return err - } - - parsedS3Select.statement = &statement - - *s3Select = S3Select(parsedS3Select) - return nil -} - -func (s3Select *S3Select) outputRecord() sql.Record { - switch s3Select.Output.format { - case csvFormat: - return csv.NewRecord() - case jsonFormat: - return json.NewRecord(sql.SelectFmtJSON) - } - - panic(fmt.Errorf("unknown output format '%v'", s3Select.Output.format)) -} - -func (s3Select *S3Select) getProgress() (bytesScanned, bytesProcessed int64) { - if s3Select.progressReader != nil { - return s3Select.progressReader.Stats() - } - - return -1, -1 -} - -// Open - opens S3 object by using callback for SQL selection query. -// Currently CSV, JSON and Apache Parquet formats are supported. 
-func (s3Select *S3Select) Open(getReader func(offset, length int64) (io.ReadCloser, error)) error { - switch s3Select.Input.format { - case csvFormat: - rc, err := getReader(0, -1) - if err != nil { - return err - } - - s3Select.progressReader, err = newProgressReader(rc, s3Select.Input.CompressionType) - if err != nil { - rc.Close() - return err - } - - s3Select.recordReader, err = csv.NewReader(s3Select.progressReader, &s3Select.Input.CSVArgs) - if err != nil { - rc.Close() - var stErr bzip2.StructuralError - if errors.As(err, &stErr) { - return errInvalidBZIP2CompressionFormat(err) - } - return err - } - return nil - case jsonFormat: - rc, err := getReader(0, -1) - if err != nil { - return err - } - - s3Select.progressReader, err = newProgressReader(rc, s3Select.Input.CompressionType) - if err != nil { - rc.Close() - return err - } - - if strings.EqualFold(s3Select.Input.JSONArgs.ContentType, "lines") { - if simdjson.SupportedCPU() { - s3Select.recordReader = simdj.NewReader(s3Select.progressReader, &s3Select.Input.JSONArgs) - } else { - s3Select.recordReader = json.NewPReader(s3Select.progressReader, &s3Select.Input.JSONArgs) - } - } else { - s3Select.recordReader = json.NewReader(s3Select.progressReader, &s3Select.Input.JSONArgs) - } - return nil - case parquetFormat: - var err error - s3Select.recordReader, err = parquet.NewReader(getReader, &s3Select.Input.ParquetArgs) - return err - } - - panic(fmt.Errorf("unknown input format '%v'", s3Select.Input.format)) -} - -func (s3Select *S3Select) marshal(buf *bytes.Buffer, record sql.Record) error { - switch s3Select.Output.format { - case csvFormat: - // Use bufio Writer to prevent csv.Writer from allocating a new buffer. - bufioWriter := bufioWriterPool.Get().(*bufio.Writer) - defer func() { - bufioWriter.Reset(ioutil.Discard) - bufioWriterPool.Put(bufioWriter) - }() - - bufioWriter.Reset(buf) - opts := sql.WriteCSVOpts{ - FieldDelimiter: []rune(s3Select.Output.CSVArgs.FieldDelimiter)[0], - Quote: []rune(s3Select.Output.CSVArgs.QuoteCharacter)[0], - QuoteEscape: []rune(s3Select.Output.CSVArgs.QuoteEscapeCharacter)[0], - AlwaysQuote: strings.ToLower(s3Select.Output.CSVArgs.QuoteFields) == "always", - } - err := record.WriteCSV(bufioWriter, opts) - if err != nil { - return err - } - err = bufioWriter.Flush() - if err != nil { - return err - } - if buf.Bytes()[buf.Len()-1] == '\n' { - buf.Truncate(buf.Len() - 1) - } - buf.WriteString(s3Select.Output.CSVArgs.RecordDelimiter) - - return nil - case jsonFormat: - err := record.WriteJSON(buf) - if err != nil { - return err - } - // Trim trailing newline from non-simd output - if buf.Bytes()[buf.Len()-1] == '\n' { - buf.Truncate(buf.Len() - 1) - } - buf.WriteString(s3Select.Output.JSONArgs.RecordDelimiter) - - return nil - } - - panic(fmt.Errorf("unknown output format '%v'", s3Select.Output.format)) -} - -// Evaluate - filters and sends records read from opened reader as per select statement to http response writer. -func (s3Select *S3Select) Evaluate(w http.ResponseWriter) { - getProgressFunc := s3Select.getProgress - if !s3Select.Progress.Enabled { - getProgressFunc = nil - } - writer := newMessageWriter(w, getProgressFunc) - - var outputQueue []sql.Record - - // Create queue based on the type. 
- if s3Select.statement.IsAggregated() { - outputQueue = make([]sql.Record, 0, 1) - } else { - outputQueue = make([]sql.Record, 0, 100) - } - var err error - sendRecord := func() bool { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - - for _, outputRecord := range outputQueue { - if outputRecord == nil { - continue - } - before := buf.Len() - if err = s3Select.marshal(buf, outputRecord); err != nil { - bufPool.Put(buf) - return false - } - if buf.Len()-before > maxRecordSize { - writer.FinishWithError("OverMaxRecordSize", "The length of a record in the input or result is greater than maxCharsPerRecord of 1 MB.") - bufPool.Put(buf) - return false - } - } - - if err = writer.SendRecord(buf); err != nil { - // FIXME: log this error. - err = nil - bufPool.Put(buf) - return false - } - outputQueue = outputQueue[:0] - return true - } - - var rec sql.Record -OuterLoop: - for { - if s3Select.statement.LimitReached() { - if !sendRecord() { - break - } - if err = writer.Finish(s3Select.getProgress()); err != nil { - // FIXME: log this error. - err = nil - } - break - } - - if rec, err = s3Select.recordReader.Read(rec); err != nil { - if err != io.EOF { - break - } - - if s3Select.statement.IsAggregated() { - outputRecord := s3Select.outputRecord() - if err = s3Select.statement.AggregateResult(outputRecord); err != nil { - break - } - outputQueue = append(outputQueue, outputRecord) - } - - if !sendRecord() { - break - } - - if err = writer.Finish(s3Select.getProgress()); err != nil { - // FIXME: log this error. - err = nil - } - break - } - - var inputRecords []*sql.Record - if inputRecords, err = s3Select.statement.EvalFrom(s3Select.Input.format, rec); err != nil { - break - } - - for _, inputRecord := range inputRecords { - if s3Select.statement.IsAggregated() { - if err = s3Select.statement.AggregateRow(*inputRecord); err != nil { - break OuterLoop - } - } else { - var outputRecord sql.Record - // We will attempt to reuse the records in the table. - // The type of these should not change. - // The queue should always have at least one entry left for this to work. - outputQueue = outputQueue[:len(outputQueue)+1] - if t := outputQueue[len(outputQueue)-1]; t != nil { - // If the output record is already set, we reuse it. - outputRecord = t - outputRecord.Reset() - } else { - // Create new one - outputRecord = s3Select.outputRecord() - outputQueue[len(outputQueue)-1] = outputRecord - } - outputRecord, err = s3Select.statement.Eval(*inputRecord, outputRecord) - if outputRecord == nil || err != nil { - // This should not be written. - // Remove it from the queue. - outputQueue = outputQueue[:len(outputQueue)-1] - if err != nil { - break OuterLoop - } - continue - } - - outputQueue[len(outputQueue)-1] = outputRecord - if len(outputQueue) < cap(outputQueue) { - continue - } - - if !sendRecord() { - break OuterLoop - } - } - } - } - - if err != nil { - _ = writer.FinishWithError("InternalError", err.Error()) - } -} - -// Close - closes opened S3 object. -func (s3Select *S3Select) Close() error { - return s3Select.recordReader.Close() -} - -// NewS3Select - creates new S3Select by given request XML reader. 
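The queue handling in `Evaluate` above is a batch-and-flush pattern: output records live in a slice created with a fixed capacity (1 for aggregations, 100 otherwise), entries are reused between batches, and the batch is marshalled and sent once the slice is full or the input ends. A stripped-down sketch of the same idea, with hypothetical names:

```go
package sketch

// record stands in for sql.Record; the real loop also calls Reset() on
// entries it reuses from the previous batch.
type record interface{}

// batchWriter mirrors the outputQueue logic in Evaluate: buffer records in a
// fixed-capacity slice and flush once it fills, so the number of live output
// records stays bounded regardless of how many rows the statement produces.
type batchWriter struct {
	queue []record
	flush func([]record) error // e.g. marshal each record and send the batch
}

func newBatchWriter(size int, flush func([]record) error) *batchWriter {
	return &batchWriter{queue: make([]record, 0, size), flush: flush}
}

func (b *batchWriter) add(r record) error {
	b.queue = append(b.queue, r)
	if len(b.queue) < cap(b.queue) {
		return nil // keep buffering until the batch is full
	}
	err := b.flush(b.queue)
	b.queue = b.queue[:0] // reset length, keep capacity so entries are reused
	return err
}
```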
-func NewS3Select(r io.Reader) (*S3Select, error) { - s3Select := &S3Select{} - if err := xml.NewDecoder(r).Decode(s3Select); err != nil { - return nil, err - } - - return s3Select, nil -} diff --git a/pkg/s3select/select_benchmark_test.go b/pkg/s3select/select_benchmark_test.go deleted file mode 100644 index 7c2b0a81..00000000 --- a/pkg/s3select/select_benchmark_test.go +++ /dev/null @@ -1,197 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3select - -import ( - "bytes" - "encoding/csv" - "io" - "io/ioutil" - "math/rand" - "net/http" - "strconv" - "testing" - "time" - - humanize "github.com/dustin/go-humanize" -) - -var randSrc = rand.New(rand.NewSource(time.Now().UnixNano())) - -const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" - -func newRandString(length int) string { - b := make([]byte, length) - for i := range b { - b[i] = charset[randSrc.Intn(len(charset))] - } - return string(b) -} - -func genSampleCSVData(count int) []byte { - buf := &bytes.Buffer{} - csvWriter := csv.NewWriter(buf) - csvWriter.Write([]string{"id", "name", "age", "city"}) - - for i := 0; i < count; i++ { - csvWriter.Write([]string{ - strconv.Itoa(i), - newRandString(10), - newRandString(5), - newRandString(10), - }) - } - - csvWriter.Flush() - return buf.Bytes() -} - -type nullResponseWriter struct { -} - -func (w *nullResponseWriter) Header() http.Header { - return nil -} - -func (w *nullResponseWriter) Write(p []byte) (int, error) { - return len(p), nil -} - -func (w *nullResponseWriter) WriteHeader(statusCode int) { -} - -func (w *nullResponseWriter) Flush() { -} - -func benchmarkSelect(b *testing.B, count int, query string) { - var requestXML = []byte(` - - - ` + query + ` - SQL - - NONE - - USE - - - - - - - - FALSE - - -`) - - csvData := genSampleCSVData(count) - - b.ResetTimer() - b.ReportAllocs() - b.SetBytes(int64(count)) - - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - s3Select, err := NewS3Select(bytes.NewReader(requestXML)) - if err != nil { - b.Fatal(err) - } - - if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(csvData)), nil - }); err != nil { - b.Fatal(err) - } - - s3Select.Evaluate(&nullResponseWriter{}) - s3Select.Close() - } - }) -} - -func benchmarkSelectAll(b *testing.B, count int) { - benchmarkSelect(b, count, "select * from S3Object") -} - -// BenchmarkSelectAll_100K - benchmark * function with 100k records. -func BenchmarkSelectAll_100K(b *testing.B) { - benchmarkSelectAll(b, 100*humanize.KiByte) -} - -// BenchmarkSelectAll_1M - benchmark * function with 1m records. -func BenchmarkSelectAll_1M(b *testing.B) { - benchmarkSelectAll(b, 1*humanize.MiByte) -} - -// BenchmarkSelectAll_2M - benchmark * function with 2m records. -func BenchmarkSelectAll_2M(b *testing.B) { - benchmarkSelectAll(b, 2*humanize.MiByte) -} - -// BenchmarkSelectAll_10M - benchmark * function with 10m records. 
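The benchmarks in this file drive the full lifecycle of the deleted type; for reference, the same four calls in isolation, as a hedged sketch (the wrapper name and error comments are illustrative, not part of this package):

```go
package s3select

import (
	"bytes"
	"io"
	"net/http"
)

// runSelect is an illustrative wrapper, not part of this diff: decode the
// request XML, open the object through the caller-supplied range reader,
// stream the result event stream to w, then release the underlying reader.
func runSelect(w http.ResponseWriter, requestXML []byte,
	getReader func(offset, length int64) (io.ReadCloser, error)) error {

	s3Select, err := NewS3Select(bytes.NewReader(requestXML))
	if err != nil {
		return err // malformed XML or unsupported expression type
	}
	if err := s3Select.Open(getReader); err != nil {
		return err // unreadable input or bad serialization arguments
	}
	// Evaluate reports query errors in-band on the event stream it writes.
	s3Select.Evaluate(w)
	return s3Select.Close()
}
```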
-func BenchmarkSelectAll_10M(b *testing.B) { - benchmarkSelectAll(b, 10*humanize.MiByte) -} - -func benchmarkSingleCol(b *testing.B, count int) { - benchmarkSelect(b, count, "select id from S3Object") -} - -// BenchmarkSingleRow_100K - benchmark SELECT column function with 100k records. -func BenchmarkSingleCol_100K(b *testing.B) { - benchmarkSingleCol(b, 1e5) -} - -// BenchmarkSelectAll_1M - benchmark * function with 1m records. -func BenchmarkSingleCol_1M(b *testing.B) { - benchmarkSingleCol(b, 1e6) -} - -// BenchmarkSelectAll_2M - benchmark * function with 2m records. -func BenchmarkSingleCol_2M(b *testing.B) { - benchmarkSingleCol(b, 2e6) -} - -// BenchmarkSelectAll_10M - benchmark * function with 10m records. -func BenchmarkSingleCol_10M(b *testing.B) { - benchmarkSingleCol(b, 1e7) -} - -func benchmarkAggregateCount(b *testing.B, count int) { - benchmarkSelect(b, count, "select count(*) from S3Object") -} - -// BenchmarkAggregateCount_100K - benchmark count(*) function with 100k records. -func BenchmarkAggregateCount_100K(b *testing.B) { - benchmarkAggregateCount(b, 100*humanize.KiByte) -} - -// BenchmarkAggregateCount_1M - benchmark count(*) function with 1m records. -func BenchmarkAggregateCount_1M(b *testing.B) { - benchmarkAggregateCount(b, 1*humanize.MiByte) -} - -// BenchmarkAggregateCount_2M - benchmark count(*) function with 2m records. -func BenchmarkAggregateCount_2M(b *testing.B) { - benchmarkAggregateCount(b, 2*humanize.MiByte) -} - -// BenchmarkAggregateCount_10M - benchmark count(*) function with 10m records. -func BenchmarkAggregateCount_10M(b *testing.B) { - benchmarkAggregateCount(b, 10*humanize.MiByte) -} diff --git a/pkg/s3select/select_test.go b/pkg/s3select/select_test.go deleted file mode 100644 index 032d17ab..00000000 --- a/pkg/s3select/select_test.go +++ /dev/null @@ -1,1040 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3select - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "math" - "net/http" - "os" - "reflect" - "strings" - "testing" - - "github.com/klauspost/cpuid" - "github.com/minio/minio-go/v6" - "github.com/minio/simdjson-go" -) - -type testResponseWriter struct { - statusCode int - response []byte -} - -func (w *testResponseWriter) Header() http.Header { - return nil -} - -func (w *testResponseWriter) Write(p []byte) (int, error) { - w.response = append(w.response, p...) 
- return len(p), nil -} - -func (w *testResponseWriter) WriteHeader(statusCode int) { - w.statusCode = statusCode -} - -func (w *testResponseWriter) Flush() { -} - -func TestJSONQueries(t *testing.T) { - input := `{"id": 0,"title": "Test Record","desc": "Some text","synonyms": ["foo", "bar", "whatever"]} - {"id": 1,"title": "Second Record","desc": "another text","synonyms": ["some", "synonym", "value"]} - {"id": 2,"title": "Second Record","desc": "another text","numbers": [2, 3.0, 4]} - {"id": 3,"title": "Second Record","desc": "another text","nested": [[2, 3.0, 4], [7, 8.5, 9]]}` - - var testTable = []struct { - name string - query string - requestXML []byte // override request XML - wantResult string - withJSON string // Override JSON input - }{ - { - name: "select-in-array-full", - query: `SELECT * from s3object s WHERE 'bar' IN s.synonyms[*]`, - wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}`, - }, - { - name: "simple-in-array", - query: `SELECT * from s3object s WHERE s.id IN (1,3)`, - wantResult: `{"id":1,"title":"Second Record","desc":"another text","synonyms":["some","synonym","value"]} -{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "select-in-array-single", - query: `SELECT synonyms from s3object s WHERE 'bar' IN s.synonyms[*] `, - wantResult: `{"synonyms":["foo","bar","whatever"]}`, - }, - { - name: "donatello-1", - query: `SELECT * from s3object s WHERE 'bar' in s.synonyms`, - wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}`, - }, - { - name: "donatello-2", - query: `SELECT * from s3object s WHERE 'bar' in s.synonyms[*]`, - wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}`, - }, - { - name: "donatello-3", - query: `SELECT * from s3object s WHERE 'value' IN s.synonyms[*]`, - wantResult: `{"id":1,"title":"Second Record","desc":"another text","synonyms":["some","synonym","value"]}`, - }, - { - name: "select-in-number", - query: `SELECT * from s3object s WHERE 4 in s.numbers[*]`, - wantResult: `{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`, - }, - { - name: "select-in-number-float", - query: `SELECT * from s3object s WHERE 3 in s.numbers[*]`, - wantResult: `{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`, - }, - { - name: "select-in-number-float-in-sql", - query: `SELECT * from s3object s WHERE 3.0 in s.numbers[*]`, - wantResult: `{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`, - }, - { - name: "select-in-list-match", - query: `SELECT * from s3object s WHERE (2,3,4) IN s.nested[*]`, - wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "select-in-nested-float", - query: `SELECT s.nested from s3object s WHERE 8.5 IN s.nested[*][*]`, - wantResult: `{"nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "select-in-combine-and", - query: `SELECT s.nested from s3object s WHERE (8.5 IN s.nested[*][*]) AND (s.id > 0)`, - wantResult: `{"nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "select-in-combine-and-no", - query: `SELECT s.nested from s3object s WHERE (8.5 IN s.nested[*][*]) AND (s.id = 0)`, - wantResult: ``, - }, - { - name: "select-in-nested-float-no-flat", - query: `SELECT s.nested from s3object s WHERE 8.5 IN s.nested[*]`, - wantResult: ``, - }, - { - name: "select-empty-field-result", - query: `SELECT * from s3object s WHERE 
s.nested[0][0] = 2`, - wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "select-arrays-specific", - query: `SELECT * from s3object s WHERE s.nested[1][0] = 7`, - wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "wrong-index-no-result", - query: `SELECT * from s3object s WHERE s.nested[0][0] = 7`, - wantResult: ``, - }, - { - name: "not-equal-result", - query: `SELECT * from s3object s WHERE s.nested[1][0] != 7`, - wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]} -{"id":1,"title":"Second Record","desc":"another text","synonyms":["some","synonym","value"]} -{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`, - }, - { - name: "indexed-list-match", - query: `SELECT * from s3object s WHERE (7,8.5,9) IN s.nested[1]`, - wantResult: ``, - }, - { - name: "indexed-list-match-equals", - query: `SELECT * from s3object s WHERE (7,8.5,9) = s.nested[1]`, - wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "indexed-list-match-equals-s-star", - query: `SELECT s.* from s3object s WHERE (7,8.5,9) = s.nested[1]`, - wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "indexed-list-match-equals-s-index", - query: `SELECT s.nested[1], s.nested[0] from s3object s WHERE (7,8.5,9) = s.nested[1]`, - wantResult: `{"_1":[7,8.5,9],"_2":[2,3,4]}`, - }, - { - name: "indexed-list-match-not-equals", - query: `SELECT * from s3object s WHERE (7,8.5,9) != s.nested[1]`, - wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]} -{"id":1,"title":"Second Record","desc":"another text","synonyms":["some","synonym","value"]} -{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]}`, - }, - { - name: "indexed-list-square-bracket", - query: `SELECT * from s3object s WHERE [7,8.5,9] = s.nested[1]`, - wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "indexed-list-square-bracket", - query: `SELECT * from s3object s WHERE [7,8.5,9] IN s.nested`, - wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "indexed-list-square-bracket", - query: `SELECT * from s3object s WHERE id IN [3,2]`, - wantResult: `{"id":2,"title":"Second Record","desc":"another text","numbers":[2,3,4]} -{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "index-wildcard-in", - query: `SELECT * from s3object s WHERE (8.5) IN s.nested[1][*]`, - wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "index-wildcard-in", - query: `SELECT * from s3object s WHERE (8.0+0.5) IN s.nested[1][*]`, - wantResult: `{"id":3,"title":"Second Record","desc":"another text","nested":[[2,3,4],[7,8.5,9]]}`, - }, - { - name: "compare-mixed", - query: `SELECT id from s3object s WHERE value = true`, - wantResult: `{"id":1}`, - withJSON: `{"id":0, "value": false} -{"id":1, "value": true} -{"id":2, "value": 42} -{"id":3, "value": "true"} -`, - }, - { - name: "compare-mixed-not", - query: `SELECT COUNT(id) as n from s3object s WHERE value != true`, - wantResult: `{"n":3}`, - withJSON: `{"id":0, "value": false} -{"id":1, "value": true} -{"id":2, "value": 42} -{"id":3, 
"value": "true"} -`, - }, - { - name: "index-wildcard-in", - query: `SELECT * from s3object s WHERE title = 'Test Record'`, - wantResult: `{"id":0,"title":"Test Record","desc":"Some text","synonyms":["foo","bar","whatever"]}`, - }, - { - name: "select-output-field-as-csv", - requestXML: []byte(` - - SELECT s.synonyms from s3object s WHERE 'whatever' IN s.synonyms - SQL - - NONE - - DOCUMENT - - - - - " - - - - FALSE - -`), - wantResult: `"[""foo"",""bar"",""whatever""]"`, - }, - { - name: "document", - query: "", - requestXML: []byte(` - - - select * from s3object[*].elements[*] s where s.element_type = '__elem__merfu' - SQL - - NONE - - DOCUMENT - - - - - - - - FALSE - -`), - withJSON: ` -{ - "name": "small_pdf1.pdf", - "lume_id": "9507193e-572d-4f95-bcf1-e9226d96be65", - "elements": [ - { - "element_type": "__elem__image", - "element_id": "859d09c4-7cf1-4a37-9674-3a7de8b56abc", - "attributes": { - "__attr__image_dpi": 300, - "__attr__image_size": [ - 2550, - 3299 - ], - "__attr__image_index": 1, - "__attr__image_format": "JPEG", - "__attr__file_extension": "jpg", - "__attr__data": null - } - }, - { - "element_type": "__elem__merfu", - "element_id": "d868aefe-ef9a-4be2-b9b2-c9fd89cc43eb", - "attributes": { - "__attr__image_dpi": 300, - "__attr__image_size": [ - 2550, - 3299 - ], - "__attr__image_index": 2, - "__attr__image_format": "JPEG", - "__attr__file_extension": "jpg", - "__attr__data": null - } - } - ], - "data": "asdascasdc1234e123erdasdas" -}`, - wantResult: `{"element_type":"__elem__merfu","element_id":"d868aefe-ef9a-4be2-b9b2-c9fd89cc43eb","attributes":{"__attr__image_dpi":300,"__attr__image_size":[2550,3299],"__attr__image_index":2,"__attr__image_format":"JPEG","__attr__file_extension":"jpg","__attr__data":null}}`, - }, - } - - defRequest := ` - - %s - SQL - - NONE - - LINES - - - - - - - - FALSE - -` - - for _, testCase := range testTable { - t.Run(testCase.name, func(t *testing.T) { - // Hack cpuid to the CPU doesn't appear to support AVX2. - // Restore whatever happens. - defer func(f cpuid.Flags) { - cpuid.CPU.Features = f - }(cpuid.CPU.Features) - cpuid.CPU.Features &= math.MaxUint64 - cpuid.AVX2 - - testReq := testCase.requestXML - if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) - } - s3Select, err := NewS3Select(bytes.NewReader(testReq)) - if err != nil { - t.Fatal(err) - } - - if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) { - in := input - if len(testCase.withJSON) > 0 { - in = testCase.withJSON - } - return ioutil.NopCloser(bytes.NewBufferString(in)), nil - }); err != nil { - t.Fatal(err) - } - - w := &testResponseWriter{} - s3Select.Evaluate(w) - s3Select.Close() - resp := http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(w.response)), - ContentLength: int64(len(w.response)), - } - res, err := minio.NewSelectResults(&resp, "testbucket") - if err != nil { - t.Error(err) - return - } - got, err := ioutil.ReadAll(res) - if err != nil { - t.Error(err) - return - } - gotS := strings.TrimSpace(string(got)) - if !reflect.DeepEqual(gotS, testCase.wantResult) { - t.Errorf("received response does not match with expected reply. 
Query: %s\ngot: %s\nwant:%s", testCase.query, gotS, testCase.wantResult) - } - }) - t.Run("simd-"+testCase.name, func(t *testing.T) { - if !simdjson.SupportedCPU() { - t.Skip("No CPU support") - } - testReq := testCase.requestXML - if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) - } - s3Select, err := NewS3Select(bytes.NewReader(testReq)) - if err != nil { - t.Fatal(err) - } - - if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) { - in := input - if len(testCase.withJSON) > 0 { - in = testCase.withJSON - } - return ioutil.NopCloser(bytes.NewBufferString(in)), nil - }); err != nil { - t.Fatal(err) - } - - w := &testResponseWriter{} - s3Select.Evaluate(w) - s3Select.Close() - resp := http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(w.response)), - ContentLength: int64(len(w.response)), - } - res, err := minio.NewSelectResults(&resp, "testbucket") - if err != nil { - t.Error(err) - return - } - got, err := ioutil.ReadAll(res) - if err != nil { - t.Error(err) - return - } - gotS := strings.TrimSpace(string(got)) - if !reflect.DeepEqual(gotS, testCase.wantResult) { - t.Errorf("received response does not match with expected reply. Query: %s\ngot: %s\nwant:%s", testCase.query, gotS, testCase.wantResult) - } - }) - } -} - -func TestCSVQueries(t *testing.T) { - input := `index,ID,CaseNumber,Date,Day,Month,Year,Block,IUCR,PrimaryType,Description,LocationDescription,Arrest,Domestic,Beat,District,Ward,CommunityArea,FBI Code,XCoordinate,YCoordinate,UpdatedOn,Latitude,Longitude,Location -2700763,7732229,,2010-05-26 00:00:00,26,May,2010,113XX S HALSTED ST,1150,,CREDIT CARD FRAUD,,False,False,2233,22.0,34.0,,11,,,,41.688043288,-87.6422444,"(41.688043288, -87.6422444)"` - - var testTable = []struct { - name string - query string - requestXML []byte - wantResult string - }{ - { - name: "select-in-text-simple", - query: `SELECT index FROM s3Object s WHERE "Month"='May'`, - wantResult: `2700763`, - }, - } - - defRequest := ` - - %s - SQL - - NONE - - , - USE - " - " - \n - - - - - - - - FALSE - -` - - for _, testCase := range testTable { - t.Run(testCase.name, func(t *testing.T) { - testReq := testCase.requestXML - if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) - } - s3Select, err := NewS3Select(bytes.NewReader(testReq)) - if err != nil { - t.Fatal(err) - } - - if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBufferString(input)), nil - }); err != nil { - t.Fatal(err) - } - - w := &testResponseWriter{} - s3Select.Evaluate(w) - s3Select.Close() - resp := http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(w.response)), - ContentLength: int64(len(w.response)), - } - res, err := minio.NewSelectResults(&resp, "testbucket") - if err != nil { - t.Error(err) - return - } - got, err := ioutil.ReadAll(res) - if err != nil { - t.Error(err) - return - } - gotS := strings.TrimSpace(string(got)) - if !reflect.DeepEqual(gotS, testCase.wantResult) { - t.Errorf("received response does not match with expected reply. 
Query: %s\ngot: %s\nwant:%s", testCase.query, gotS, testCase.wantResult) - } - }) - } -} - -func TestCSVQueries2(t *testing.T) { - input := `id,time,num,num2,text -1,2010-01-01T,7867786,4565.908123,"a text, with comma" -2,2017-01-02T03:04Z,-5, 0.765111, -` - var testTable = []struct { - name string - query string - requestXML []byte // override request XML - wantResult string - }{ - { - name: "select-all", - query: `SELECT * from s3object AS s WHERE id = '1'`, - wantResult: `{"id":"1","time":"2010-01-01T","num":"7867786","num2":"4565.908123","text":"a text, with comma"}`, - }, - { - name: "select-all-2", - query: `SELECT * from s3object s WHERE id = 2`, - wantResult: `{"id":"2","time":"2017-01-02T03:04Z","num":"-5","num2":" 0.765111","text":""}`, - }, - { - name: "select-text-convert", - query: `SELECT CAST(text AS STRING) AS text from s3object s WHERE id = 1`, - wantResult: `{"text":"a text, with comma"}`, - }, - { - name: "select-text-direct", - query: `SELECT text from s3object s WHERE id = 1`, - wantResult: `{"text":"a text, with comma"}`, - }, - { - name: "select-time-direct", - query: `SELECT time from s3object s WHERE id = 2`, - wantResult: `{"time":"2017-01-02T03:04Z"}`, - }, - { - name: "select-int-direct", - query: `SELECT num from s3object s WHERE id = 2`, - wantResult: `{"num":"-5"}`, - }, - { - name: "select-float-direct", - query: `SELECT num2 from s3object s WHERE id = 2`, - wantResult: `{"num2":" 0.765111"}`, - }, - { - name: "select-in-array", - query: `select id from S3Object s WHERE id in [1,3]`, - wantResult: `{"id":"1"}`, - }, - { - name: "select-in-array-matchnone", - query: `select id from S3Object s WHERE s.id in [4,3]`, - wantResult: ``, - }, - { - name: "select-float-by-val", - query: `SELECT num2 from s3object s WHERE num2 = 0.765111`, - wantResult: `{"num2":" 0.765111"}`, - }, - } - - defRequest := ` - - %s - SQL - - NONE - - USE - " - - - - - - - - FALSE - -` - - for _, testCase := range testTable { - t.Run(testCase.name, func(t *testing.T) { - testReq := testCase.requestXML - if len(testReq) == 0 { - testReq = []byte(fmt.Sprintf(defRequest, testCase.query)) - } - s3Select, err := NewS3Select(bytes.NewReader(testReq)) - if err != nil { - t.Fatal(err) - } - - if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBufferString(input)), nil - }); err != nil { - t.Fatal(err) - } - - w := &testResponseWriter{} - s3Select.Evaluate(w) - s3Select.Close() - resp := http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(w.response)), - ContentLength: int64(len(w.response)), - } - res, err := minio.NewSelectResults(&resp, "testbucket") - if err != nil { - t.Error(err) - return - } - got, err := ioutil.ReadAll(res) - if err != nil { - t.Error(err) - return - } - gotS := strings.TrimSpace(string(got)) - if !reflect.DeepEqual(gotS, testCase.wantResult) { - t.Errorf("received response does not match with expected reply. 
Query: %s\ngot: %s\nwant:%s", testCase.query, gotS, testCase.wantResult) - } - }) - } -} - -func TestCSVInput(t *testing.T) { - var testTable = []struct { - requestXML []byte - expectedResult []byte - }{ - { - []byte(` - - - SELECT one, two, three from S3Object - SQL - - NONE - - USE - - - - - - - - FALSE - - -`), []byte{ - 0, 0, 0, 137, 0, 0, 0, 85, 194, 213, 168, 241, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 49, 44, 102, 111, 111, 44, 116, 114, 117, 101, 10, 44, 98, 97, 114, 44, 102, 97, 108, 115, 101, 10, 50, 46, 53, 44, 98, 97, 122, 44, 116, 114, 117, 101, 10, 75, 182, 193, 80, 0, 0, 0, 235, 0, 0, 0, 67, 213, 243, 57, 141, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 53, 48, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 53, 48, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 51, 54, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 253, 105, 8, 216, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146, - }, - }, - { - []byte(` - - - SELECT COUNT(*) AS total_record_count from S3Object - SQL - - NONE - - USE - - - - - - - - FALSE - - -`), []byte{ - 0, 0, 0, 126, 0, 0, 0, 85, 56, 193, 36, 188, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 123, 34, 116, 111, 116, 97, 108, 95, 114, 101, 99, 111, 114, 100, 95, 99, 111, 117, 110, 116, 34, 58, 51, 125, 10, 196, 183, 134, 242, 0, 0, 0, 235, 0, 0, 0, 67, 213, 243, 57, 141, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 
110, 101, 100, 62, 53, 48, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 53, 48, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 50, 53, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 47, 153, 24, 28, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146, - }, - }, - { - []byte(` - - - SELECT * from S3Object - SQL - - NONE - - USE - - - - - - - - FALSE - - -`), []byte{0x0, 0x0, 0x0, 0xdd, 0x0, 0x0, 0x0, 0x55, 0xf, 0x46, 0xc1, 0xfa, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x18, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x63, 0x74, 0x65, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x7, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x7b, 0x22, 0x6f, 0x6e, 0x65, 0x22, 0x3a, 0x22, 0x2d, 0x31, 0x22, 0x2c, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3a, 0x22, 0x66, 0x6f, 0x6f, 0x22, 0x2c, 0x22, 0x74, 0x68, 0x72, 0x65, 0x65, 0x22, 0x3a, 0x22, 0x74, 0x72, 0x75, 0x65, 0x22, 0x7d, 0xa, 0x7b, 0x22, 0x6f, 0x6e, 0x65, 0x22, 0x3a, 0x22, 0x22, 0x2c, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3a, 0x22, 0x62, 0x61, 0x72, 0x22, 0x2c, 0x22, 0x74, 0x68, 0x72, 0x65, 0x65, 0x22, 0x3a, 0x22, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x22, 0x7d, 0xa, 0x7b, 0x22, 0x6f, 0x6e, 0x65, 0x22, 0x3a, 0x22, 0x32, 0x2e, 0x35, 0x22, 0x2c, 0x22, 0x74, 0x77, 0x6f, 0x22, 0x3a, 0x22, 0x62, 0x61, 0x7a, 0x22, 0x2c, 0x22, 0x74, 0x68, 0x72, 0x65, 0x65, 0x22, 0x3a, 0x22, 0x74, 0x72, 0x75, 0x65, 0x22, 0x7d, 0xa, 0x7e, 0xb5, 0x99, 0xfb, 0x0, 0x0, 0x0, 0xec, 0x0, 0x0, 0x0, 0x43, 0x67, 0xd3, 0xe5, 0x9d, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x8, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x78, 0x6d, 0x6c, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3c, 0x3f, 0x78, 0x6d, 0x6c, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x3d, 0x22, 0x55, 0x54, 0x46, 0x2d, 0x38, 0x22, 0x3f, 0x3e, 0x3c, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x35, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x35, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x31, 0x32, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x2f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x5a, 0xe5, 0xd, 0x84, 0x0, 0x0, 0x0, 0x38, 0x0, 
0x0, 0x0, 0x28, 0xc1, 0xc6, 0x84, 0xd4, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x3, 0x45, 0x6e, 0x64, 0xcf, 0x97, 0xd3, 0x92}, - }, - { - []byte(` - - - SELECT one from S3Object limit 1 - SQL - - NONE - - USE - - - - - - - - FALSE - - -`), []byte{ - 0x0, 0x0, 0x0, 0x68, 0x0, 0x0, 0x0, 0x55, 0xd7, 0x61, 0x46, 0x9e, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x18, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x63, 0x74, 0x65, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x7, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x2d, 0x31, 0xa, 0x17, 0xfb, 0x1, 0x90, 0x0, 0x0, 0x0, 0xea, 0x0, 0x0, 0x0, 0x43, 0xe8, 0x93, 0x10, 0x3d, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x8, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x78, 0x6d, 0x6c, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3c, 0x3f, 0x78, 0x6d, 0x6c, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x3d, 0x22, 0x55, 0x54, 0x46, 0x2d, 0x38, 0x22, 0x3f, 0x3e, 0x3c, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x35, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x35, 0x30, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x33, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x2f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x15, 0x72, 0x19, 0x94, 0x0, 0x0, 0x0, 0x38, 0x0, 0x0, 0x0, 0x28, 0xc1, 0xc6, 0x84, 0xd4, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x3, 0x45, 0x6e, 0x64, 0xcf, 0x97, 0xd3, 0x92, - }, - }, - } - - var csvData = []byte(`one,two,three --1,foo,true -,bar,false -2.5,baz,true -`) - - for i, testCase := range testTable { - t.Run(fmt.Sprint(i), func(t *testing.T) { - s3Select, err := NewS3Select(bytes.NewReader(testCase.requestXML)) - if err != nil { - t.Fatal(err) - } - - if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(csvData)), nil - }); err != nil { - t.Fatal(err) - } - - w := &testResponseWriter{} - s3Select.Evaluate(w) - s3Select.Close() - - if !reflect.DeepEqual(w.response, testCase.expectedResult) { - resp := http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(w.response)), - ContentLength: int64(len(w.response)), - } - res, err := minio.NewSelectResults(&resp, "testbucket") - if err 
!= nil { - t.Error(err) - return - } - got, err := ioutil.ReadAll(res) - if err != nil { - t.Error(err) - return - } - - t.Errorf("received response does not match with expected reply\ngot: %#v\nwant:%#v\ndecoded:%s", w.response, testCase.expectedResult, string(got)) - } - }) - } - -} - -func TestJSONInput(t *testing.T) { - - var testTable = []struct { - requestXML []byte - expectedResult []byte - }{ - { - []byte(` - - - SELECT one, two, three from S3Object - SQL - - NONE - - DOCUMENT - - - - - - - - FALSE - - -`), []byte{ - 0, 0, 0, 137, 0, 0, 0, 85, 194, 213, 168, 241, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 49, 44, 102, 111, 111, 44, 116, 114, 117, 101, 10, 44, 98, 97, 114, 44, 102, 97, 108, 115, 101, 10, 50, 46, 53, 44, 98, 97, 122, 44, 116, 114, 117, 101, 10, 75, 182, 193, 80, 0, 0, 0, 237, 0, 0, 0, 67, 90, 179, 204, 45, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 49, 49, 50, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 49, 49, 50, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 51, 54, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 181, 40, 50, 250, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146, - }, - }, - { - []byte(` - - - SELECT COUNT(*) AS total_record_count from S3Object - SQL - - NONE - - DOCUMENT - - - - - - - - FALSE - - -`), []byte{ - 0, 0, 0, 103, 0, 0, 0, 85, 85, 49, 209, 79, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 51, 10, 175, 58, 213, 152, 0, 0, 0, 236, 0, 0, 0, 67, 103, 211, 229, 157, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 
85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 49, 49, 50, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 49, 49, 50, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 50, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 52, 192, 77, 114, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146, - }, - }, - { - []byte(` - - - SELECT * from S3Object - SQL - - NONE - - DOCUMENT - - - - - - - - FALSE - - -`), []byte{0x0, 0x0, 0x0, 0x89, 0x0, 0x0, 0x0, 0x55, 0xc2, 0xd5, 0xa8, 0xf1, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x18, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x6f, 0x63, 0x74, 0x65, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x7, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x74, 0x72, 0x75, 0x65, 0x2c, 0x66, 0x6f, 0x6f, 0x2c, 0x2d, 0x31, 0xa, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x2c, 0x62, 0x61, 0x72, 0x2c, 0xa, 0x74, 0x72, 0x75, 0x65, 0x2c, 0x62, 0x61, 0x7a, 0x2c, 0x32, 0x2e, 0x35, 0xa, 0xef, 0x22, 0x13, 0xa3, 0x0, 0x0, 0x0, 0xed, 0x0, 0x0, 0x0, 0x43, 0x5a, 0xb3, 0xcc, 0x2d, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xd, 0x3a, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x8, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x78, 0x6d, 0x6c, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3c, 0x3f, 0x78, 0x6d, 0x6c, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x3d, 0x22, 0x31, 0x2e, 0x30, 0x22, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x3d, 0x22, 0x55, 0x54, 0x46, 0x2d, 0x38, 0x22, 0x3f, 0x3e, 0x3c, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x31, 0x31, 0x32, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x63, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x31, 0x31, 0x32, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x65, 0x64, 0x3e, 0x3c, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x33, 0x36, 0x3c, 0x2f, 0x42, 0x79, 0x74, 0x65, 0x73, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x3e, 0x3c, 0x2f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x3e, 0xb5, 0x28, 0x32, 0xfa, 0x0, 0x0, 0x0, 0x38, 0x0, 0x0, 0x0, 0x28, 0xc1, 0xc6, 0x84, 0xd4, 0xd, 0x3a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x5, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xb, 0x3a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x7, 0x0, 0x3, 0x45, 0x6e, 0x64, 0xcf, 0x97, 0xd3, 0x92}, - }, - } - - var jsonData = []byte(`{"three":true,"two":"foo","one":-1} 
-{"three":false,"two":"bar","one":null} -{"three":true,"two":"baz","one":2.5} -`) - - for i, testCase := range testTable { - t.Run(fmt.Sprint(i), func(t *testing.T) { - s3Select, err := NewS3Select(bytes.NewReader(testCase.requestXML)) - if err != nil { - t.Fatal(err) - } - - if err = s3Select.Open(func(offset, length int64) (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(jsonData)), nil - }); err != nil { - t.Fatal(err) - } - - w := &testResponseWriter{} - s3Select.Evaluate(w) - s3Select.Close() - - if !reflect.DeepEqual(w.response, testCase.expectedResult) { - resp := http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(w.response)), - ContentLength: int64(len(w.response)), - } - res, err := minio.NewSelectResults(&resp, "testbucket") - if err != nil { - t.Error(err) - return - } - got, err := ioutil.ReadAll(res) - if err != nil { - t.Error(err) - return - } - - t.Errorf("received response does not match with expected reply\ngot: %#v\nwant:%#v\ndecoded:%s", w.response, testCase.expectedResult, string(got)) - } - }) - } -} - -func TestParquetInput(t *testing.T) { - - var testTable = []struct { - requestXML []byte - expectedResult []byte - }{ - { - []byte(` - - - SELECT one, two, three from S3Object - SQL - - NONE - - - - - - - - - FALSE - - -`), []byte{ - 0, 0, 0, 137, 0, 0, 0, 85, 194, 213, 168, 241, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 45, 49, 44, 102, 111, 111, 44, 116, 114, 117, 101, 10, 44, 98, 97, 114, 44, 102, 97, 108, 115, 101, 10, 50, 46, 53, 44, 98, 97, 122, 44, 116, 114, 117, 101, 10, 75, 182, 193, 80, 0, 0, 0, 235, 0, 0, 0, 67, 213, 243, 57, 141, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 51, 54, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 128, 96, 253, 66, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146, - }, - }, - { - []byte(` - - - SELECT COUNT(*) AS total_record_count from S3Object - SQL - - NONE - - - - - - - - - FALSE - - -`), []byte{ - 0, 0, 0, 103, 0, 0, 0, 85, 85, 49, 209, 79, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 
116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 24, 97, 112, 112, 108, 105, 99, 97, 116, 105, 111, 110, 47, 111, 99, 116, 101, 116, 45, 115, 116, 114, 101, 97, 109, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 7, 82, 101, 99, 111, 114, 100, 115, 51, 10, 175, 58, 213, 152, 0, 0, 0, 234, 0, 0, 0, 67, 232, 147, 16, 61, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 13, 58, 99, 111, 110, 116, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 8, 116, 101, 120, 116, 47, 120, 109, 108, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 5, 83, 116, 97, 116, 115, 60, 63, 120, 109, 108, 32, 118, 101, 114, 115, 105, 111, 110, 61, 34, 49, 46, 48, 34, 32, 101, 110, 99, 111, 100, 105, 110, 103, 61, 34, 85, 84, 70, 45, 56, 34, 63, 62, 60, 83, 116, 97, 116, 115, 62, 60, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 83, 99, 97, 110, 110, 101, 100, 62, 60, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 45, 49, 60, 47, 66, 121, 116, 101, 115, 80, 114, 111, 99, 101, 115, 115, 101, 100, 62, 60, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 50, 60, 47, 66, 121, 116, 101, 115, 82, 101, 116, 117, 114, 110, 101, 100, 62, 60, 47, 83, 116, 97, 116, 115, 62, 190, 146, 162, 21, 0, 0, 0, 56, 0, 0, 0, 40, 193, 198, 132, 212, 13, 58, 109, 101, 115, 115, 97, 103, 101, 45, 116, 121, 112, 101, 7, 0, 5, 101, 118, 101, 110, 116, 11, 58, 101, 118, 101, 110, 116, 45, 116, 121, 112, 101, 7, 0, 3, 69, 110, 100, 207, 151, 211, 146, - }, - }, - } - - for i, testCase := range testTable { - t.Run(fmt.Sprint(i), func(t *testing.T) { - getReader := func(offset int64, length int64) (io.ReadCloser, error) { - testdataFile := "testdata.parquet" - file, err := os.Open(testdataFile) - if err != nil { - return nil, err - } - - fi, err := file.Stat() - if err != nil { - return nil, err - } - - if offset < 0 { - offset = fi.Size() + offset - } - - if _, err = file.Seek(offset, io.SeekStart); err != nil { - return nil, err - } - - return file, nil - } - - s3Select, err := NewS3Select(bytes.NewReader(testCase.requestXML)) - if err != nil { - t.Fatal(err) - } - - if err = s3Select.Open(getReader); err != nil { - t.Fatal(err) - } - - w := &testResponseWriter{} - s3Select.Evaluate(w) - s3Select.Close() - - if !reflect.DeepEqual(w.response, testCase.expectedResult) { - resp := http.Response{ - StatusCode: http.StatusOK, - Body: ioutil.NopCloser(bytes.NewReader(w.response)), - ContentLength: int64(len(w.response)), - } - res, err := minio.NewSelectResults(&resp, "testbucket") - if err != nil { - t.Error(err) - return - } - got, err := ioutil.ReadAll(res) - if err != nil { - t.Error(err) - return - } - - t.Errorf("received response does not match with expected reply\ngot: %#v\nwant:%#v\ndecoded:%s", w.response, testCase.expectedResult, string(got)) - } - }) - } -} diff --git a/pkg/s3select/simdj/errors.go b/pkg/s3select/simdj/errors.go deleted file mode 100644 index 3b6106d1..00000000 --- a/pkg/s3select/simdj/errors.go +++ /dev/null @@ -1,55 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package simdj - -import "fmt" - -type s3Error struct { - code string - message string - statusCode int - cause error -} - -func (err *s3Error) Cause() error { - return err.cause -} - -func (err *s3Error) ErrorCode() string { - return err.code -} - -func (err *s3Error) ErrorMessage() string { - return err.message -} - -func (err *s3Error) HTTPStatusCode() int { - return err.statusCode -} - -func (err *s3Error) Error() string { - return err.message -} - -func errJSONParsingError(err error) *s3Error { - return &s3Error{ - code: "JSONParsingError", - message: fmt.Sprintf("Encountered an error parsing the JSON file: %v. Check the file and try again.", err), - statusCode: 400, - cause: err, - } -} diff --git a/pkg/s3select/simdj/reader.go b/pkg/s3select/simdj/reader.go deleted file mode 100644 index 8870ab95..00000000 --- a/pkg/s3select/simdj/reader.go +++ /dev/null @@ -1,187 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package simdj - -import ( - "fmt" - "io" - "sync" - - "github.com/minio/minio/pkg/s3select/json" - "github.com/minio/minio/pkg/s3select/sql" - "github.com/minio/simdjson-go" -) - -// Reader - JSON record reader for S3Select. -type Reader struct { - args *json.ReaderArgs - input chan simdjson.Stream - decoded chan simdjson.Object - - // err will only be returned after decoded has been closed. - err *error - readCloser io.ReadCloser - - exitReader chan struct{} - readerWg sync.WaitGroup -} - -// Read - reads single record. -func (r *Reader) Read(dst sql.Record) (sql.Record, error) { - v, ok := <-r.decoded - if !ok { - if r.err != nil && *r.err != nil { - return nil, errJSONParsingError(*r.err) - } - return nil, io.EOF - } - dstRec, ok := dst.(*Record) - if !ok { - dstRec = &Record{} - } - dstRec.object = v - return dstRec, nil -} - -// Close - closes underlying reader. -func (r *Reader) Close() error { - // Close the input. - // Potentially racy if the stream decoder is still reading. - if r.readCloser != nil { - r.readCloser.Close() - } - if r.exitReader != nil { - close(r.exitReader) - r.readerWg.Wait() - r.exitReader = nil - r.input = nil - } - return nil -} - -// startReader will start a reader that accepts input from r.input. -// Input should be root -> object input. Each root indicates a record. -// If r.input is closed, it is assumed that no more input will come. -// When this function returns r.readerWg will be decremented and r.decoded will be closed. -// On errors, r.err will be set. This should only be accessed after r.decoded has been closed. 
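Taken together, `NewReader` wires `simdjson.ParseNDStream` into `r.input`, `startReader` (below) turns each parsed root object into an entry on `r.decoded`, and `Read` hands those objects out one record at a time until EOF. A hedged consumption sketch (the `drain` helper and `visit` callback are illustrative, not from this package):

```go
package simdj

import (
	"io"

	"github.com/minio/minio/pkg/s3select/json"
	"github.com/minio/minio/pkg/s3select/sql"
)

// drain is illustrative, not part of this diff: it reads every record from a
// Reader, passing the previously returned record back in so its storage is
// reused, and stops cleanly on io.EOF as documented on Read.
func drain(rc io.ReadCloser, args *json.ReaderArgs, visit func(sql.Record) error) error {
	r := NewReader(rc, args) // also starts the background decode goroutine
	defer r.Close()

	var rec sql.Record
	for {
		var err error
		if rec, err = r.Read(rec); err != nil {
			if err == io.EOF {
				return nil
			}
			return err // parse failures surface as a JSONParsingError
		}
		if err := visit(rec); err != nil {
			return err
		}
	}
}
```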
-func (r *Reader) startReader() { - defer r.readerWg.Done() - defer close(r.decoded) - var tmpObj simdjson.Object - for { - var in simdjson.Stream - select { - case in = <-r.input: - case <-r.exitReader: - return - } - if in.Error != nil && in.Error != io.EOF { - r.err = &in.Error - return - } - if in.Value == nil { - if in.Error == io.EOF { - return - } - continue - } - i := in.Value.Iter() - readloop: - for { - var next simdjson.Iter - typ, err := i.AdvanceIter(&next) - if err != nil { - r.err = &err - return - } - switch typ { - case simdjson.TypeNone: - break readloop - case simdjson.TypeRoot: - typ, obj, err := next.Root(nil) - if err != nil { - r.err = &err - return - } - if typ != simdjson.TypeObject { - if typ == simdjson.TypeNone { - continue - } - err = fmt.Errorf("unexpected json type below root :%v", typ) - r.err = &err - return - } - - o, err := obj.Object(&tmpObj) - if err != nil { - r.err = &err - return - } - select { - case <-r.exitReader: - return - case r.decoded <- *o: - } - default: - err = fmt.Errorf("unexpected root json type:%v", typ) - r.err = &err - return - } - } - if in.Error == io.EOF { - return - } - } -} - -// NewReader - creates new JSON reader using readCloser. -func NewReader(readCloser io.ReadCloser, args *json.ReaderArgs) *Reader { - r := Reader{ - args: args, - readCloser: readCloser, - decoded: make(chan simdjson.Object, 1000), - input: make(chan simdjson.Stream, 2), - exitReader: make(chan struct{}), - } - simdjson.ParseNDStream(readCloser, r.input, nil) - r.readerWg.Add(1) - go r.startReader() - return &r -} - -// NewElementReader - creates new JSON reader using readCloser. -func NewElementReader(ch chan simdjson.Object, err *error, args *json.ReaderArgs) *Reader { - return &Reader{ - args: args, - decoded: ch, - err: err, - readCloser: nil, - } -} - -// NewTapeReaderChan will start a reader that will read input from the provided channel. -func NewTapeReaderChan(pj chan simdjson.Stream, args *json.ReaderArgs) *Reader { - r := Reader{ - args: args, - decoded: make(chan simdjson.Object, 1000), - input: pj, - exitReader: make(chan struct{}), - } - r.readerWg.Add(1) - go r.startReader() - return &r -} diff --git a/pkg/s3select/simdj/reader_amd64_test.go b/pkg/s3select/simdj/reader_amd64_test.go deleted file mode 100644 index cb443221..00000000 --- a/pkg/s3select/simdj/reader_amd64_test.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package simdj - -import ( - "bytes" - "io" - "io/ioutil" - "path/filepath" - "testing" - - "github.com/klauspost/compress/zstd" - "github.com/minio/minio/pkg/s3select/json" - "github.com/minio/minio/pkg/s3select/sql" - "github.com/minio/simdjson-go" -) - -type tester interface { - Fatal(args ...interface{}) -} - -func loadCompressed(t tester, file string) (js []byte) { - dec, err := zstd.NewReader(nil) - if err != nil { - t.Fatal(err) - } - defer dec.Close() - js, err = ioutil.ReadFile(filepath.Join("testdata", file+".json.zst")) - if err != nil { - t.Fatal(err) - } - js, err = dec.DecodeAll(js, nil) - if err != nil { - t.Fatal(err) - } - - return js -} - -var testCases = []struct { - name string - array bool -}{ - { - name: "parking-citations-10", - }, -} - -func TestNDJSON(t *testing.T) { - if !simdjson.SupportedCPU() { - t.Skip("Unsupported cpu") - } - - for _, tt := range testCases { - - t.Run(tt.name, func(t *testing.T) { - ref := loadCompressed(t, tt.name) - - var err error - dst := make(chan simdjson.Object, 100) - dec := NewElementReader(dst, &err, &json.ReaderArgs{ContentType: "json"}) - pj, err := simdjson.ParseND(ref, nil) - if err != nil { - t.Fatal(err) - } - i := pj.Iter() - cpy := i - b, err := cpy.MarshalJSON() - if err != nil { - t.Fatal(err) - } - if false { - t.Log(string(b)) - } - //_ = ioutil.WriteFile(filepath.Join("testdata", tt.name+".json"), b, os.ModePerm) - - parser: - for { - var next simdjson.Iter - typ, err := i.AdvanceIter(&next) - if err != nil { - t.Fatal(err) - } - switch typ { - case simdjson.TypeNone: - close(dst) - break parser - case simdjson.TypeRoot: - typ, obj, err := next.Root(nil) - if err != nil { - t.Fatal(err) - } - if typ != simdjson.TypeObject { - if typ == simdjson.TypeNone { - close(dst) - break parser - } - t.Fatal("Unexpected type:", typ.String()) - } - - o, err := obj.Object(nil) - if err != nil { - t.Fatal(err) - } - dst <- *o - default: - t.Fatal("unexpected type:", typ.String()) - } - } - refDec := json.NewReader(ioutil.NopCloser(bytes.NewBuffer(ref)), &json.ReaderArgs{ContentType: "json"}) - - for { - rec, err := dec.Read(nil) - if err == io.EOF { - break - } - if err != nil { - t.Error(err) - } - want, err := refDec.Read(nil) - if err != nil { - t.Error(err) - } - var gotB, wantB bytes.Buffer - opts := sql.WriteCSVOpts{ - FieldDelimiter: ',', - Quote: '"', - QuoteEscape: '"', - AlwaysQuote: false, - } - err = rec.WriteCSV(&gotB, opts) - if err != nil { - t.Error(err) - } - err = want.WriteCSV(&wantB, opts) - if err != nil { - t.Error(err) - } - - if !bytes.Equal(gotB.Bytes(), wantB.Bytes()) { - t.Errorf("CSV output mismatch.\nwant: %s(%x)\ngot: %s(%x)", wantB.String(), wantB.Bytes(), gotB.String(), gotB.Bytes()) - } - gotB.Reset() - wantB.Reset() - - err = rec.WriteJSON(&gotB) - if err != nil { - t.Error(err) - } - err = want.WriteJSON(&wantB) - if err != nil { - t.Error(err) - } - // truncate newline from 'want' - wantB.Truncate(wantB.Len() - 1) - if !bytes.Equal(gotB.Bytes(), wantB.Bytes()) { - t.Errorf("JSON output mismatch.\nwant: %s\ngot: %s", wantB.String(), gotB.String()) - } - } - }) - } -} diff --git a/pkg/s3select/simdj/record.go b/pkg/s3select/simdj/record.go deleted file mode 100644 index 83139a6b..00000000 --- a/pkg/s3select/simdj/record.go +++ /dev/null @@ -1,232 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package simdj - -import ( - "fmt" - "io" - - csv "github.com/minio/minio/pkg/csvparser" - - "github.com/bcicen/jstream" - "github.com/minio/minio/pkg/s3select/json" - "github.com/minio/minio/pkg/s3select/sql" - "github.com/minio/simdjson-go" -) - -// Record - is JSON record. -type Record struct { - // object - object simdjson.Object -} - -// Get - gets the value for a column name. -func (r *Record) Get(name string) (*sql.Value, error) { - elem := r.object.FindKey(name, nil) - if elem == nil { - return nil, nil - } - return iterToValue(elem.Iter) -} - -func iterToValue(iter simdjson.Iter) (*sql.Value, error) { - switch iter.Type() { - case simdjson.TypeString: - v, err := iter.String() - if err != nil { - return nil, err - } - return sql.FromString(v), nil - case simdjson.TypeFloat: - v, err := iter.Float() - if err != nil { - return nil, err - } - return sql.FromFloat(v), nil - case simdjson.TypeInt: - v, err := iter.Int() - if err != nil { - return nil, err - } - return sql.FromInt(v), nil - case simdjson.TypeUint: - v, err := iter.Int() - if err != nil { - // Can't fit into int, convert to float. - v, err := iter.Float() - return sql.FromFloat(v), err - } - return sql.FromInt(v), nil - case simdjson.TypeBool: - v, err := iter.Bool() - if err != nil { - return nil, err - } - return sql.FromBool(v), nil - case simdjson.TypeNull: - return sql.FromNull(), nil - case simdjson.TypeObject, simdjson.TypeArray: - b, err := iter.MarshalJSON() - return sql.FromBytes(b), err - } - return nil, fmt.Errorf("iterToValue: unknown JSON type: %s", iter.Type().String()) -} - -// Reset the record. -func (r *Record) Reset() { - r.object = simdjson.Object{} -} - -// Clone the record and if possible use the destination provided. -func (r *Record) Clone(dst sql.Record) sql.Record { - other, ok := dst.(*Record) - if !ok { - other = &Record{} - } - other.object = r.object - return other -} - -// CloneTo clones the record to a json Record. -// Values are only unmashaled on object level. -func (r *Record) CloneTo(dst *json.Record) (sql.Record, error) { - if dst == nil { - dst = &json.Record{SelectFormat: sql.SelectFmtJSON} - } - dst.Reset() - elems, err := r.object.Parse(nil) - if err != nil { - return nil, err - } - if cap(dst.KVS) < len(elems.Elements) { - dst.KVS = make(jstream.KVS, 0, len(elems.Elements)) - } - for _, elem := range elems.Elements { - v, err := sql.IterToValue(elem.Iter) - if err != nil { - v, err = elem.Iter.Interface() - if err != nil { - panic(err) - } - } - dst.KVS = append(dst.KVS, jstream.KV{ - Key: elem.Name, - Value: v, - }) - } - return dst, nil -} - -// Set - sets the value for a column name. -func (r *Record) Set(name string, value *sql.Value) (sql.Record, error) { - dst, err := r.CloneTo(nil) - if err != nil { - return nil, err - } - return dst.Set(name, value) -} - -// WriteCSV - encodes to CSV data. 
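WriteCSV, which follows, walks the object's top-level elements and emits one CSV column per field, using MinIO's csvparser fork for its extra quoting options. As a rough standard-library approximation (encoding/csv has no Quote/QuoteEscape/AlwaysQuote knobs, so those options are omitted), the flatten-then-write step looks roughly like the sketch below; field values are assumed to be pre-stringified by the caller.

```go
package main

import (
	"encoding/csv"
	"fmt"
	"os"
)

// writeRow flattens pre-stringified field values into a single CSV record.
// Scalars would already be converted to text, and nested objects or arrays
// marshalled back to JSON strings, before reaching this point.
func writeRow(w *csv.Writer, fields []string) error {
	if err := w.Write(fields); err != nil {
		return err
	}
	w.Flush()
	return w.Error()
}

func main() {
	w := csv.NewWriter(os.Stdout)
	w.Comma = ','
	if err := writeRow(w, []string{"4272349", "2015-12-21", `{"lat":34.05}`}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```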
-func (r *Record) WriteCSV(writer io.Writer, opts sql.WriteCSVOpts) error { - csvRecord := make([]string, 0, 10) - var tmp simdjson.Iter - obj := r.object -allElems: - for { - _, typ, err := obj.NextElement(&tmp) - if err != nil { - return err - } - var columnValue string - switch typ { - case simdjson.TypeNull, simdjson.TypeFloat, simdjson.TypeUint, simdjson.TypeInt, simdjson.TypeBool, simdjson.TypeString: - val, err := tmp.StringCvt() - if err != nil { - return err - } - columnValue = val - case simdjson.TypeObject, simdjson.TypeArray: - b, err := tmp.MarshalJSON() - if err != nil { - return err - } - columnValue = string(b) - case simdjson.TypeNone: - break allElems - default: - return fmt.Errorf("cannot marshal unhandled type: %s", typ.String()) - } - csvRecord = append(csvRecord, columnValue) - } - w := csv.NewWriter(writer) - w.Comma = opts.FieldDelimiter - w.Quote = opts.Quote - w.QuoteEscape = opts.QuoteEscape - w.AlwaysQuote = opts.AlwaysQuote - if err := w.Write(csvRecord); err != nil { - return err - } - w.Flush() - if err := w.Error(); err != nil { - return err - } - - return nil -} - -// Raw - returns the underlying representation. -func (r *Record) Raw() (sql.SelectObjectFormat, interface{}) { - return sql.SelectFmtSIMDJSON, r.object -} - -// WriteJSON - encodes to JSON data. -func (r *Record) WriteJSON(writer io.Writer) error { - o := r.object - elems, err := o.Parse(nil) - if err != nil { - return err - } - b, err := elems.MarshalJSON() - if err != nil { - return err - } - n, err := writer.Write(b) - if err != nil { - return err - } - if n != len(b) { - return io.ErrShortWrite - } - return nil -} - -// Replace the underlying buffer of json data. -func (r *Record) Replace(k interface{}) error { - v, ok := k.(simdjson.Object) - if !ok { - return fmt.Errorf("cannot replace internal data in simd json record with type %T", k) - } - r.object = v - return nil -} - -// NewRecord - creates new empty JSON record. -func NewRecord(f sql.SelectObjectFormat, obj simdjson.Object) *Record { - return &Record{ - object: obj, - } -} diff --git a/pkg/s3select/simdj/testdata/parking-citations-10.json.zst b/pkg/s3select/simdj/testdata/parking-citations-10.json.zst deleted file mode 100644 index c25da344..00000000 Binary files a/pkg/s3select/simdj/testdata/parking-citations-10.json.zst and /dev/null differ diff --git a/pkg/s3select/sql/aggregation.go b/pkg/s3select/sql/aggregation.go deleted file mode 100644 index 2d8ed02e..00000000 --- a/pkg/s3select/sql/aggregation.go +++ /dev/null @@ -1,330 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import ( - "errors" - "fmt" -) - -// Aggregation Function name constants -const ( - aggFnAvg FuncName = "AVG" - aggFnCount FuncName = "COUNT" - aggFnMax FuncName = "MAX" - aggFnMin FuncName = "MIN" - aggFnSum FuncName = "SUM" -) - -var ( - errNonNumericArg = func(fnStr FuncName) error { - return fmt.Errorf("%s() requires a numeric argument", fnStr) - } - errInvalidAggregation = errors.New("Invalid aggregation seen") -) - -type aggVal struct { - runningSum *Value - runningCount int64 - runningMax, runningMin *Value - - // Stores if at least one record has been seen - seen bool -} - -func newAggVal(fn FuncName) *aggVal { - switch fn { - case aggFnAvg, aggFnSum: - return &aggVal{runningSum: FromFloat(0)} - case aggFnMin: - return &aggVal{runningMin: FromInt(0)} - case aggFnMax: - return &aggVal{runningMax: FromInt(0)} - default: - return &aggVal{} - } -} - -// evalAggregationNode - performs partial computation using the -// current row and stores the result. -// -// On success, it returns (nil, nil). -func (e *FuncExpr) evalAggregationNode(r Record) error { - // It is assumed that this function is called only when - // `e` is an aggregation function. - - var val *Value - var err error - funcName := e.getFunctionName() - if aggFnCount == funcName { - if e.Count.StarArg { - // Handle COUNT(*) - e.aggregate.runningCount++ - return nil - } - - val, err = e.Count.ExprArg.evalNode(r) - if err != nil { - return err - } - } else { - // Evaluate the (only) argument - val, err = e.SFunc.ArgsList[0].evalNode(r) - if err != nil { - return err - } - } - - if val.IsNull() { - // E.g. the column or field does not exist in the - // record - in all such cases the aggregation is not - // updated. - return nil - } - - argVal := val - if funcName != aggFnCount { - // All aggregation functions, except COUNT require a - // numeric argument. - - // Here, we diverge from Amazon S3 behavior by - // inferring untyped values are numbers. - if !argVal.isNumeric() { - if i, ok := argVal.bytesToInt(); ok { - argVal.setInt(i) - } else if f, ok := argVal.bytesToFloat(); ok { - argVal.setFloat(f) - } else { - return errNonNumericArg(funcName) - } - } - } - - // Mark that we have seen one non-null value. - isFirstRow := false - if !e.aggregate.seen { - e.aggregate.seen = true - isFirstRow = true - } - - switch funcName { - case aggFnCount: - // For all non-null values, the count is incremented. - e.aggregate.runningCount++ - - case aggFnAvg, aggFnSum: - e.aggregate.runningCount++ - // Convert to float. 
- f, ok := argVal.ToFloat() - if !ok { - return fmt.Errorf("Could not convert value %v (%s) to a number", argVal.value, argVal.GetTypeString()) - } - argVal.setFloat(f) - err = e.aggregate.runningSum.arithOp(opPlus, argVal) - - case aggFnMin: - err = e.aggregate.runningMin.minmax(argVal, false, isFirstRow) - - case aggFnMax: - err = e.aggregate.runningMax.minmax(argVal, true, isFirstRow) - - default: - err = errInvalidAggregation - } - - return err -} - -func (e *AliasedExpression) aggregateRow(r Record) error { - return e.Expression.aggregateRow(r) -} - -func (e *Expression) aggregateRow(r Record) error { - for _, ex := range e.And { - err := ex.aggregateRow(r) - if err != nil { - return err - } - } - return nil -} - -func (e *ListExpr) aggregateRow(r Record) error { - for _, ex := range e.Elements { - err := ex.aggregateRow(r) - if err != nil { - return err - } - } - return nil -} - -func (e *AndCondition) aggregateRow(r Record) error { - for _, ex := range e.Condition { - err := ex.aggregateRow(r) - if err != nil { - return err - } - } - return nil -} - -func (e *Condition) aggregateRow(r Record) error { - if e.Operand != nil { - return e.Operand.aggregateRow(r) - } - return e.Not.aggregateRow(r) -} - -func (e *ConditionOperand) aggregateRow(r Record) error { - err := e.Operand.aggregateRow(r) - if err != nil { - return err - } - - if e.ConditionRHS == nil { - return nil - } - - switch { - case e.ConditionRHS.Compare != nil: - return e.ConditionRHS.Compare.Operand.aggregateRow(r) - case e.ConditionRHS.Between != nil: - err = e.ConditionRHS.Between.Start.aggregateRow(r) - if err != nil { - return err - } - return e.ConditionRHS.Between.End.aggregateRow(r) - case e.ConditionRHS.In != nil: - elt := e.ConditionRHS.In.ListExpression - err = elt.aggregateRow(r) - if err != nil { - return err - } - return nil - case e.ConditionRHS.Like != nil: - err = e.ConditionRHS.Like.Pattern.aggregateRow(r) - if err != nil { - return err - } - return e.ConditionRHS.Like.EscapeChar.aggregateRow(r) - default: - return errInvalidASTNode - } -} - -func (e *Operand) aggregateRow(r Record) error { - err := e.Left.aggregateRow(r) - if err != nil { - return err - } - for _, rt := range e.Right { - err = rt.Right.aggregateRow(r) - if err != nil { - return err - } - } - return nil -} - -func (e *MultOp) aggregateRow(r Record) error { - err := e.Left.aggregateRow(r) - if err != nil { - return err - } - for _, rt := range e.Right { - err = rt.Right.aggregateRow(r) - if err != nil { - return err - } - } - return nil -} - -func (e *UnaryTerm) aggregateRow(r Record) error { - if e.Negated != nil { - return e.Negated.Term.aggregateRow(r) - } - return e.Primary.aggregateRow(r) -} - -func (e *PrimaryTerm) aggregateRow(r Record) error { - switch { - case e.ListExpr != nil: - return e.ListExpr.aggregateRow(r) - case e.SubExpression != nil: - return e.SubExpression.aggregateRow(r) - case e.FuncCall != nil: - return e.FuncCall.aggregateRow(r) - } - return nil -} - -func (e *FuncExpr) aggregateRow(r Record) error { - switch e.getFunctionName() { - case aggFnAvg, aggFnSum, aggFnMax, aggFnMin, aggFnCount: - return e.evalAggregationNode(r) - default: - // TODO: traverse arguments and call aggregateRow on - // them if they could be an ancestor of an - // aggregation. - } - return nil -} - -// getAggregate() implementation for each AST node follows. This is -// called after calling aggregateRow() on each input row, to calculate -// the final aggregate result. 
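The evalAggregationNode logic above and the getAggregate implementations that follow are the usual accumulate-then-finalize pattern: each input row updates a running count/sum/min/max, and only after the last row is the final value produced (AVG divides the running sum by the running count, and returns NULL when no rows were seen). A minimal self-contained sketch of that pattern, with plain float64 accumulators standing in for the package's *Value type:

```go
package main

import "fmt"

// agg accumulates per-row state; the finalizers are only valid after all rows.
type agg struct {
	count    int64
	sum      float64
	min, max float64
	seen     bool
}

func (a *agg) addRow(v float64) {
	a.count++
	a.sum += v
	if !a.seen || v < a.min {
		a.min = v
	}
	if !a.seen || v > a.max {
		a.max = v
	}
	a.seen = true
}

// avg mirrors the AVG finalizer: no rows seen means "no result" (NULL).
func (a *agg) avg() (float64, bool) {
	if a.count == 0 {
		return 0, false
	}
	return a.sum / float64(a.count), true
}

func main() {
	var a agg
	for _, v := range []float64{3, 1, 4} {
		a.addRow(v)
	}
	mean, _ := a.avg()
	fmt.Println(a.count, a.sum, a.min, a.max, mean) // 3 8 1 4 2.666...
}
```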
- -func (e *FuncExpr) getAggregate() (*Value, error) { - switch e.getFunctionName() { - case aggFnCount: - return FromInt(e.aggregate.runningCount), nil - - case aggFnAvg: - if e.aggregate.runningCount == 0 { - // No rows were seen by AVG. - return FromNull(), nil - } - err := e.aggregate.runningSum.arithOp(opDivide, FromInt(e.aggregate.runningCount)) - return e.aggregate.runningSum, err - - case aggFnMin: - if !e.aggregate.seen { - // No rows were seen by MIN - return FromNull(), nil - } - return e.aggregate.runningMin, nil - - case aggFnMax: - if !e.aggregate.seen { - // No rows were seen by MAX - return FromNull(), nil - } - return e.aggregate.runningMax, nil - - case aggFnSum: - // TODO: check if returning 0 when no rows were seen - // by SUM is expected behavior. - return e.aggregate.runningSum, nil - - default: - // TODO: - } - - return nil, errInvalidAggregation -} diff --git a/pkg/s3select/sql/analysis.go b/pkg/s3select/sql/analysis.go deleted file mode 100644 index 3e84cad4..00000000 --- a/pkg/s3select/sql/analysis.go +++ /dev/null @@ -1,322 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sql - -import ( - "errors" - "fmt" -) - -// Query analysis - The query is analyzed to determine if it involves -// aggregation. -// -// Aggregation functions - An expression that involves aggregation of -// rows in some manner. Requires all input rows to be processed, -// before a result is returned. -// -// Row function - An expression that depends on a value in the -// row. They have an output for each input row. -// -// Some types of a queries are not valid. For example, an aggregation -// function combined with a row function is meaningless ("AVG(s.Age) + -// s.Salary"). Analysis determines if such a scenario exists so an -// error can be returned. - -var ( - // Fatal error for query processing. - errNestedAggregation = errors.New("Cannot nest aggregations") - errFunctionNotImplemented = errors.New("Function is not yet implemented") - errUnexpectedInvalidNode = errors.New("Unexpected node value") - errInvalidKeypath = errors.New("A provided keypath is invalid") -) - -// qProp contains analysis info about an SQL term. -type qProp struct { - isAggregation, isRowFunc bool - - err error -} - -// `combine` combines a pair of `qProp`s, so that errors are -// propagated correctly, and checks that an aggregation is not being -// combined with a row-function term. 
-func (p *qProp) combine(q qProp) { - switch { - case p.err != nil: - // Do nothing - case q.err != nil: - p.err = q.err - default: - p.isAggregation = p.isAggregation || q.isAggregation - p.isRowFunc = p.isRowFunc || q.isRowFunc - if p.isAggregation && p.isRowFunc { - p.err = errNestedAggregation - } - } -} - -func (e *SelectExpression) analyze(s *Select) (result qProp) { - if e.All { - return qProp{isRowFunc: true} - } - - for _, ex := range e.Expressions { - result.combine(ex.analyze(s)) - } - return -} - -func (e *AliasedExpression) analyze(s *Select) qProp { - return e.Expression.analyze(s) -} - -func (e *Expression) analyze(s *Select) (result qProp) { - for _, ac := range e.And { - result.combine(ac.analyze(s)) - } - return -} - -func (e *AndCondition) analyze(s *Select) (result qProp) { - for _, ac := range e.Condition { - result.combine(ac.analyze(s)) - } - return -} - -func (e *Condition) analyze(s *Select) (result qProp) { - if e.Operand != nil { - result = e.Operand.analyze(s) - } else { - result = e.Not.analyze(s) - } - return -} - -func (e *ListExpr) analyze(s *Select) (result qProp) { - for _, ac := range e.Elements { - result.combine(ac.analyze(s)) - } - return -} - -func (e *ConditionOperand) analyze(s *Select) (result qProp) { - if e.ConditionRHS == nil { - result = e.Operand.analyze(s) - } else { - result.combine(e.Operand.analyze(s)) - result.combine(e.ConditionRHS.analyze(s)) - } - return -} - -func (e *ConditionRHS) analyze(s *Select) (result qProp) { - switch { - case e.Compare != nil: - result = e.Compare.Operand.analyze(s) - case e.Between != nil: - result.combine(e.Between.Start.analyze(s)) - result.combine(e.Between.End.analyze(s)) - case e.In != nil: - result.combine(e.In.ListExpression.analyze(s)) - case e.Like != nil: - result.combine(e.Like.Pattern.analyze(s)) - if e.Like.EscapeChar != nil { - result.combine(e.Like.EscapeChar.analyze(s)) - } - default: - result = qProp{err: errUnexpectedInvalidNode} - } - return -} - -func (e *Operand) analyze(s *Select) (result qProp) { - result.combine(e.Left.analyze(s)) - for _, r := range e.Right { - result.combine(r.Right.analyze(s)) - } - return -} - -func (e *MultOp) analyze(s *Select) (result qProp) { - result.combine(e.Left.analyze(s)) - for _, r := range e.Right { - result.combine(r.Right.analyze(s)) - } - return -} - -func (e *UnaryTerm) analyze(s *Select) (result qProp) { - if e.Negated != nil { - result = e.Negated.Term.analyze(s) - } else { - result = e.Primary.analyze(s) - } - return -} - -func (e *PrimaryTerm) analyze(s *Select) (result qProp) { - switch { - case e.Value != nil: - result = qProp{} - - case e.JPathExpr != nil: - // Check if the path expression is valid - if len(e.JPathExpr.PathExpr) > 0 { - if e.JPathExpr.BaseKey.String() != s.From.As { - result = qProp{err: errInvalidKeypath} - return - } - } - result = qProp{isRowFunc: true} - - case e.ListExpr != nil: - result = e.ListExpr.analyze(s) - - case e.SubExpression != nil: - result = e.SubExpression.analyze(s) - - case e.FuncCall != nil: - result = e.FuncCall.analyze(s) - - default: - result = qProp{err: errUnexpectedInvalidNode} - } - return -} - -func (e *FuncExpr) analyze(s *Select) (result qProp) { - funcName := e.getFunctionName() - - switch funcName { - case sqlFnCast: - return e.Cast.Expr.analyze(s) - - case sqlFnExtract: - return e.Extract.From.analyze(s) - - case sqlFnDateAdd: - result.combine(e.DateAdd.Quantity.analyze(s)) - result.combine(e.DateAdd.Timestamp.analyze(s)) - return result - - case sqlFnDateDiff: - 
result.combine(e.DateDiff.Timestamp1.analyze(s)) - result.combine(e.DateDiff.Timestamp2.analyze(s)) - return result - - // Handle aggregation function calls - case aggFnAvg, aggFnMax, aggFnMin, aggFnSum, aggFnCount: - // Initialize accumulator - e.aggregate = newAggVal(funcName) - - var exprA qProp - if funcName == aggFnCount { - if e.Count.StarArg { - return qProp{isAggregation: true} - } - - exprA = e.Count.ExprArg.analyze(s) - } else { - if len(e.SFunc.ArgsList) != 1 { - return qProp{err: fmt.Errorf("%s takes exactly one argument", funcName)} - } - exprA = e.SFunc.ArgsList[0].analyze(s) - } - - if exprA.err != nil { - return exprA - } - if exprA.isAggregation { - return qProp{err: errNestedAggregation} - } - return qProp{isAggregation: true} - - case sqlFnCoalesce: - if len(e.SFunc.ArgsList) == 0 { - return qProp{err: fmt.Errorf("%s needs at least one argument", string(funcName))} - } - for _, arg := range e.SFunc.ArgsList { - result.combine(arg.analyze(s)) - } - return result - - case sqlFnNullIf: - if len(e.SFunc.ArgsList) != 2 { - return qProp{err: fmt.Errorf("%s needs exactly 2 arguments", string(funcName))} - } - for _, arg := range e.SFunc.ArgsList { - result.combine(arg.analyze(s)) - } - return result - - case sqlFnCharLength, sqlFnCharacterLength: - if len(e.SFunc.ArgsList) != 1 { - return qProp{err: fmt.Errorf("%s needs exactly 2 arguments", string(funcName))} - } - for _, arg := range e.SFunc.ArgsList { - result.combine(arg.analyze(s)) - } - return result - - case sqlFnLower, sqlFnUpper: - if len(e.SFunc.ArgsList) != 1 { - return qProp{err: fmt.Errorf("%s needs exactly 2 arguments", string(funcName))} - } - for _, arg := range e.SFunc.ArgsList { - result.combine(arg.analyze(s)) - } - return result - - case sqlFnTrim: - if e.Trim.TrimChars != nil { - result.combine(e.Trim.TrimChars.analyze(s)) - } - if e.Trim.TrimFrom != nil { - result.combine(e.Trim.TrimFrom.analyze(s)) - } - return result - - case sqlFnSubstring: - errVal := fmt.Errorf("Invalid argument(s) to %s", string(funcName)) - result.combine(e.Substring.Expr.analyze(s)) - switch { - case e.Substring.From != nil: - result.combine(e.Substring.From.analyze(s)) - if e.Substring.For != nil { - result.combine(e.Substring.Expr.analyze(s)) - } - case e.Substring.Arg2 != nil: - result.combine(e.Substring.Arg2.analyze(s)) - if e.Substring.Arg3 != nil { - result.combine(e.Substring.Arg3.analyze(s)) - } - default: - result.err = errVal - } - return result - - case sqlFnUTCNow: - if len(e.SFunc.ArgsList) != 0 { - result.err = fmt.Errorf("%s() takes no arguments", string(funcName)) - } - return result - } - - // TODO: implement other functions - return qProp{err: errFunctionNotImplemented} -} diff --git a/pkg/s3select/sql/errors.go b/pkg/s3select/sql/errors.go deleted file mode 100644 index f5874721..00000000 --- a/pkg/s3select/sql/errors.go +++ /dev/null @@ -1,109 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import "fmt" - -type s3Error struct { - code string - message string - statusCode int - cause error -} - -func (err *s3Error) Cause() error { - return err.cause -} - -func (err *s3Error) ErrorCode() string { - return err.code -} - -func (err *s3Error) ErrorMessage() string { - return err.message -} - -func (err *s3Error) HTTPStatusCode() int { - return err.statusCode -} - -func (err *s3Error) Error() string { - return err.message -} - -func errInvalidDataType(err error) *s3Error { - return &s3Error{ - code: "InvalidDataType", - message: "The SQL expression contains an invalid data type.", - statusCode: 400, - cause: err, - } -} - -func errIncorrectSQLFunctionArgumentType(err error) *s3Error { - return &s3Error{ - code: "IncorrectSqlFunctionArgumentType", - message: "Incorrect type of arguments in function call.", - statusCode: 400, - cause: err, - } -} - -func errLikeInvalidInputs(err error) *s3Error { - return &s3Error{ - code: "LikeInvalidInputs", - message: "Invalid argument given to the LIKE clause in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errQueryParseFailure(err error) *s3Error { - return &s3Error{ - code: "ParseSelectFailure", - message: err.Error(), - statusCode: 400, - cause: err, - } -} - -func errQueryAnalysisFailure(err error) *s3Error { - return &s3Error{ - code: "InvalidQuery", - message: err.Error(), - statusCode: 400, - cause: err, - } -} - -func errBadTableName(err error) *s3Error { - return &s3Error{ - code: "BadTableName", - message: fmt.Sprintf("The table name is not supported: %v", err), - statusCode: 400, - cause: err, - } -} - -func errDataSource(err error) *s3Error { - return &s3Error{ - code: "DataSourcePathUnsupported", - message: fmt.Sprintf("Data source: %v", err), - statusCode: 400, - cause: err, - } -} diff --git a/pkg/s3select/sql/evaluate.go b/pkg/s3select/sql/evaluate.go deleted file mode 100644 index d15f45c8..00000000 --- a/pkg/s3select/sql/evaluate.go +++ /dev/null @@ -1,484 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sql - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "strings" - - "github.com/bcicen/jstream" - "github.com/minio/simdjson-go" -) - -var ( - errInvalidASTNode = errors.New("invalid AST Node") - errExpectedBool = errors.New("expected bool") - errLikeNonStrArg = errors.New("LIKE clause requires string arguments") - errLikeInvalidEscape = errors.New("LIKE clause has invalid ESCAPE character") - errNotImplemented = errors.New("not implemented") -) - -// AST Node Evaluation functions -// -// During evaluation, the query is known to be valid, as analysis is -// complete. The only errors possible are due to value type -// mismatches, etc. -// -// If an aggregation node is present as a descendant (when -// e.prop.isAggregation is true), we call evalNode on all child nodes, -// check for errors, but do not perform any combining of the results -// of child nodes. 
The final result row is returned after all rows are -// processed, and the `getAggregate` function is called. - -func (e *AliasedExpression) evalNode(r Record) (*Value, error) { - return e.Expression.evalNode(r) -} - -func (e *Expression) evalNode(r Record) (*Value, error) { - if len(e.And) == 1 { - // In this case, result is not required to be boolean - // type. - return e.And[0].evalNode(r) - } - - // Compute OR of conditions - result := false - for _, ex := range e.And { - res, err := ex.evalNode(r) - if err != nil { - return nil, err - } - b, ok := res.ToBool() - if !ok { - return nil, errExpectedBool - } - result = result || b - } - return FromBool(result), nil -} - -func (e *AndCondition) evalNode(r Record) (*Value, error) { - if len(e.Condition) == 1 { - // In this case, result does not have to be boolean - return e.Condition[0].evalNode(r) - } - - // Compute AND of conditions - result := true - for _, ex := range e.Condition { - res, err := ex.evalNode(r) - if err != nil { - return nil, err - } - b, ok := res.ToBool() - if !ok { - return nil, errExpectedBool - } - result = result && b - } - return FromBool(result), nil -} - -func (e *Condition) evalNode(r Record) (*Value, error) { - if e.Operand != nil { - // In this case, result does not have to be boolean - return e.Operand.evalNode(r) - } - - // Compute NOT of condition - res, err := e.Not.evalNode(r) - if err != nil { - return nil, err - } - b, ok := res.ToBool() - if !ok { - return nil, errExpectedBool - } - return FromBool(!b), nil -} - -func (e *ConditionOperand) evalNode(r Record) (*Value, error) { - opVal, opErr := e.Operand.evalNode(r) - if opErr != nil || e.ConditionRHS == nil { - return opVal, opErr - } - - // Need to evaluate the ConditionRHS - switch { - case e.ConditionRHS.Compare != nil: - cmpRight, cmpRErr := e.ConditionRHS.Compare.Operand.evalNode(r) - if cmpRErr != nil { - return nil, cmpRErr - } - - b, err := opVal.compareOp(e.ConditionRHS.Compare.Operator, cmpRight) - return FromBool(b), err - - case e.ConditionRHS.Between != nil: - return e.ConditionRHS.Between.evalBetweenNode(r, opVal) - - case e.ConditionRHS.Like != nil: - return e.ConditionRHS.Like.evalLikeNode(r, opVal) - - case e.ConditionRHS.In != nil: - return e.ConditionRHS.In.evalInNode(r, opVal) - - default: - return nil, errInvalidASTNode - } -} - -func (e *Between) evalBetweenNode(r Record, arg *Value) (*Value, error) { - stVal, stErr := e.Start.evalNode(r) - if stErr != nil { - return nil, stErr - } - - endVal, endErr := e.End.evalNode(r) - if endErr != nil { - return nil, endErr - } - - part1, err1 := stVal.compareOp(opLte, arg) - if err1 != nil { - return nil, err1 - } - - part2, err2 := arg.compareOp(opLte, endVal) - if err2 != nil { - return nil, err2 - } - - result := part1 && part2 - if e.Not { - result = !result - } - - return FromBool(result), nil -} - -func (e *Like) evalLikeNode(r Record, arg *Value) (*Value, error) { - inferTypeAsString(arg) - - s, ok := arg.ToString() - if !ok { - err := errLikeNonStrArg - return nil, errLikeInvalidInputs(err) - } - - pattern, err1 := e.Pattern.evalNode(r) - if err1 != nil { - return nil, err1 - } - - // Infer pattern as string (in case it is untyped) - inferTypeAsString(pattern) - - patternStr, ok := pattern.ToString() - if !ok { - err := errLikeNonStrArg - return nil, errLikeInvalidInputs(err) - } - - escape := runeZero - if e.EscapeChar != nil { - escapeVal, err2 := e.EscapeChar.evalNode(r) - if err2 != nil { - return nil, err2 - } - - inferTypeAsString(escapeVal) - - escapeStr, ok := 
escapeVal.ToString() - if !ok { - err := errLikeNonStrArg - return nil, errLikeInvalidInputs(err) - } - - if len([]rune(escapeStr)) > 1 { - err := errLikeInvalidEscape - return nil, errLikeInvalidInputs(err) - } - } - - matchResult, err := evalSQLLike(s, patternStr, escape) - if err != nil { - return nil, err - } - - if e.Not { - matchResult = !matchResult - } - - return FromBool(matchResult), nil -} - -func (e *ListExpr) evalNode(r Record) (*Value, error) { - res := make([]Value, len(e.Elements)) - if len(e.Elements) == 1 { - // If length 1, treat as single value. - return e.Elements[0].evalNode(r) - } - for i, elt := range e.Elements { - v, err := elt.evalNode(r) - if err != nil { - return nil, err - } - res[i] = *v - } - return FromArray(res), nil -} - -func (e *In) evalInNode(r Record, lhs *Value) (*Value, error) { - // Compare two values in terms of in-ness. - var cmp func(a, b Value) bool - cmp = func(a, b Value) bool { - // Convert if needed. - inferTypesForCmp(&a, &b) - - if a.Equals(b) { - return true - } - - // If elements, compare each. - aA, aOK := a.ToArray() - bA, bOK := b.ToArray() - if aOK && bOK { - if len(aA) != len(bA) { - return false - } - for i := range aA { - if !cmp(aA[i], bA[i]) { - return false - } - } - return true - } - // Try as numbers - aF, aOK := a.ToFloat() - bF, bOK := b.ToFloat() - - return aOK && bOK && aF == bF - } - - var rhs Value - if elt := e.ListExpression; elt != nil { - eltVal, err := elt.evalNode(r) - if err != nil { - return nil, err - } - rhs = *eltVal - } - - // If RHS is array compare each element. - if arr, ok := rhs.ToArray(); ok { - for _, element := range arr { - // If we have an array we are on the wrong level. - if cmp(element, *lhs) { - return FromBool(true), nil - } - } - return FromBool(false), nil - } - - return FromBool(cmp(rhs, *lhs)), nil -} - -func (e *Operand) evalNode(r Record) (*Value, error) { - lval, lerr := e.Left.evalNode(r) - if lerr != nil || len(e.Right) == 0 { - return lval, lerr - } - - // Process remaining child nodes - result must be - // numeric. This AST node is for terms separated by + or - - // symbols. - for _, rightTerm := range e.Right { - op := rightTerm.Op - rval, rerr := rightTerm.Right.evalNode(r) - if rerr != nil { - return nil, rerr - } - err := lval.arithOp(op, rval) - if err != nil { - return nil, err - } - } - return lval, nil -} - -func (e *MultOp) evalNode(r Record) (*Value, error) { - lval, lerr := e.Left.evalNode(r) - if lerr != nil || len(e.Right) == 0 { - return lval, lerr - } - - // Process other child nodes - result must be numeric. This - // AST node is for terms separated by *, / or % symbols. - for _, rightTerm := range e.Right { - op := rightTerm.Op - rval, rerr := rightTerm.Right.evalNode(r) - if rerr != nil { - return nil, rerr - } - - err := lval.arithOp(op, rval) - if err != nil { - return nil, err - } - } - return lval, nil -} - -func (e *UnaryTerm) evalNode(r Record) (*Value, error) { - if e.Negated == nil { - return e.Primary.evalNode(r) - } - - v, err := e.Negated.Term.evalNode(r) - if err != nil { - return nil, err - } - - inferTypeForArithOp(v) - v.negate() - if v.isNumeric() { - return v, nil - } - return nil, errArithMismatchedTypes -} - -func (e *JSONPath) evalNode(r Record) (*Value, error) { - // Strip the table name from the keypath. 
- keypath := e.String() - if strings.Contains(keypath, ".") { - ps := strings.SplitN(keypath, ".", 2) - if len(ps) == 2 { - keypath = ps[1] - } - } - _, rawVal := r.Raw() - switch rowVal := rawVal.(type) { - case jstream.KVS, simdjson.Object: - pathExpr := e.PathExpr - if len(pathExpr) == 0 { - pathExpr = []*JSONPathElement{{Key: &ObjectKey{ID: e.BaseKey}}} - } - - result, _, err := jsonpathEval(pathExpr, rowVal) - if err != nil { - return nil, err - } - - return jsonToValue(result) - default: - return r.Get(keypath) - } -} - -// jsonToValue will convert the json value to an internal value. -func jsonToValue(result interface{}) (*Value, error) { - switch rval := result.(type) { - case string: - return FromString(rval), nil - case float64: - return FromFloat(rval), nil - case int64: - return FromInt(rval), nil - case uint64: - if rval <= math.MaxInt64 { - return FromInt(int64(rval)), nil - } - return FromFloat(float64(rval)), nil - case bool: - return FromBool(rval), nil - case jstream.KVS: - bs, err := json.Marshal(result) - if err != nil { - return nil, err - } - return FromBytes(bs), nil - case []interface{}: - dst := make([]Value, len(rval)) - for i := range rval { - v, err := jsonToValue(rval[i]) - if err != nil { - return nil, err - } - dst[i] = *v - } - return FromArray(dst), nil - case simdjson.Object: - o := rval - elems, err := o.Parse(nil) - if err != nil { - return nil, err - } - bs, err := elems.MarshalJSON() - if err != nil { - return nil, err - } - return FromBytes(bs), nil - case []Value: - return FromArray(rval), nil - case nil: - return FromNull(), nil - } - return nil, fmt.Errorf("Unhandled value type: %T", result) -} - -func (e *PrimaryTerm) evalNode(r Record) (res *Value, err error) { - switch { - case e.Value != nil: - return e.Value.evalNode(r) - case e.JPathExpr != nil: - return e.JPathExpr.evalNode(r) - case e.ListExpr != nil: - return e.ListExpr.evalNode(r) - case e.SubExpression != nil: - return e.SubExpression.evalNode(r) - case e.FuncCall != nil: - return e.FuncCall.evalNode(r) - } - return nil, errInvalidASTNode -} - -func (e *FuncExpr) evalNode(r Record) (res *Value, err error) { - switch e.getFunctionName() { - case aggFnCount, aggFnAvg, aggFnMax, aggFnMin, aggFnSum: - return e.getAggregate() - default: - return e.evalSQLFnNode(r) - } -} - -// evalNode on a literal value is independent of the node being an -// aggregation or a row function - it always returns a value. -func (e *LitValue) evalNode(_ Record) (res *Value, err error) { - switch { - case e.Number != nil: - return floatToValue(*e.Number), nil - case e.String != nil: - return FromString(string(*e.String)), nil - case e.Boolean != nil: - return FromBool(bool(*e.Boolean)), nil - } - return FromNull(), nil -} diff --git a/pkg/s3select/sql/funceval.go b/pkg/s3select/sql/funceval.go deleted file mode 100644 index 04f0e3e2..00000000 --- a/pkg/s3select/sql/funceval.go +++ /dev/null @@ -1,564 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import ( - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// FuncName - SQL function name. -type FuncName string - -// SQL Function name constants -const ( - // Conditionals - sqlFnCoalesce FuncName = "COALESCE" - sqlFnNullIf FuncName = "NULLIF" - - // Conversion - sqlFnCast FuncName = "CAST" - - // Date and time - sqlFnDateAdd FuncName = "DATE_ADD" - sqlFnDateDiff FuncName = "DATE_DIFF" - sqlFnExtract FuncName = "EXTRACT" - sqlFnToString FuncName = "TO_STRING" - sqlFnToTimestamp FuncName = "TO_TIMESTAMP" - sqlFnUTCNow FuncName = "UTCNOW" - - // String - sqlFnCharLength FuncName = "CHAR_LENGTH" - sqlFnCharacterLength FuncName = "CHARACTER_LENGTH" - sqlFnLower FuncName = "LOWER" - sqlFnSubstring FuncName = "SUBSTRING" - sqlFnTrim FuncName = "TRIM" - sqlFnUpper FuncName = "UPPER" -) - -var ( - errUnimplementedCast = errors.New("This cast not yet implemented") - errNonStringTrimArg = errors.New("TRIM() received a non-string argument") - errNonTimestampArg = errors.New("Expected a timestamp argument") -) - -func (e *FuncExpr) getFunctionName() FuncName { - switch { - case e.SFunc != nil: - return FuncName(strings.ToUpper(e.SFunc.FunctionName)) - case e.Count != nil: - return FuncName(aggFnCount) - case e.Cast != nil: - return sqlFnCast - case e.Substring != nil: - return sqlFnSubstring - case e.Extract != nil: - return sqlFnExtract - case e.Trim != nil: - return sqlFnTrim - case e.DateAdd != nil: - return sqlFnDateAdd - case e.DateDiff != nil: - return sqlFnDateDiff - default: - return "" - } -} - -// evalSQLFnNode assumes that the FuncExpr is not an aggregation -// function. -func (e *FuncExpr) evalSQLFnNode(r Record) (res *Value, err error) { - // Handle functions that have phrase arguments - switch e.getFunctionName() { - case sqlFnCast: - expr := e.Cast.Expr - res, err = expr.castTo(r, strings.ToUpper(e.Cast.CastType)) - return - - case sqlFnSubstring: - return handleSQLSubstring(r, e.Substring) - - case sqlFnExtract: - return handleSQLExtract(r, e.Extract) - - case sqlFnTrim: - return handleSQLTrim(r, e.Trim) - - case sqlFnDateAdd: - return handleDateAdd(r, e.DateAdd) - - case sqlFnDateDiff: - return handleDateDiff(r, e.DateDiff) - - } - - // For all simple argument functions, we evaluate the arguments here - argVals := make([]*Value, len(e.SFunc.ArgsList)) - for i, arg := range e.SFunc.ArgsList { - argVals[i], err = arg.evalNode(r) - if err != nil { - return nil, err - } - } - - switch e.getFunctionName() { - case sqlFnCoalesce: - return coalesce(argVals) - - case sqlFnNullIf: - return nullif(argVals[0], argVals[1]) - - case sqlFnCharLength, sqlFnCharacterLength: - return charlen(argVals[0]) - - case sqlFnLower: - return lowerCase(argVals[0]) - - case sqlFnUpper: - return upperCase(argVals[0]) - - case sqlFnUTCNow: - return handleUTCNow() - - case sqlFnToString, sqlFnToTimestamp: - // TODO: implement - fallthrough - - default: - return nil, errNotImplemented - } -} - -func coalesce(args []*Value) (res *Value, err error) { - for _, arg := range args { - if arg.IsNull() { - continue - } - return arg, nil - } - return FromNull(), nil -} - -func nullif(v1, v2 *Value) (res *Value, err error) { - // Handle Null cases - if v1.IsNull() || v2.IsNull() { - return v1, nil - } - - err = inferTypesForCmp(v1, v2) - if err != nil { - return nil, err - } - - atleastOneNumeric := v1.isNumeric() || v2.isNumeric() - bothNumeric := v1.isNumeric() && v2.isNumeric() - if atleastOneNumeric || !bothNumeric { - return v1, nil - } - - if v1.SameTypeAs(*v2) { - return v1, nil - } - 
- cmpResult, cmpErr := v1.compareOp(opEq, v2) - if cmpErr != nil { - return nil, cmpErr - } - - if cmpResult { - return FromNull(), nil - } - - return v1, nil -} - -func charlen(v *Value) (*Value, error) { - inferTypeAsString(v) - s, ok := v.ToString() - if !ok { - err := fmt.Errorf("%s/%s expects a string argument", sqlFnCharLength, sqlFnCharacterLength) - return nil, errIncorrectSQLFunctionArgumentType(err) - } - return FromInt(int64(len([]rune(s)))), nil -} - -func lowerCase(v *Value) (*Value, error) { - inferTypeAsString(v) - s, ok := v.ToString() - if !ok { - err := fmt.Errorf("%s expects a string argument", sqlFnLower) - return nil, errIncorrectSQLFunctionArgumentType(err) - } - return FromString(strings.ToLower(s)), nil -} - -func upperCase(v *Value) (*Value, error) { - inferTypeAsString(v) - s, ok := v.ToString() - if !ok { - err := fmt.Errorf("%s expects a string argument", sqlFnUpper) - return nil, errIncorrectSQLFunctionArgumentType(err) - } - return FromString(strings.ToUpper(s)), nil -} - -func handleDateAdd(r Record, d *DateAddFunc) (*Value, error) { - q, err := d.Quantity.evalNode(r) - if err != nil { - return nil, err - } - inferTypeForArithOp(q) - qty, ok := q.ToFloat() - if !ok { - return nil, fmt.Errorf("QUANTITY must be a numeric argument to %s()", sqlFnDateAdd) - } - - ts, err := d.Timestamp.evalNode(r) - if err != nil { - return nil, err - } - if err = inferTypeAsTimestamp(ts); err != nil { - return nil, err - } - t, ok := ts.ToTimestamp() - if !ok { - return nil, fmt.Errorf("%s() expects a timestamp argument", sqlFnDateAdd) - } - - return dateAdd(strings.ToUpper(d.DatePart), qty, t) -} - -func handleDateDiff(r Record, d *DateDiffFunc) (*Value, error) { - tval1, err := d.Timestamp1.evalNode(r) - if err != nil { - return nil, err - } - if err = inferTypeAsTimestamp(tval1); err != nil { - return nil, err - } - ts1, ok := tval1.ToTimestamp() - if !ok { - return nil, fmt.Errorf("%s() expects two timestamp arguments", sqlFnDateDiff) - } - - tval2, err := d.Timestamp2.evalNode(r) - if err != nil { - return nil, err - } - if err = inferTypeAsTimestamp(tval2); err != nil { - return nil, err - } - ts2, ok := tval2.ToTimestamp() - if !ok { - return nil, fmt.Errorf("%s() expects two timestamp arguments", sqlFnDateDiff) - } - - return dateDiff(strings.ToUpper(d.DatePart), ts1, ts2) -} - -func handleUTCNow() (*Value, error) { - return FromTimestamp(time.Now().UTC()), nil -} - -func handleSQLSubstring(r Record, e *SubstringFunc) (val *Value, err error) { - // Both forms `SUBSTRING('abc' FROM 2 FOR 1)` and - // SUBSTRING('abc', 2, 1) are supported. 
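evalSQLSubstring itself is defined elsewhere in the package (not in this hunk), so the following is only a sketch of the usual semantics under stated assumptions: SQL SUBSTRING indexes characters (runes) starting at 1, a start before the string is clamped, and a negative length means "to the end" (the handler below passes -1 when no FOR/length argument is given; a negative user-supplied length is rejected before this point). Names here are placeholders.

```go
package main

import "fmt"

// sqlSubstring approximates SQL SUBSTRING semantics on runes:
// start is 1-based; length < 0 means "to the end of the string".
func sqlSubstring(s string, start, length int) string {
	rs := []rune(s)
	// Convert to 0-based and clamp into range.
	from := start - 1
	if from < 0 {
		from = 0
	}
	if from > len(rs) {
		from = len(rs)
	}
	to := len(rs)
	if length >= 0 && from+length < to {
		to = from + length
	}
	return string(rs[from:to])
}

func main() {
	fmt.Println(sqlSubstring("abc", 2, 1))  // "b"  (SUBSTRING('abc' FROM 2 FOR 1))
	fmt.Println(sqlSubstring("abc", 2, -1)) // "bc" (SUBSTRING('abc', 2))
}
```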
- - // Evaluate the string argument - v1, err := e.Expr.evalNode(r) - if err != nil { - return nil, err - } - inferTypeAsString(v1) - s, ok := v1.ToString() - if !ok { - err := fmt.Errorf("Incorrect argument type passed to %s", sqlFnSubstring) - return nil, errIncorrectSQLFunctionArgumentType(err) - } - - // Assemble other arguments - arg2, arg3 := e.From, e.For - // Check if the second form of substring is being used - if e.From == nil { - arg2, arg3 = e.Arg2, e.Arg3 - } - - // Evaluate the FROM argument - v2, err := arg2.evalNode(r) - if err != nil { - return nil, err - } - inferTypeForArithOp(v2) - startIdx, ok := v2.ToInt() - if !ok { - err := fmt.Errorf("Incorrect type for start index argument in %s", sqlFnSubstring) - return nil, errIncorrectSQLFunctionArgumentType(err) - } - - length := -1 - // Evaluate the optional FOR argument - if arg3 != nil { - v3, err := arg3.evalNode(r) - if err != nil { - return nil, err - } - inferTypeForArithOp(v3) - lenInt, ok := v3.ToInt() - if !ok { - err := fmt.Errorf("Incorrect type for length argument in %s", sqlFnSubstring) - return nil, errIncorrectSQLFunctionArgumentType(err) - } - length = int(lenInt) - if length < 0 { - err := fmt.Errorf("Negative length argument in %s", sqlFnSubstring) - return nil, errIncorrectSQLFunctionArgumentType(err) - } - } - - res, err := evalSQLSubstring(s, int(startIdx), length) - return FromString(res), err -} - -func handleSQLTrim(r Record, e *TrimFunc) (res *Value, err error) { - chars := "" - ok := false - if e.TrimChars != nil { - charsV, cerr := e.TrimChars.evalNode(r) - if cerr != nil { - return nil, cerr - } - inferTypeAsString(charsV) - chars, ok = charsV.ToString() - if !ok { - return nil, errNonStringTrimArg - } - } - - fromV, ferr := e.TrimFrom.evalNode(r) - if ferr != nil { - return nil, ferr - } - inferTypeAsString(fromV) - from, ok := fromV.ToString() - if !ok { - return nil, errNonStringTrimArg - } - - result, terr := evalSQLTrim(e.TrimWhere, chars, from) - if terr != nil { - return nil, terr - } - return FromString(result), nil -} - -func handleSQLExtract(r Record, e *ExtractFunc) (res *Value, err error) { - timeVal, verr := e.From.evalNode(r) - if verr != nil { - return nil, verr - } - - if err = inferTypeAsTimestamp(timeVal); err != nil { - return nil, err - } - - t, ok := timeVal.ToTimestamp() - if !ok { - return nil, errNonTimestampArg - } - - return extract(strings.ToUpper(e.Timeword), t) -} - -func errUnsupportedCast(fromType, toType string) error { - return fmt.Errorf("Cannot cast from %v to %v", fromType, toType) -} - -func errCastFailure(msg string) error { - return fmt.Errorf("Error casting: %s", msg) -} - -// Allowed cast types -const ( - castBool = "BOOL" - castInt = "INT" - castInteger = "INTEGER" - castString = "STRING" - castFloat = "FLOAT" - castDecimal = "DECIMAL" - castNumeric = "NUMERIC" - castTimestamp = "TIMESTAMP" -) - -func (e *Expression) castTo(r Record, castType string) (res *Value, err error) { - v, err := e.evalNode(r) - if err != nil { - return nil, err - } - - switch castType { - case castInt, castInteger: - i, err := intCast(v) - return FromInt(i), err - - case castFloat: - f, err := floatCast(v) - return FromFloat(f), err - - case castString: - s, err := stringCast(v) - return FromString(s), err - - case castTimestamp: - t, err := timestampCast(v) - return FromTimestamp(t), err - - case castBool: - b, err := boolCast(v) - return FromBool(b), err - - case castDecimal, castNumeric: - fallthrough - - default: - return nil, errUnimplementedCast - } -} - -func intCast(v 
*Value) (int64, error) { - // This conversion truncates floating point numbers to - // integer. - strToInt := func(s string) (int64, bool) { - i, errI := strconv.ParseInt(s, 10, 64) - if errI == nil { - return i, true - } - f, errF := strconv.ParseFloat(s, 64) - if errF == nil { - return int64(f), true - } - return 0, false - } - - switch x := v.value.(type) { - case float64: - // Truncate fractional part - return int64(x), nil - case int64: - return x, nil - case string: - // Parse as number, truncate floating point if - // needed. - res, ok := strToInt(x) - if !ok { - return 0, errCastFailure("could not parse as int") - } - return res, nil - case []byte: - // Parse as number, truncate floating point if - // needed. - res, ok := strToInt(string(x)) - if !ok { - return 0, errCastFailure("could not parse as int") - } - return res, nil - - default: - return 0, errUnsupportedCast(v.GetTypeString(), castInt) - } -} - -func floatCast(v *Value) (float64, error) { - switch x := v.value.(type) { - case float64: - return x, nil - case int: - return float64(x), nil - case string: - f, err := strconv.ParseFloat(x, 64) - if err != nil { - return 0, errCastFailure("could not parse as float") - } - return f, nil - case []byte: - f, err := strconv.ParseFloat(string(x), 64) - if err != nil { - return 0, errCastFailure("could not parse as float") - } - return f, nil - default: - return 0, errUnsupportedCast(v.GetTypeString(), castFloat) - } -} - -func stringCast(v *Value) (string, error) { - switch x := v.value.(type) { - case float64: - return fmt.Sprintf("%v", x), nil - case int64: - return fmt.Sprintf("%v", x), nil - case string: - return x, nil - case []byte: - return string(x), nil - case bool: - return fmt.Sprintf("%v", x), nil - case nil: - // FIXME: verify this case is correct - return "NULL", nil - } - // This does not happen - return "", errCastFailure(fmt.Sprintf("cannot cast %v to string type", v.GetTypeString())) -} - -func timestampCast(v *Value) (t time.Time, _ error) { - switch x := v.value.(type) { - case string: - return parseSQLTimestamp(x) - case []byte: - return parseSQLTimestamp(string(x)) - case time.Time: - return x, nil - default: - return t, errCastFailure(fmt.Sprintf("cannot cast %v to Timestamp type", v.GetTypeString())) - } -} - -func boolCast(v *Value) (b bool, _ error) { - sToB := func(s string) (bool, error) { - switch s { - case "true": - return true, nil - case "false": - return false, nil - default: - return false, errCastFailure("cannot cast to Bool") - } - } - switch x := v.value.(type) { - case bool: - return x, nil - case string: - return sToB(strings.ToLower(x)) - case []byte: - return sToB(strings.ToLower(string(x))) - default: - return false, errCastFailure("cannot cast %v to Bool") - } -} diff --git a/pkg/s3select/sql/jsondata/books.json b/pkg/s3select/sql/jsondata/books.json deleted file mode 100644 index cd2785dc..00000000 --- a/pkg/s3select/sql/jsondata/books.json +++ /dev/null @@ -1,84 +0,0 @@ -{ - "title": "Murder on the Orient Express", - "authorInfo": { - "name": "Agatha Christie", - "yearRange": [1890, 1976], - "penName": "Mary Westmacott" - }, - "genre": "Crime novel", - "publicationHistory": [ - { - "year": 1934, - "publisher": "Collins Crime Club (London)", - "type": "Hardcover", - "pages": 256 - }, - { - "year": 1934, - "publisher": "Dodd Mead and Company (New York)", - "type": "Hardcover", - "pages": 302 - }, - { - "year": 2011, - "publisher": "Harper Collins", - "type": "Paperback", - "pages": 265 - } - ] -} -{ - "title": "The Robots of Dawn", - 
"authorInfo": { - "name": "Isaac Asimov", - "yearRange": [1920, 1992], - "penName": "Paul French" - }, - "genre": "Science fiction", - "publicationHistory": [ - { - "year": 1983, - "publisher": "Phantasia Press", - "type": "Hardcover", - "pages": 336 - }, - { - "year": 1984, - "publisher": "Granada", - "type": "Hardcover", - "pages": 419 - }, - { - "year": 2018, - "publisher": "Harper Voyager", - "type": "Paperback", - "pages": 432 - } - ] -} -{ - "title": "Pigs Have Wings", - "authorInfo": { - "name": "P. G. Wodehouse", - "yearRange": [1881, 1975] - }, - "genre": "Comic novel", - "publicationHistory": [ - { - "year": 1952, - "publisher": "Doubleday & Company", - "type": "Hardcover" - }, - { - "year": 2000, - "publisher": "Harry N. Abrams", - "type": "Hardcover" - }, - { - "year": 2019, - "publisher": "Ulverscroft Collections", - "type": "Paperback", - "pages": 294 - } - ] -} diff --git a/pkg/s3select/sql/jsonpath.go b/pkg/s3select/sql/jsonpath.go deleted file mode 100644 index 200b2d20..00000000 --- a/pkg/s3select/sql/jsonpath.go +++ /dev/null @@ -1,128 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sql - -import ( - "errors" - - "github.com/bcicen/jstream" - "github.com/minio/simdjson-go" -) - -var ( - errKeyLookup = errors.New("Cannot look up key in non-object value") - errIndexLookup = errors.New("Cannot look up array index in non-array value") - errWildcardObjectLookup = errors.New("Object wildcard used on non-object value") - errWildcardArrayLookup = errors.New("Array wildcard used on non-array value") - errWilcardObjectUsageInvalid = errors.New("Invalid usage of object wildcard") -) - -// jsonpathEval evaluates a JSON path and returns the value at the path. -// If the value should be considered flat (from wildcards) any array returned should be considered individual values. 
-func jsonpathEval(p []*JSONPathElement, v interface{}) (r interface{}, flat bool, err error) { - // fmt.Printf("JPATHexpr: %v jsonobj: %v\n\n", p, v) - if len(p) == 0 || v == nil { - return v, false, nil - } - - switch { - case p[0].Key != nil: - key := p[0].Key.keyString() - - switch kvs := v.(type) { - case jstream.KVS: - for _, kv := range kvs { - if kv.Key == key { - return jsonpathEval(p[1:], kv.Value) - } - } - // Key not found - return nil result - return nil, false, nil - case simdjson.Object: - elem := kvs.FindKey(key, nil) - if elem == nil { - // Key not found - return nil result - return nil, false, nil - } - val, err := IterToValue(elem.Iter) - if err != nil { - return nil, false, err - } - return jsonpathEval(p[1:], val) - default: - return nil, false, errKeyLookup - } - - case p[0].Index != nil: - idx := *p[0].Index - - arr, ok := v.([]interface{}) - if !ok { - return nil, false, errIndexLookup - } - - if idx >= len(arr) { - return nil, false, nil - } - return jsonpathEval(p[1:], arr[idx]) - - case p[0].ObjectWildcard: - switch kvs := v.(type) { - case jstream.KVS: - if len(p[1:]) > 0 { - return nil, false, errWilcardObjectUsageInvalid - } - - return kvs, false, nil - case simdjson.Object: - if len(p[1:]) > 0 { - return nil, false, errWilcardObjectUsageInvalid - } - - return kvs, false, nil - default: - return nil, false, errWildcardObjectLookup - } - - case p[0].ArrayWildcard: - arr, ok := v.([]interface{}) - if !ok { - return nil, false, errWildcardArrayLookup - } - - // Lookup remainder of path in each array element and - // make result array. - var result []interface{} - for _, a := range arr { - rval, flatten, err := jsonpathEval(p[1:], a) - if err != nil { - return nil, false, err - } - - if flatten { - // Flatten if array. - if arr, ok := rval.([]interface{}); ok { - result = append(result, arr...) - continue - } - } - result = append(result, rval) - } - return result, true, nil - } - panic("cannot reach here") -} diff --git a/pkg/s3select/sql/jsonpath_test.go b/pkg/s3select/sql/jsonpath_test.go deleted file mode 100644 index 53fcd136..00000000 --- a/pkg/s3select/sql/jsonpath_test.go +++ /dev/null @@ -1,96 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/alecthomas/participle" - "github.com/bcicen/jstream" -) - -func getJSONStructs(b []byte) ([]interface{}, error) { - dec := jstream.NewDecoder(bytes.NewBuffer(b), 0).ObjectAsKVS() - var result []interface{} - for parsedVal := range dec.Stream() { - result = append(result, parsedVal.Value) - } - if err := dec.Err(); err != nil { - return nil, err - } - return result, nil -} - -func TestJsonpathEval(t *testing.T) { - f, err := os.Open(filepath.Join("jsondata", "books.json")) - if err != nil { - t.Fatal(err) - } - - b, err := ioutil.ReadAll(f) - if err != nil { - t.Fatal(err) - } - - p := participle.MustBuild( - &JSONPath{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - ) - cases := []struct { - str string - res []interface{} - }{ - {"s.title", []interface{}{"Murder on the Orient Express", "The Robots of Dawn", "Pigs Have Wings"}}, - {"s.authorInfo.yearRange", []interface{}{[]interface{}{1890.0, 1976.0}, []interface{}{1920.0, 1992.0}, []interface{}{1881.0, 1975.0}}}, - {"s.authorInfo.name", []interface{}{"Agatha Christie", "Isaac Asimov", "P. G. Wodehouse"}}, - {"s.authorInfo.yearRange[0]", []interface{}{1890.0, 1920.0, 1881.0}}, - {"s.publicationHistory[0].pages", []interface{}{256.0, 336.0, nil}}, - } - for i, tc := range cases { - jp := JSONPath{} - err := p.ParseString(tc.str, &jp) - // fmt.Println(jp) - if err != nil { - t.Fatalf("parse failed!: %d %v %s", i, err, tc) - } - - // Read only the first json object from the file - recs, err := getJSONStructs(b) - if err != nil || len(recs) != 3 { - t.Fatalf("%v or length was not 3", err) - } - - for j, rec := range recs { - // fmt.Println(rec) - r, _, err := jsonpathEval(jp.PathExpr, rec) - if err != nil { - t.Errorf("Error: %d %d %v", i, j, err) - } - if !reflect.DeepEqual(r, tc.res[j]) { - fmt.Printf("%#v (%v) != %v (%v)\n", r, reflect.TypeOf(r), tc.res[j], reflect.TypeOf(tc.res[j])) - t.Errorf("case: %d %d failed", i, j) - } - } - } -} diff --git a/pkg/s3select/sql/parser.go b/pkg/s3select/sql/parser.go deleted file mode 100644 index 09500806..00000000 --- a/pkg/s3select/sql/parser.go +++ /dev/null @@ -1,366 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import ( - "strings" - - "github.com/alecthomas/participle" - "github.com/alecthomas/participle/lexer" -) - -// Types with custom Capture interface for parsing - -// Boolean is a type for a parsed Boolean literal -type Boolean bool - -// Capture interface used by participle -func (b *Boolean) Capture(values []string) error { - *b = strings.ToLower(values[0]) == "true" - return nil -} - -// LiteralString is a type for parsed SQL string literals -type LiteralString string - -// Capture interface used by participle -func (ls *LiteralString) Capture(values []string) error { - // Remove enclosing single quote - n := len(values[0]) - r := values[0][1 : n-1] - // Translate doubled quotes - *ls = LiteralString(strings.Replace(r, "''", "'", -1)) - return nil -} - -// LiteralList is a type for parsed SQL lists literals -type LiteralList []string - -// Capture interface used by participle -func (ls *LiteralList) Capture(values []string) error { - // Remove enclosing parenthesis. - n := len(values[0]) - r := values[0][1 : n-1] - // Translate doubled quotes - *ls = LiteralList(strings.Split(r, ",")) - return nil -} - -// ObjectKey is a type for parsed strings occurring in key paths -type ObjectKey struct { - Lit *LiteralString `parser:" \"[\" @LitString \"]\""` - ID *Identifier `parser:"| \".\" @@"` -} - -// QuotedIdentifier is a type for parsed strings that are double -// quoted. -type QuotedIdentifier string - -// Capture interface used by participle -func (qi *QuotedIdentifier) Capture(values []string) error { - // Remove enclosing quotes - n := len(values[0]) - r := values[0][1 : n-1] - - // Translate doubled quotes - *qi = QuotedIdentifier(strings.Replace(r, `""`, `"`, -1)) - return nil -} - -// Types representing AST of SQL statement. Only SELECT is supported. - -// Select is the top level AST node type -type Select struct { - Expression *SelectExpression `parser:"\"SELECT\" @@"` - From *TableExpression `parser:"\"FROM\" @@"` - Where *Expression `parser:"( \"WHERE\" @@ )?"` - Limit *LitValue `parser:"( \"LIMIT\" @@ )?"` -} - -// SelectExpression represents the items requested in the select -// statement -type SelectExpression struct { - All bool `parser:" @\"*\""` - Expressions []*AliasedExpression `parser:"| @@ { \",\" @@ }"` -} - -// TableExpression represents the FROM clause -type TableExpression struct { - Table *JSONPath `parser:"@@"` - As string `parser:"( \"AS\"? @Ident )?"` -} - -// JSONPathElement represents a keypath component -type JSONPathElement struct { - Key *ObjectKey `parser:" @@"` // ['name'] and .name forms - Index *int `parser:"| \"[\" @Number \"]\""` // [3] form - ObjectWildcard bool `parser:"| @\".*\""` // .* form - ArrayWildcard bool `parser:"| @\"[*]\""` // [*] form -} - -// JSONPath represents a keypath. -// Instances should be treated idempotent and not change once created. 
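// Editor's note (illustration, not from the deleted file): for the keypath
// "S3Object.books[*].title", BaseKey captures the identifier "S3Object" and
// PathExpr holds three elements: a Key for .books, an ArrayWildcard for [*],
// and a Key for .title. pathString lazily caches the String() form, which is
// why parsed instances are expected not to be mutated after construction.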
-type JSONPath struct { - BaseKey *Identifier `parser:" @@"` - PathExpr []*JSONPathElement `parser:"(@@)*"` - - // Cached values: - pathString string -} - -// AliasedExpression is an expression that can be optionally named -type AliasedExpression struct { - Expression *Expression `parser:"@@"` - As string `parser:"[ \"AS\" @Ident ]"` -} - -// Grammar for Expression -// -// Expression → AndCondition ("OR" AndCondition)* -// AndCondition → Condition ("AND" Condition)* -// Condition → "NOT" Condition | ConditionExpression -// ConditionExpression → ValueExpression ("=" | "<>" | "<=" | ">=" | "<" | ">") ValueExpression -// | ValueExpression "LIKE" ValueExpression ("ESCAPE" LitString)? -// | ValueExpression ("NOT"? "BETWEEN" ValueExpression "AND" ValueExpression) -// | ValueExpression "IN" "(" Expression ("," Expression)* ")" -// | ValueExpression -// ValueExpression → Operand -// -// Operand grammar follows below - -// Expression represents a logical disjunction of clauses -type Expression struct { - And []*AndCondition `parser:"@@ ( \"OR\" @@ )*"` -} - -// ListExpr represents a literal list with elements as expressions. -type ListExpr struct { - Elements []*Expression `parser:"\"(\" @@ ( \",\" @@ )* \")\" | \"[\" @@ ( \",\" @@ )* \"]\""` -} - -// AndCondition represents logical conjunction of clauses -type AndCondition struct { - Condition []*Condition `parser:"@@ ( \"AND\" @@ )*"` -} - -// Condition represents a negation or a condition operand -type Condition struct { - Operand *ConditionOperand `parser:" @@"` - Not *Condition `parser:"| \"NOT\" @@"` -} - -// ConditionOperand is a operand followed by an an optional operation -// expression -type ConditionOperand struct { - Operand *Operand `parser:"@@"` - ConditionRHS *ConditionRHS `parser:"@@?"` -} - -// ConditionRHS represents the right-hand-side of Compare, Between, In -// or Like expressions. -type ConditionRHS struct { - Compare *Compare `parser:" @@"` - Between *Between `parser:"| @@"` - In *In `parser:"| \"IN\" @@"` - Like *Like `parser:"| @@"` -} - -// Compare represents the RHS of a comparison expression -type Compare struct { - Operator string `parser:"@( \"<>\" | \"<=\" | \">=\" | \"=\" | \"<\" | \">\" | \"!=\" )"` - Operand *Operand `parser:" @@"` -} - -// Like represents the RHS of a LIKE expression -type Like struct { - Not bool `parser:" @\"NOT\"? "` - Pattern *Operand `parser:" \"LIKE\" @@ "` - EscapeChar *Operand `parser:" (\"ESCAPE\" @@)? "` -} - -// Between represents the RHS of a BETWEEN expression -type Between struct { - Not bool `parser:" @\"NOT\"? "` - Start *Operand `parser:" \"BETWEEN\" @@ "` - End *Operand `parser:" \"AND\" @@ "` -} - -// In represents the RHS of an IN expression -type In struct { - ListExpression *Expression `parser:"@@ "` -} - -// Grammar for Operand: -// -// operand → multOp ( ("-" | "+") multOp )* -// multOp → unary ( ("/" | "*" | "%") unary )* -// unary → "-" unary | primary -// primary → Value | Variable | "(" expression ")" -// - -// An Operand is a single term followed by an optional sequence of -// terms separated by +/- -type Operand struct { - Left *MultOp `parser:"@@"` - Right []*OpFactor `parser:"(@@)*"` -} - -// OpFactor represents the right-side of a +/- operation. -type OpFactor struct { - Op string `parser:"@(\"+\" | \"-\")"` - Right *MultOp `parser:"@@"` -} - -// MultOp represents a single term followed by an optional sequence of -// terms separated by *, / or % operators. 
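// Editor's note (illustration, not from the deleted file): under the operand
// grammar above, "2 + 3 * s.id" parses as an Operand whose Left MultOp is the
// literal 2 and whose single OpFactor is {Op: "+", Right: the MultOp for
// 3 * s.id}, so *, / and % bind tighter than + and -. A chain such as
// "1 - 2 - 3" becomes one Left term with two OpFactor entries and is
// evaluated left to right.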
-type MultOp struct { - Left *UnaryTerm `parser:"@@"` - Right []*OpUnaryTerm `parser:"(@@)*"` -} - -// OpUnaryTerm represents the right side of *, / or % binary operations. -type OpUnaryTerm struct { - Op string `parser:"@(\"*\" | \"/\" | \"%\")"` - Right *UnaryTerm `parser:"@@"` -} - -// UnaryTerm represents a single negated term or a primary term -type UnaryTerm struct { - Negated *NegatedTerm `parser:" @@"` - Primary *PrimaryTerm `parser:"| @@"` -} - -// NegatedTerm has a leading minus sign. -type NegatedTerm struct { - Term *PrimaryTerm `parser:"\"-\" @@"` -} - -// PrimaryTerm represents a Value, Path expression, a Sub-expression -// or a function call. -type PrimaryTerm struct { - Value *LitValue `parser:" @@"` - JPathExpr *JSONPath `parser:"| @@"` - ListExpr *ListExpr `parser:"| @@"` - SubExpression *Expression `parser:"| \"(\" @@ \")\""` - // Include function expressions here. - FuncCall *FuncExpr `parser:"| @@"` -} - -// FuncExpr represents a function call -type FuncExpr struct { - SFunc *SimpleArgFunc `parser:" @@"` - Count *CountFunc `parser:"| @@"` - Cast *CastFunc `parser:"| @@"` - Substring *SubstringFunc `parser:"| @@"` - Extract *ExtractFunc `parser:"| @@"` - Trim *TrimFunc `parser:"| @@"` - DateAdd *DateAddFunc `parser:"| @@"` - DateDiff *DateDiffFunc `parser:"| @@"` - - // Used during evaluation for aggregation funcs - aggregate *aggVal -} - -// SimpleArgFunc represents functions with simple expression -// arguments. -type SimpleArgFunc struct { - FunctionName string `parser:" @(\"AVG\" | \"MAX\" | \"MIN\" | \"SUM\" | \"COALESCE\" | \"NULLIF\" | \"TO_STRING\" | \"TO_TIMESTAMP\" | \"UTCNOW\" | \"CHAR_LENGTH\" | \"CHARACTER_LENGTH\" | \"LOWER\" | \"UPPER\") "` - - ArgsList []*Expression `parser:"\"(\" (@@ (\",\" @@)*)?\")\""` -} - -// CountFunc represents the COUNT sql function -type CountFunc struct { - StarArg bool `parser:" \"COUNT\" \"(\" ( @\"*\"?"` - ExprArg *Expression `parser:" @@? )! \")\""` -} - -// CastFunc represents CAST sql function -type CastFunc struct { - Expr *Expression `parser:" \"CAST\" \"(\" @@ "` - CastType string `parser:" \"AS\" @(\"BOOL\" | \"INT\" | \"INTEGER\" | \"STRING\" | \"FLOAT\" | \"DECIMAL\" | \"NUMERIC\" | \"TIMESTAMP\") \")\" "` -} - -// SubstringFunc represents SUBSTRING sql function -type SubstringFunc struct { - Expr *PrimaryTerm `parser:" \"SUBSTRING\" \"(\" @@ "` - From *Operand `parser:" ( \"FROM\" @@ "` - For *Operand `parser:" (\"FOR\" @@)? \")\" "` - Arg2 *Operand `parser:" | \",\" @@ "` - Arg3 *Operand `parser:" (\",\" @@)? \")\" )"` -} - -// ExtractFunc represents EXTRACT sql function -type ExtractFunc struct { - Timeword string `parser:" \"EXTRACT\" \"(\" @( \"YEAR\":Timeword | \"MONTH\":Timeword | \"DAY\":Timeword | \"HOUR\":Timeword | \"MINUTE\":Timeword | \"SECOND\":Timeword | \"TIMEZONE_HOUR\":Timeword | \"TIMEZONE_MINUTE\":Timeword ) "` - From *PrimaryTerm `parser:" \"FROM\" @@ \")\" "` -} - -// TrimFunc represents TRIM sql function -type TrimFunc struct { - TrimWhere *string `parser:" \"TRIM\" \"(\" ( @( \"LEADING\" | \"TRAILING\" | \"BOTH\" ) "` - TrimChars *PrimaryTerm `parser:" @@? "` - TrimFrom *PrimaryTerm `parser:" \"FROM\" )? 
@@ \")\" "` -} - -// DateAddFunc represents the DATE_ADD function -type DateAddFunc struct { - DatePart string `parser:" \"DATE_ADD\" \"(\" @( \"YEAR\":Timeword | \"MONTH\":Timeword | \"DAY\":Timeword | \"HOUR\":Timeword | \"MINUTE\":Timeword | \"SECOND\":Timeword ) \",\""` - Quantity *Operand `parser:" @@ \",\""` - Timestamp *PrimaryTerm `parser:" @@ \")\""` -} - -// DateDiffFunc represents the DATE_DIFF function -type DateDiffFunc struct { - DatePart string `parser:" \"DATE_DIFF\" \"(\" @( \"YEAR\":Timeword | \"MONTH\":Timeword | \"DAY\":Timeword | \"HOUR\":Timeword | \"MINUTE\":Timeword | \"SECOND\":Timeword ) \",\" "` - Timestamp1 *PrimaryTerm `parser:" @@ \",\" "` - Timestamp2 *PrimaryTerm `parser:" @@ \")\" "` -} - -// LitValue represents a literal value parsed from the sql -type LitValue struct { - Number *float64 `parser:"( @Number"` - String *LiteralString `parser:" | @LitString"` - Boolean *Boolean `parser:" | @(\"TRUE\" | \"FALSE\")"` - Null bool `parser:" | @\"NULL\")"` -} - -// Identifier represents a parsed identifier -type Identifier struct { - Unquoted *string `parser:" @Ident"` - Quoted *QuotedIdentifier `parser:"| @QuotIdent"` -} - -var ( - sqlLexer = lexer.Must(lexer.Regexp(`(\s+)` + - `|(?P(?i)\b(?:YEAR|MONTH|DAY|HOUR|MINUTE|SECOND|TIMEZONE_HOUR|TIMEZONE_MINUTE)\b)` + - `|(?P(?i)\b(?:SELECT|FROM|TOP|DISTINCT|ALL|WHERE|GROUP|BY|HAVING|UNION|MINUS|EXCEPT|INTERSECT|ORDER|LIMIT|OFFSET|TRUE|FALSE|NULL|IS|NOT|ANY|SOME|BETWEEN|AND|OR|LIKE|ESCAPE|AS|IN|BOOL|INT|INTEGER|STRING|FLOAT|DECIMAL|NUMERIC|TIMESTAMP|AVG|COUNT|MAX|MIN|SUM|COALESCE|NULLIF|CAST|DATE_ADD|DATE_DIFF|EXTRACT|TO_STRING|TO_TIMESTAMP|UTCNOW|CHAR_LENGTH|CHARACTER_LENGTH|LOWER|SUBSTRING|TRIM|UPPER|LEADING|TRAILING|BOTH|FOR)\b)` + - `|(?P[a-zA-Z_][a-zA-Z0-9_]*)` + - `|(?P"([^"]*("")?)*")` + - `|(?P\d*\.?\d+([eE][-+]?\d+)?)` + - `|(?P'([^']*('')?)*')` + - `|(?P<>|!=|<=|>=|\.\*|\[\*\]|[-+*/%,.()=<>\[\]])`, - )) - - // SQLParser is used to parse SQL statements - SQLParser = participle.MustBuild( - &Select{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - participle.CaseInsensitive("Timeword"), - ) -) diff --git a/pkg/s3select/sql/parser_test.go b/pkg/s3select/sql/parser_test.go deleted file mode 100644 index 6046d5a4..00000000 --- a/pkg/s3select/sql/parser_test.go +++ /dev/null @@ -1,385 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import ( - "bytes" - "testing" - - "github.com/alecthomas/participle" - "github.com/alecthomas/participle/lexer" -) - -func TestJSONPathElement(t *testing.T) { - p := participle.MustBuild( - &JSONPathElement{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - participle.CaseInsensitive("Timeword"), - ) - - j := JSONPathElement{} - cases := []string{ - // Key - "['name']", ".name", `."name"`, - - // Index - "[2]", "[0]", "[100]", - - // Object wilcard - ".*", - - // array wildcard - "[*]", - } - for i, tc := range cases { - err := p.ParseString(tc, &j) - if err != nil { - t.Fatalf("%d: %v", i, err) - } - // repr.Println(j, repr.Indent(" "), repr.OmitEmpty(true)) - } -} - -func TestJSONPath(t *testing.T) { - p := participle.MustBuild( - &JSONPath{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - participle.CaseInsensitive("Timeword"), - ) - - j := JSONPath{} - cases := []string{ - "S3Object", - "S3Object.id", - "S3Object.book.title", - "S3Object.id[1]", - "S3Object.id['abc']", - "S3Object.id['ab']", - "S3Object.words.*.id", - "S3Object.words.name[*].val", - "S3Object.words.name[*].val[*]", - "S3Object.words.name[*].val.*", - } - for i, tc := range cases { - err := p.ParseString(tc, &j) - if err != nil { - t.Fatalf("%d: %v", i, err) - } - // repr.Println(j, repr.Indent(" "), repr.OmitEmpty(true)) - } - -} - -func TestIdentifierParsing(t *testing.T) { - p := participle.MustBuild( - &Identifier{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - ) - - id := Identifier{} - validCases := []string{ - "a", - "_a", - "abc_a", - "a2", - `"abc"`, - `"abc\a""ac"`, - } - for i, tc := range validCases { - err := p.ParseString(tc, &id) - if err != nil { - t.Fatalf("%d: %v", i, err) - } - // repr.Println(id, repr.Indent(" "), repr.OmitEmpty(true)) - } - - invalidCases := []string{ - "+a", - "-a", - "1a", - `"ab`, - `abc"`, - `aa""a`, - `"a"a"`, - } - for i, tc := range invalidCases { - err := p.ParseString(tc, &id) - if err == nil { - t.Fatalf("%d: %v", i, err) - } - // fmt.Println(tc, err) - } -} - -func TestLiteralStringParsing(t *testing.T) { - var k ObjectKey - p := participle.MustBuild( - &ObjectKey{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - ) - - validCases := []string{ - "['abc']", - "['ab''c']", - "['a''b''c']", - "['abc-x_1##@(*&(#*))/\\']", - } - for i, tc := range validCases { - err := p.ParseString(tc, &k) - if err != nil { - t.Fatalf("%d: %v", i, err) - } - if string(*k.Lit) == "" { - t.Fatalf("Incorrect parse %#v", k) - } - // repr.Println(k, repr.Indent(" "), repr.OmitEmpty(true)) - } - - invalidCases := []string{ - "['abc'']", - "['-abc'sc']", - "[abc']", - "['ac]", - } - for i, tc := range invalidCases { - err := p.ParseString(tc, &k) - if err == nil { - t.Fatalf("%d: %v", i, err) - } - // fmt.Println(tc, err) - } -} - -func TestFunctionParsing(t *testing.T) { - var fex FuncExpr - p := participle.MustBuild( - &FuncExpr{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - participle.CaseInsensitive("Timeword"), - ) - - validCases := []string{ - "count(*)", - "sum(2 + s.id)", - "sum(t)", - "avg(s.id[1])", - "coalesce(s.id[1], 2, 2 + 3)", - - "cast(s as string)", - "cast(s AS INT)", - "cast(s as DECIMAL)", - "extract(YEAR from '2018-01-09')", - "extract(month from '2018-01-09')", - - "extract(hour from '2018-01-09')", - "extract(day from '2018-01-09')", - "substring('abcd' from 2 for 2)", - "substring('abcd' from 2)", - "substring('abcd' , 2 
, 2)", - - "substring('abcd' , 22 )", - "trim(' aab ')", - "trim(leading from ' aab ')", - "trim(trailing from ' aab ')", - "trim(both from ' aab ')", - - "trim(both '12' from ' aab ')", - "trim(leading '12' from ' aab ')", - "trim(trailing '12' from ' aab ')", - "count(23)", - } - for i, tc := range validCases { - err := p.ParseString(tc, &fex) - if err != nil { - t.Fatalf("%d: %v", i, err) - } - // repr.Println(fex, repr.Indent(" "), repr.OmitEmpty(true)) - } -} - -func TestSqlLexer(t *testing.T) { - // s := bytes.NewBuffer([]byte("s.['name'].*.[*].abc.[\"abc\"]")) - s := bytes.NewBuffer([]byte("S3Object.words.*.id")) - // s := bytes.NewBuffer([]byte("COUNT(Id)")) - lex, err := sqlLexer.Lex(s) - if err != nil { - t.Fatal(err) - } - tokens, err := lexer.ConsumeAll(lex) - if err != nil { - t.Fatal(err) - } - // for i, t := range tokens { - // fmt.Printf("%d: %#v\n", i, t) - // } - if len(tokens) != 7 { - t.Fatalf("Expected 7 got %d", len(tokens)) - } -} - -func TestSelectWhere(t *testing.T) { - p := participle.MustBuild( - &Select{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - ) - - s := Select{} - cases := []string{ - "select * from s3object", - "select a, b from s3object s", - "select a, b from s3object as s", - "select a, b from s3object as s where a = 1", - "select a, b from s3object s where a = 1", - "select a, b from s3object where a = 1", - } - for i, tc := range cases { - err := p.ParseString(tc, &s) - if err != nil { - t.Fatalf("%d: %v", i, err) - } - - // repr.Println(s, repr.Indent(" "), repr.OmitEmpty(true)) - } -} - -func TestLikeClause(t *testing.T) { - p := participle.MustBuild( - &Select{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - ) - - s := Select{} - cases := []string{ - `select * from s3object where Name like 'abcd'`, - `select Name like 'abc' from s3object`, - `select * from s3object where Name not like 'abc'`, - `select * from s3object where Name like 'abc' escape 't'`, - `select * from s3object where Name like 'a\%' escape '?'`, - `select * from s3object where Name not like 'abc\' escape '?'`, - `select * from s3object where Name like 'a\%' escape LOWER('?')`, - `select * from s3object where Name not like LOWER('Bc\') escape '?'`, - } - for i, tc := range cases { - err := p.ParseString(tc, &s) - if err != nil { - t.Errorf("%d: %v", i, err) - } - } -} - -func TestBetweenClause(t *testing.T) { - p := participle.MustBuild( - &Select{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - ) - - s := Select{} - cases := []string{ - `select * from s3object where Id between 1 and 2`, - `select * from s3object where Id between 1 and 2 and name = 'Ab'`, - `select * from s3object where Id not between 1 and 2`, - `select * from s3object where Id not between 1 and 2 and name = 'Bc'`, - } - for i, tc := range cases { - err := p.ParseString(tc, &s) - if err != nil { - t.Errorf("%d: %v", i, err) - } - } -} - -func TestFromClauseJSONPath(t *testing.T) { - p := participle.MustBuild( - &Select{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - ) - - s := Select{} - cases := []string{ - "select * from s3object", - "select * from s3object[*].name", - "select * from s3object[*].books[*]", - "select * from s3object[*].books[*].name", - "select * from s3object where name > 2", - "select * from s3object[*].name where name > 2", - "select * from s3object[*].books[*] where name > 2", - "select * from s3object[*].books[*].name where name > 2", - "select * from s3object[*].books[*] s", - 
"select * from s3object[*].books[*].name as s", - "select * from s3object s where name > 2", - "select * from s3object[*].name as s where name > 2", - } - for i, tc := range cases { - err := p.ParseString(tc, &s) - if err != nil { - t.Fatalf("%d: %v", i, err) - } - - // repr.Println(s, repr.Indent(" "), repr.OmitEmpty(true)) - } - -} - -func TestSelectParsing(t *testing.T) { - p := participle.MustBuild( - &Select{}, - participle.Lexer(sqlLexer), - participle.CaseInsensitive("Keyword"), - ) - - s := Select{} - cases := []string{ - "select * from s3object where name > 2 or value > 1 or word > 2", - "select s.word.id + 2 from s3object s", - "select 1-2-3 from s3object s limit 1", - } - for i, tc := range cases { - err := p.ParseString(tc, &s) - if err != nil { - t.Fatalf("%d: %v", i, err) - } - - // repr.Println(s, repr.Indent(" "), repr.OmitEmpty(true)) - } -} - -func TestSqlLexerArithOps(t *testing.T) { - s := bytes.NewBuffer([]byte("year from select month hour distinct")) - lex, err := sqlLexer.Lex(s) - if err != nil { - t.Fatal(err) - } - tokens, err := lexer.ConsumeAll(lex) - if err != nil { - t.Fatal(err) - } - if len(tokens) != 7 { - t.Errorf("Expected 7 got %d", len(tokens)) - } - // for i, t := range tokens { - // fmt.Printf("%d: %#v\n", i, t) - // } -} diff --git a/pkg/s3select/sql/record.go b/pkg/s3select/sql/record.go deleted file mode 100644 index ced50706..00000000 --- a/pkg/s3select/sql/record.go +++ /dev/null @@ -1,141 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sql - -import ( - "fmt" - "io" - - "github.com/minio/simdjson-go" -) - -// SelectObjectFormat specifies the format of the underlying data -type SelectObjectFormat int - -const ( - // SelectFmtUnknown - unknown format (default value) - SelectFmtUnknown SelectObjectFormat = iota - // SelectFmtCSV - CSV format - SelectFmtCSV - // SelectFmtJSON - JSON format - SelectFmtJSON - // SelectFmtSIMDJSON - SIMD JSON format - SelectFmtSIMDJSON - // SelectFmtParquet - Parquet format - SelectFmtParquet -) - -// WriteCSVOpts - encapsulates options for Select CSV output -type WriteCSVOpts struct { - FieldDelimiter rune - Quote rune - QuoteEscape rune - AlwaysQuote bool -} - -// Record - is a type containing columns and their values. -type Record interface { - Get(name string) (*Value, error) - - // Set a value. - // Can return a different record type. - Set(name string, value *Value) (Record, error) - WriteCSV(writer io.Writer, opts WriteCSVOpts) error - WriteJSON(writer io.Writer) error - - // Clone the record and if possible use the destination provided. - Clone(dst Record) Record - Reset() - - // Returns underlying representation - Raw() (SelectObjectFormat, interface{}) - - // Replaces the underlying data - Replace(k interface{}) error -} - -// IterToValue converts a simdjson Iter to its underlying value. -// Objects are returned as simdjson.Object -// Arrays are returned as []interface{} with parsed values. 
-func IterToValue(iter simdjson.Iter) (interface{}, error) { - switch iter.Type() { - case simdjson.TypeString: - v, err := iter.String() - if err != nil { - return nil, err - } - return v, nil - case simdjson.TypeFloat: - v, err := iter.Float() - if err != nil { - return nil, err - } - return v, nil - case simdjson.TypeInt: - v, err := iter.Int() - if err != nil { - return nil, err - } - return v, nil - case simdjson.TypeUint: - v, err := iter.Int() - if err != nil { - // Can't fit into int, convert to float. - v, err := iter.Float() - return v, err - } - return v, nil - case simdjson.TypeBool: - v, err := iter.Bool() - if err != nil { - return nil, err - } - return v, nil - case simdjson.TypeObject: - obj, err := iter.Object(nil) - if err != nil { - return nil, err - } - return *obj, err - case simdjson.TypeArray: - arr, err := iter.Array(nil) - if err != nil { - return nil, err - } - iter := arr.Iter() - var dst []interface{} - var next simdjson.Iter - for { - typ, err := iter.AdvanceIter(&next) - if err != nil { - return nil, err - } - if typ == simdjson.TypeNone { - break - } - v, err := IterToValue(next) - if err != nil { - return nil, err - } - dst = append(dst, v) - } - return dst, err - case simdjson.TypeNull: - return nil, nil - } - return nil, fmt.Errorf("IterToValue: unknown JSON type: %s", iter.Type().String()) -} diff --git a/pkg/s3select/sql/statement.go b/pkg/s3select/sql/statement.go deleted file mode 100644 index 5383221b..00000000 --- a/pkg/s3select/sql/statement.go +++ /dev/null @@ -1,338 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sql - -import ( - "errors" - "fmt" - "strings" - - "github.com/bcicen/jstream" - "github.com/minio/simdjson-go" -) - -var ( - errBadLimitSpecified = errors.New("Limit value must be a positive integer") -) - -const ( - baseTableName = "s3object" -) - -// SelectStatement is the top level parsed and analyzed structure -type SelectStatement struct { - selectAST *Select - - // Analysis result of the statement - selectQProp qProp - - // Result of parsing the limit clause if one is present - // (otherwise -1) - limitValue int64 - - // Count of rows that have been output. - outputCount int64 -} - -// ParseSelectStatement - parses a select query from the given string -// and analyzes it. 
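// Editor's note: hedged usage sketch, not part of the deleted file. The query
// text is illustrative; errors come back wrapped by errQueryParseFailure or
// errQueryAnalysisFailure as shown in the function below.
func exampleParseSelectStatement() (SelectStatement, error) {
	return ParseSelectStatement(
		"SELECT s.title FROM S3Object s WHERE s.genre = 'Comic novel' LIMIT 5")
}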
-func ParseSelectStatement(s string) (stmt SelectStatement, err error) { - var selectAST Select - err = SQLParser.ParseString(s, &selectAST) - if err != nil { - err = errQueryParseFailure(err) - return - } - - // Check if select is "SELECT s.* from S3Object s" - if !selectAST.Expression.All && - len(selectAST.Expression.Expressions) == 1 && - len(selectAST.Expression.Expressions[0].Expression.And) == 1 && - len(selectAST.Expression.Expressions[0].Expression.And[0].Condition) == 1 && - selectAST.Expression.Expressions[0].Expression.And[0].Condition[0].Operand != nil && - selectAST.Expression.Expressions[0].Expression.And[0].Condition[0].Operand.Operand.Left != nil && - selectAST.Expression.Expressions[0].Expression.And[0].Condition[0].Operand.Operand.Left.Left != nil && - selectAST.Expression.Expressions[0].Expression.And[0].Condition[0].Operand.Operand.Left.Left.Primary != nil && - selectAST.Expression.Expressions[0].Expression.And[0].Condition[0].Operand.Operand.Left.Left.Primary.JPathExpr != nil { - if selectAST.Expression.Expressions[0].Expression.And[0].Condition[0].Operand.Operand.Left.Left.Primary.JPathExpr.String() == selectAST.From.As+".*" { - selectAST.Expression.All = true - } - } - stmt.selectAST = &selectAST - - // Check the parsed limit value - stmt.limitValue, err = parseLimit(selectAST.Limit) - if err != nil { - err = errQueryAnalysisFailure(err) - return - } - - // Analyze where clause - if selectAST.Where != nil { - whereQProp := selectAST.Where.analyze(&selectAST) - if whereQProp.err != nil { - err = errQueryAnalysisFailure(fmt.Errorf("Where clause error: %w", whereQProp.err)) - return - } - - if whereQProp.isAggregation { - err = errQueryAnalysisFailure(errors.New("WHERE clause cannot have an aggregation")) - return - } - } - - // Validate table name - err = validateTableName(selectAST.From) - if err != nil { - return - } - - // Analyze main select expression - stmt.selectQProp = selectAST.Expression.analyze(&selectAST) - err = stmt.selectQProp.err - if err != nil { - err = errQueryAnalysisFailure(err) - } - return -} - -func validateTableName(from *TableExpression) error { - if strings.ToLower(from.Table.BaseKey.String()) != baseTableName { - return errBadTableName(errors.New("table name must be `s3object`")) - } - - if len(from.Table.PathExpr) > 0 { - if !from.Table.PathExpr[0].ArrayWildcard { - return errBadTableName(errors.New("keypath table name is invalid - please check the service documentation")) - } - } - return nil -} - -func parseLimit(v *LitValue) (int64, error) { - switch { - case v == nil: - return -1, nil - case v.Number == nil: - return -1, errBadLimitSpecified - default: - r := int64(*v.Number) - if r < 0 { - return -1, errBadLimitSpecified - } - return r, nil - } -} - -// EvalFrom evaluates the From clause on the input record. It only -// applies to JSON input data format (currently). 
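// Editor's note (illustration, not from the deleted file): for a query such
// as "SELECT * FROM S3Object[*].books[*]" over the JSON input
// {"books": [{"t": 1}, {"t": 2}]}, EvalFrom applies the keypath after the
// leading [*] to the decoded record and returns two records, one per element
// of "books". Non-JSON formats take the errDataSource path below.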
-func (e *SelectStatement) EvalFrom(format string, input Record) ([]*Record, error) { - if !e.selectAST.From.HasKeypath() { - return []*Record{&input}, nil - } - _, rawVal := input.Raw() - - if format != "json" { - return nil, errDataSource(errors.New("path not supported")) - } - switch rec := rawVal.(type) { - case jstream.KVS: - txedRec, _, err := jsonpathEval(e.selectAST.From.Table.PathExpr[1:], rec) - if err != nil { - return nil, err - } - - var kvs jstream.KVS - switch v := txedRec.(type) { - case jstream.KVS: - kvs = v - - case []interface{}: - recs := make([]*Record, len(v)) - for i, val := range v { - tmpRec := input.Clone(nil) - if err = tmpRec.Replace(val); err != nil { - return nil, err - } - recs[i] = &tmpRec - } - return recs, nil - - default: - kvs = jstream.KVS{jstream.KV{Key: "_1", Value: v}} - } - - if err = input.Replace(kvs); err != nil { - return nil, err - } - - return []*Record{&input}, nil - case simdjson.Object: - txedRec, _, err := jsonpathEval(e.selectAST.From.Table.PathExpr[1:], rec) - if err != nil { - return nil, err - } - - switch v := txedRec.(type) { - case simdjson.Object: - err := input.Replace(v) - if err != nil { - return nil, err - } - - case []interface{}: - recs := make([]*Record, len(v)) - for i, val := range v { - tmpRec := input.Clone(nil) - if err = tmpRec.Replace(val); err != nil { - return nil, err - } - recs[i] = &tmpRec - } - return recs, nil - - default: - input.Reset() - input, err = input.Set("_1", &Value{value: v}) - if err != nil { - return nil, err - } - } - return []*Record{&input}, nil - } - return nil, errDataSource(errors.New("unexpected non JSON input")) -} - -// IsAggregated returns if the statement involves SQL aggregation -func (e *SelectStatement) IsAggregated() bool { - return e.selectQProp.isAggregation -} - -// AggregateResult - returns the aggregated result after all input -// records have been processed. Applies only to aggregation queries. -func (e *SelectStatement) AggregateResult(output Record) error { - for i, expr := range e.selectAST.Expression.Expressions { - v, err := expr.evalNode(nil) - if err != nil { - return err - } - if expr.As != "" { - output, err = output.Set(expr.As, v) - } else { - output, err = output.Set(fmt.Sprintf("_%d", i+1), v) - } - if err != nil { - return err - } - } - return nil -} - -func (e *SelectStatement) isPassingWhereClause(input Record) (bool, error) { - if e.selectAST.Where == nil { - return true, nil - } - value, err := e.selectAST.Where.evalNode(input) - if err != nil { - return false, err - } - - b, ok := value.ToBool() - if !ok { - err = fmt.Errorf("WHERE expression did not return bool") - return false, err - } - - return b, nil -} - -// AggregateRow - aggregates the input record. Applies only to -// aggregation queries. -func (e *SelectStatement) AggregateRow(input Record) error { - ok, err := e.isPassingWhereClause(input) - if err != nil { - return err - } - if !ok { - return nil - } - - for _, expr := range e.selectAST.Expression.Expressions { - err := expr.aggregateRow(input) - if err != nil { - return err - } - } - return nil -} - -// Eval - evaluates the Select statement for the given record. It -// applies only to non-aggregation queries. -// The function returns whether the statement passed the WHERE clause and should be outputted. 
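// Editor's note: hypothetical driver sketch, not part of the deleted file. It
// shows how the evaluation entry points below are typically combined per
// input record; stmt, in and out are illustrative names, and the Record values
// come from the surrounding s3select format readers.
func exampleEvalOneRecord(stmt *SelectStatement, in, out Record) (Record, bool, error) {
	if stmt.IsAggregated() {
		// Aggregation queries only accumulate state here; the final row is
		// produced later via AggregateResult.
		return nil, false, stmt.AggregateRow(in)
	}
	res, err := stmt.Eval(in, out)
	if err != nil || res == nil {
		// Error, or the record did not pass the WHERE clause.
		return nil, false, err
	}
	// The second return value tells the caller to stop once LIMIT is reached.
	return res, stmt.LimitReached(), nil
}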
-func (e *SelectStatement) Eval(input, output Record) (Record, error) { - ok, err := e.isPassingWhereClause(input) - if err != nil || !ok { - // Either error or row did not pass where clause - return nil, err - } - - if e.selectAST.Expression.All { - // Return the input record for `SELECT * FROM - // .. WHERE ..` - - // Update count of records output. - if e.limitValue > -1 { - e.outputCount++ - } - return input.Clone(output), nil - } - - for i, expr := range e.selectAST.Expression.Expressions { - v, err := expr.evalNode(input) - if err != nil { - return nil, err - } - - // Pick output column names - if expr.As != "" { - output, err = output.Set(expr.As, v) - } else if comp, ok := getLastKeypathComponent(expr.Expression); ok { - output, err = output.Set(comp, v) - } else { - output, err = output.Set(fmt.Sprintf("_%d", i+1), v) - } - if err != nil { - return nil, err - } - } - - // Update count of records output. - if e.limitValue > -1 { - e.outputCount++ - } - - return output, nil -} - -// LimitReached - returns true if the number of records output has -// reached the value of the `LIMIT` clause. -func (e *SelectStatement) LimitReached() bool { - if e.limitValue == -1 { - return false - } - return e.outputCount >= e.limitValue -} diff --git a/pkg/s3select/sql/stringfuncs.go b/pkg/s3select/sql/stringfuncs.go deleted file mode 100644 index b1a23956..00000000 --- a/pkg/s3select/sql/stringfuncs.go +++ /dev/null @@ -1,199 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sql - -import ( - "errors" - "strings" -) - -var ( - errMalformedEscapeSequence = errors.New("Malformed escape sequence in LIKE clause") - errInvalidTrimArg = errors.New("Trim argument is invalid - this should not happen") - errInvalidSubstringIndexLen = errors.New("Substring start index or length falls outside the string") -) - -const ( - percent rune = '%' - underscore rune = '_' - runeZero rune = 0 -) - -func evalSQLLike(text, pattern string, escape rune) (match bool, err error) { - s := []rune{} - prev := runeZero - hasLeadingPercent := false - patLen := len([]rune(pattern)) - for i, r := range pattern { - if i > 0 && prev == escape { - switch r { - case percent, escape, underscore: - s = append(s, r) - prev = r - if r == escape { - prev = runeZero - } - default: - return false, errMalformedEscapeSequence - } - continue - } - - prev = r - - var ok bool - switch r { - case percent: - if len(s) == 0 { - hasLeadingPercent = true - continue - } - - text, ok = matcher(text, string(s), hasLeadingPercent) - if !ok { - return false, nil - } - hasLeadingPercent = true - s = []rune{} - - if i == patLen-1 { - // Last pattern character is a %, so - // we are done. 
- return true, nil - } - - case underscore: - if len(s) == 0 { - text, ok = dropRune(text) - if !ok { - return false, nil - } - continue - } - - text, ok = matcher(text, string(s), hasLeadingPercent) - if !ok { - return false, nil - } - hasLeadingPercent = false - - text, ok = dropRune(text) - if !ok { - return false, nil - } - s = []rune{} - - case escape: - if i == patLen-1 { - return false, errMalformedEscapeSequence - } - // Otherwise do nothing. - - default: - s = append(s, r) - } - - } - if hasLeadingPercent { - return strings.HasSuffix(text, string(s)), nil - } - return string(s) == text, nil -} - -// matcher - Finds `pat` in `text`, and returns the part remainder of -// `text`, after the match. If leadingPercent is false, `pat` must be -// the prefix of `text`, otherwise it must be a substring. -func matcher(text, pat string, leadingPercent bool) (res string, match bool) { - if !leadingPercent { - res = strings.TrimPrefix(text, pat) - if len(text) == len(res) { - return "", false - } - } else { - parts := strings.SplitN(text, pat, 2) - if len(parts) == 1 { - return "", false - } - res = parts[1] - } - return res, true -} - -func dropRune(text string) (res string, ok bool) { - r := []rune(text) - if len(r) == 0 { - return "", false - } - return string(r[1:]), true -} - -func evalSQLSubstring(s string, startIdx, length int) (res string, err error) { - rs := []rune(s) - - // According to s3 document, if startIdx < 1, it is set to 1. - if startIdx < 1 { - startIdx = 1 - } - - if startIdx > len(rs) { - startIdx = len(rs) + 1 - } - - // StartIdx is 1-based in the input - startIdx-- - endIdx := len(rs) - if length != -1 { - if length < 0 { - return "", errInvalidSubstringIndexLen - } - - if length > (endIdx - startIdx) { - length = endIdx - startIdx - } - - endIdx = startIdx + length - } - - return string(rs[startIdx:endIdx]), nil -} - -const ( - trimLeading = "LEADING" - trimTrailing = "TRAILING" - trimBoth = "BOTH" -) - -func evalSQLTrim(where *string, trimChars, text string) (result string, err error) { - cutSet := " " - if trimChars != "" { - cutSet = trimChars - } - - trimFunc := strings.Trim - switch { - case where == nil: - case *where == trimBoth: - case *where == trimLeading: - trimFunc = strings.TrimLeft - case *where == trimTrailing: - trimFunc = strings.TrimRight - default: - return "", errInvalidTrimArg - } - - return trimFunc(text, cutSet), nil -} diff --git a/pkg/s3select/sql/stringfuncs_test.go b/pkg/s3select/sql/stringfuncs_test.go deleted file mode 100644 index 242c2283..00000000 --- a/pkg/s3select/sql/stringfuncs_test.go +++ /dev/null @@ -1,131 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import ( - "testing" -) - -func TestEvalSQLLike(t *testing.T) { - dropCases := []struct { - input, resultExpected string - matchExpected bool - }{ - {"", "", false}, - {"a", "", true}, - {"ab", "b", true}, - {"தமிழ்", "மிழ்", true}, - } - - for i, tc := range dropCases { - res, ok := dropRune(tc.input) - if res != tc.resultExpected || ok != tc.matchExpected { - t.Errorf("DropRune Case %d failed", i) - } - } - - matcherCases := []struct { - iText, iPat string - iHasLeadingPercent bool - resultExpected string - matchExpected bool - }{ - {"abcd", "bcd", false, "", false}, - {"abcd", "bcd", true, "", true}, - {"abcd", "abcd", false, "", true}, - {"abcd", "abcd", true, "", true}, - {"abcd", "ab", false, "cd", true}, - {"abcd", "ab", true, "cd", true}, - {"abcd", "bc", false, "", false}, - {"abcd", "bc", true, "d", true}, - } - - for i, tc := range matcherCases { - res, ok := matcher(tc.iText, tc.iPat, tc.iHasLeadingPercent) - if res != tc.resultExpected || ok != tc.matchExpected { - t.Errorf("Matcher Case %d failed", i) - } - } - - evalCases := []struct { - iText, iPat string - iEsc rune - matchExpected bool - errExpected error - }{ - {"abcd", "abc", runeZero, false, nil}, - {"abcd", "abcd", runeZero, true, nil}, - {"abcd", "abc_", runeZero, true, nil}, - {"abcd", "_bdd", runeZero, false, nil}, - {"abcd", "_b_d", runeZero, true, nil}, - - {"abcd", "____", runeZero, true, nil}, - {"abcd", "____%", runeZero, true, nil}, - {"abcd", "%____", runeZero, true, nil}, - {"abcd", "%__", runeZero, true, nil}, - {"abcd", "____", runeZero, true, nil}, - - {"", "_", runeZero, false, nil}, - {"", "%", runeZero, true, nil}, - {"abcd", "%%%%%", runeZero, true, nil}, - {"abcd", "_____", runeZero, false, nil}, - {"abcd", "%%%%%", runeZero, true, nil}, - - {"a%%d", `a\%\%d`, '\\', true, nil}, - {"a%%d", `a\%d`, '\\', false, nil}, - {`a%%\d`, `a\%\%\\d`, '\\', true, nil}, - {`a%%\`, `a\%\%\\`, '\\', true, nil}, - {`a%__%\`, `a\%\_\_\%\\`, '\\', true, nil}, - - {`a%__%\`, `a\%\_\_\%_`, '\\', true, nil}, - {`a%__%\`, `a\%\_\__`, '\\', false, nil}, - {`a%__%\`, `a\%\_\_%`, '\\', true, nil}, - {`a%__%\`, `a?%?_?_?%\`, '?', true, nil}, - } - - for i, tc := range evalCases { - // fmt.Println("Case:", i) - res, err := evalSQLLike(tc.iText, tc.iPat, tc.iEsc) - if res != tc.matchExpected || err != tc.errExpected { - t.Errorf("Eval Case %d failed: %v %v", i, res, err) - } - } -} - -func TestEvalSQLSubstring(t *testing.T) { - evalCases := []struct { - s string - startIdx int - length int - resExpected string - errExpected error - }{ - {"abcd", 1, 1, "a", nil}, - {"abcd", -1, 1, "a", nil}, - {"abcd", 999, 999, "", nil}, - {"", 999, 999, "", nil}, - {"测试abc", 1, 1, "测", nil}, - {"测试abc", 5, 5, "c", nil}, - } - - for i, tc := range evalCases { - res, err := evalSQLSubstring(tc.s, tc.startIdx, tc.length) - if res != tc.resExpected || err != tc.errExpected { - t.Errorf("Eval Case %d failed: %v %v", i, res, err) - } - } -} diff --git a/pkg/s3select/sql/timestampfuncs.go b/pkg/s3select/sql/timestampfuncs.go deleted file mode 100644 index d8c7f635..00000000 --- a/pkg/s3select/sql/timestampfuncs.go +++ /dev/null @@ -1,198 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sql - -import ( - "time" -) - -const ( - layoutYear = "2006T" - layoutMonth = "2006-01T" - layoutDay = "2006-01-02T" - layoutMinute = "2006-01-02T15:04Z07:00" - layoutSecond = "2006-01-02T15:04:05Z07:00" - layoutNanosecond = "2006-01-02T15:04:05.999999999Z07:00" -) - -var ( - tformats = []string{ - layoutYear, - layoutMonth, - layoutDay, - layoutMinute, - layoutSecond, - layoutNanosecond, - } -) - -func parseSQLTimestamp(s string) (t time.Time, err error) { - for _, f := range tformats { - t, err = time.Parse(f, s) - if err == nil { - break - } - } - return -} - -// FormatSQLTimestamp - returns the a string representation of the -// timestamp as used in S3 Select -func FormatSQLTimestamp(t time.Time) string { - _, zoneOffset := t.Zone() - hasZone := zoneOffset != 0 - hasFracSecond := t.Nanosecond() != 0 - hasSecond := t.Second() != 0 - hasTime := t.Hour() != 0 || t.Minute() != 0 - hasDay := t.Day() != 1 - hasMonth := t.Month() != 1 - - switch { - case hasFracSecond: - return t.Format(layoutNanosecond) - case hasSecond: - return t.Format(layoutSecond) - case hasTime || hasZone: - return t.Format(layoutMinute) - case hasDay: - return t.Format(layoutDay) - case hasMonth: - return t.Format(layoutMonth) - default: - return t.Format(layoutYear) - } -} - -const ( - timePartYear = "YEAR" - timePartMonth = "MONTH" - timePartDay = "DAY" - timePartHour = "HOUR" - timePartMinute = "MINUTE" - timePartSecond = "SECOND" - timePartTimezoneHour = "TIMEZONE_HOUR" - timePartTimezoneMinute = "TIMEZONE_MINUTE" -) - -func extract(what string, t time.Time) (v *Value, err error) { - switch what { - case timePartYear: - return FromInt(int64(t.Year())), nil - case timePartMonth: - return FromInt(int64(t.Month())), nil - case timePartDay: - return FromInt(int64(t.Day())), nil - case timePartHour: - return FromInt(int64(t.Hour())), nil - case timePartMinute: - return FromInt(int64(t.Minute())), nil - case timePartSecond: - return FromInt(int64(t.Second())), nil - case timePartTimezoneHour: - _, zoneOffset := t.Zone() - return FromInt(int64(zoneOffset / 3600)), nil - case timePartTimezoneMinute: - _, zoneOffset := t.Zone() - return FromInt(int64((zoneOffset % 3600) / 60)), nil - default: - // This does not happen - return nil, errNotImplemented - } -} - -func dateAdd(timePart string, qty float64, t time.Time) (*Value, error) { - var duration time.Duration - switch timePart { - case timePartYear: - return FromTimestamp(t.AddDate(int(qty), 0, 0)), nil - case timePartMonth: - return FromTimestamp(t.AddDate(0, int(qty), 0)), nil - case timePartDay: - return FromTimestamp(t.AddDate(0, 0, int(qty))), nil - case timePartHour: - duration = time.Duration(qty) * time.Hour - case timePartMinute: - duration = time.Duration(qty) * time.Minute - case timePartSecond: - duration = time.Duration(qty) * time.Second - default: - return nil, errNotImplemented - } - return FromTimestamp(t.Add(duration)), nil -} - -const ( - dayInNanoseconds = time.Hour * 24 -) - -// dateDiff computes the difference between two times in terms of the -// `timePart` which can be years, months, days, hours, minutes or -// 
seconds. For difference in years, months or days, the time part, -// including timezone is ignored. -func dateDiff(timePart string, ts1, ts2 time.Time) (*Value, error) { - if ts2.Before(ts1) { - v, err := dateDiff(timePart, ts2, ts1) - v.negate() - return v, err - } - - duration := ts2.Sub(ts1) - y1, m1, d1 := ts1.Date() - y2, m2, d2 := ts2.Date() - dy, dm := int64(y2-y1), int64(m2-m1) - - switch timePart { - case timePartYear: - if m2 > m1 || (m2 == m1 && d2 >= d1) { - return FromInt(dy), nil - } - return FromInt(dy - 1), nil - case timePartMonth: - months := 12 * dy - if m2 >= m1 { - months += dm - } else { - months += 12 + dm - } - if d2 < d1 { - months-- - } - return FromInt(months), nil - case timePartDay: - // To compute the number of days between two times - // using the time package, zero out the time portions - // of the timestamps, compute the difference duration - // and then divide by the length of a day. - d1 := time.Date(y1, m1, d1, 0, 0, 0, 0, time.UTC) - d2 := time.Date(y2, m2, d2, 0, 0, 0, 0, time.UTC) - diff := d2.Sub(d1) - days := diff / dayInNanoseconds - return FromInt(int64(days)), nil - case timePartHour: - hours := duration / time.Hour - return FromInt(int64(hours)), nil - case timePartMinute: - minutes := duration / time.Minute - return FromInt(int64(minutes)), nil - case timePartSecond: - seconds := duration / time.Second - return FromInt(int64(seconds)), nil - default: - - } - return nil, errNotImplemented -} diff --git a/pkg/s3select/sql/timestampfuncs_test.go b/pkg/s3select/sql/timestampfuncs_test.go deleted file mode 100644 index d529e048..00000000 --- a/pkg/s3select/sql/timestampfuncs_test.go +++ /dev/null @@ -1,60 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import ( - "testing" - "time" -) - -func TestParseAndDisplaySQLTimestamp(t *testing.T) { - beijing := time.FixedZone("", int((8 * time.Hour).Seconds())) - fakeLosAngeles := time.FixedZone("", -int((8 * time.Hour).Seconds())) - cases := []struct { - s string - t time.Time - }{ - {"2010T", time.Date(2010, 1, 1, 0, 0, 0, 0, time.UTC)}, - {"2010-02T", time.Date(2010, 2, 1, 0, 0, 0, 0, time.UTC)}, - {"2010-02-03T", time.Date(2010, 2, 3, 0, 0, 0, 0, time.UTC)}, - {"2010-02-03T04:11Z", time.Date(2010, 2, 3, 4, 11, 0, 0, time.UTC)}, - {"2010-02-03T04:11:30Z", time.Date(2010, 2, 3, 4, 11, 30, 0, time.UTC)}, - {"2010-02-03T04:11:30.23Z", time.Date(2010, 2, 3, 4, 11, 30, 230000000, time.UTC)}, - {"2010-02-03T04:11+08:00", time.Date(2010, 2, 3, 4, 11, 0, 0, beijing)}, - {"2010-02-03T04:11:30+08:00", time.Date(2010, 2, 3, 4, 11, 30, 0, beijing)}, - {"2010-02-03T04:11:30.23+08:00", time.Date(2010, 2, 3, 4, 11, 30, 230000000, beijing)}, - {"2010-02-03T04:11:30-08:00", time.Date(2010, 2, 3, 4, 11, 30, 0, fakeLosAngeles)}, - {"2010-02-03T04:11:30.23-08:00", time.Date(2010, 2, 3, 4, 11, 30, 230000000, fakeLosAngeles)}, - } - for i, tc := range cases { - tval, err := parseSQLTimestamp(tc.s) - if err != nil { - t.Errorf("Case %d: Unexpected error: %v", i+1, err) - continue - } - if !tval.Equal(tc.t) { - t.Errorf("Case %d: Expected %v got %v", i+1, tc.t, tval) - continue - } - - tstr := FormatSQLTimestamp(tc.t) - if tstr != tc.s { - t.Errorf("Case %d: Expected %s got %s", i+1, tc.s, tstr) - continue - } - } -} diff --git a/pkg/s3select/sql/utils.go b/pkg/s3select/sql/utils.go deleted file mode 100644 index 64ab96aa..00000000 --- a/pkg/s3select/sql/utils.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import ( - "fmt" - "strings" -) - -// String functions - -// String - returns the JSONPath representation -func (e *JSONPath) String() string { - if len(e.pathString) == 0 { - parts := make([]string, len(e.PathExpr)+1) - parts[0] = e.BaseKey.String() - for i, pe := range e.PathExpr { - parts[i+1] = pe.String() - } - e.pathString = strings.Join(parts, "") - } - return e.pathString -} - -func (e *JSONPathElement) String() string { - switch { - case e.Key != nil: - return e.Key.String() - case e.Index != nil: - return fmt.Sprintf("[%d]", *e.Index) - case e.ObjectWildcard: - return ".*" - case e.ArrayWildcard: - return "[*]" - } - return "" -} - -// String removes double quotes in quoted identifiers -func (i *Identifier) String() string { - if i.Unquoted != nil { - return *i.Unquoted - } - return string(*i.Quoted) -} - -func (o *ObjectKey) String() string { - if o.Lit != nil { - return fmt.Sprintf("['%s']", string(*o.Lit)) - } - return fmt.Sprintf(".%s", o.ID.String()) -} - -func (o *ObjectKey) keyString() string { - if o.Lit != nil { - return string(*o.Lit) - } - return o.ID.String() -} - -// getLastKeypathComponent checks if the given expression is a path -// expression, and if so extracts the last dot separated component of -// the path. Otherwise it returns false. -func getLastKeypathComponent(e *Expression) (string, bool) { - if len(e.And) > 1 || - len(e.And[0].Condition) > 1 || - e.And[0].Condition[0].Not != nil || - e.And[0].Condition[0].Operand.ConditionRHS != nil { - return "", false - } - - operand := e.And[0].Condition[0].Operand.Operand - if operand.Right != nil || - operand.Left.Right != nil || - operand.Left.Left.Negated != nil || - operand.Left.Left.Primary.JPathExpr == nil { - return "", false - } - - // Check if path expression ends in a key - jpath := operand.Left.Left.Primary.JPathExpr - n := len(jpath.PathExpr) - if n > 0 && jpath.PathExpr[n-1].Key == nil { - return "", false - } - ps := jpath.String() - if idx := strings.LastIndex(ps, "."); idx >= 0 { - // Get last part of path string. - ps = ps[idx+1:] - } - return ps, true -} - -// HasKeypath returns if the from clause has a key path - -// e.g. S3object[*].id -func (from *TableExpression) HasKeypath() bool { - return len(from.Table.PathExpr) > 1 -} diff --git a/pkg/s3select/sql/value.go b/pkg/s3select/sql/value.go deleted file mode 100644 index 6991d9a4..00000000 --- a/pkg/s3select/sql/value.go +++ /dev/null @@ -1,940 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package sql - -import ( - "encoding/json" - "errors" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -var ( - errArithMismatchedTypes = errors.New("cannot perform arithmetic on mismatched types") - errArithInvalidOperator = errors.New("invalid arithmetic operator") - errArithDivideByZero = errors.New("cannot divide by 0") - - errCmpMismatchedTypes = errors.New("cannot compare values of different types") - errCmpInvalidBoolOperator = errors.New("invalid comparison operator for boolean arguments") -) - -// Value represents a value of restricted type reduced from an -// expression represented by an ASTNode. Only one of the fields is -// non-nil. -// -// In cases where we are fetching data from a data source (like csv), -// the type may not be determined yet. In these cases, a byte-slice is -// used. -type Value struct { - value interface{} -} - -// MarshalJSON provides json marshaling of values. -func (v Value) MarshalJSON() ([]byte, error) { - if b, ok := v.ToBytes(); ok { - return b, nil - } - return json.Marshal(v.value) -} - -// GetTypeString returns a string representation for vType -func (v Value) GetTypeString() string { - switch v.value.(type) { - case nil: - return "NULL" - case bool: - return "BOOL" - case string: - return "STRING" - case int64: - return "INT" - case float64: - return "FLOAT" - case time.Time: - return "TIMESTAMP" - case []byte: - return "BYTES" - case []Value: - return "ARRAY" - } - return "--" -} - -// Repr returns a string representation of value. -func (v Value) Repr() string { - switch x := v.value.(type) { - case nil: - return ":NULL" - case bool, int64, float64: - return fmt.Sprintf("%v:%s", v.value, v.GetTypeString()) - case time.Time: - return fmt.Sprintf("%s:TIMESTAMP", x) - case string: - return fmt.Sprintf("\"%s\":%s", x, v.GetTypeString()) - case []byte: - return fmt.Sprintf("\"%s\":BYTES", string(x)) - case []Value: - var s strings.Builder - s.WriteByte('[') - for i, v := range x { - s.WriteString(v.Repr()) - if i < len(x)-1 { - s.WriteByte(',') - } - } - s.WriteString("]:ARRAY") - return s.String() - default: - return fmt.Sprintf("%v:INVALID", v.value) - } -} - -// FromFloat creates a Value from a number -func FromFloat(f float64) *Value { - return &Value{value: f} -} - -// FromInt creates a Value from an int -func FromInt(f int64) *Value { - return &Value{value: f} -} - -// FromString creates a Value from a string -func FromString(str string) *Value { - return &Value{value: str} -} - -// FromBool creates a Value from a bool -func FromBool(b bool) *Value { - return &Value{value: b} -} - -// FromTimestamp creates a Value from a timestamp -func FromTimestamp(t time.Time) *Value { - return &Value{value: t} -} - -// FromNull creates a Value with Null value -func FromNull() *Value { - return &Value{value: nil} -} - -// FromBytes creates a Value from a []byte -func FromBytes(b []byte) *Value { - return &Value{value: b} -} - -// FromArray creates a Value from an array of values. -func FromArray(a []Value) *Value { - return &Value{value: a} -} - -// ToFloat works for int and float values -func (v Value) ToFloat() (val float64, ok bool) { - switch x := v.value.(type) { - case float64: - return x, true - case int64: - return float64(x), true - } - return 0, false -} - -// ToInt returns the value if int. -func (v Value) ToInt() (val int64, ok bool) { - val, ok = v.value.(int64) - return -} - -// ToString returns the value if string. 
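// Editor's note (annotation, not from the deleted file): as with ToInt and
// ToFloat above, the accessors that follow use the two-value type assertion,
// so a mismatched underlying type yields the zero value and ok == false
// rather than a panic.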
-func (v Value) ToString() (val string, ok bool) { - val, ok = v.value.(string) - return -} - -// Equals returns whether the values strictly match. -// Both type and value must match. -func (v Value) Equals(b Value) (ok bool) { - if !v.SameTypeAs(b) { - return false - } - return reflect.DeepEqual(v.value, b.value) -} - -// SameTypeAs return whether the two types are strictly the same. -func (v Value) SameTypeAs(b Value) (ok bool) { - switch v.value.(type) { - case bool: - _, ok = b.value.(bool) - case string: - _, ok = b.value.(string) - case int64: - _, ok = b.value.(int64) - case float64: - _, ok = b.value.(float64) - case time.Time: - _, ok = b.value.(time.Time) - case []byte: - _, ok = b.value.([]byte) - case []Value: - _, ok = b.value.([]Value) - default: - ok = reflect.TypeOf(v.value) == reflect.TypeOf(b.value) - } - return ok -} - -// ToBool returns the bool value; second return value refers to if the bool -// conversion succeeded. -func (v Value) ToBool() (val bool, ok bool) { - val, ok = v.value.(bool) - return -} - -// ToTimestamp returns the timestamp value if present. -func (v Value) ToTimestamp() (t time.Time, ok bool) { - t, ok = v.value.(time.Time) - return -} - -// ToBytes returns the value if byte-slice. -func (v Value) ToBytes() (val []byte, ok bool) { - val, ok = v.value.([]byte) - return -} - -// ToArray returns the value if it is a slice of values. -func (v Value) ToArray() (val []Value, ok bool) { - val, ok = v.value.([]Value) - return -} - -// IsNull - checks if value is missing. -func (v Value) IsNull() bool { - switch v.value.(type) { - case nil: - return true - } - return false -} - -// IsArray returns whether the value is an array. -func (v Value) IsArray() (ok bool) { - _, ok = v.value.([]Value) - return ok -} - -func (v Value) isNumeric() bool { - switch v.value.(type) { - case int64, float64: - return true - } - return false -} - -// setters used internally to mutate values - -func (v *Value) setInt(i int64) { - v.value = i -} - -func (v *Value) setFloat(f float64) { - v.value = f -} - -func (v *Value) setString(s string) { - v.value = s -} - -func (v *Value) setBool(b bool) { - v.value = b -} - -func (v *Value) setTimestamp(t time.Time) { - v.value = t -} - -func (v Value) String() string { - return fmt.Sprintf("%#v", v.value) -} - -// CSVString - convert to string for CSV serialization -func (v Value) CSVString() string { - switch x := v.value.(type) { - case nil: - return "" - case bool: - if x { - return "true" - } - return "false" - case string: - return x - case int64: - return strconv.FormatInt(x, 10) - case float64: - return strconv.FormatFloat(x, 'g', -1, 64) - case time.Time: - return FormatSQLTimestamp(x) - case []byte: - return string(x) - case []Value: - b, _ := json.Marshal(x) - return string(b) - - default: - return "CSV serialization not implemented for this type" - } -} - -// floatToValue converts a float into int representation if needed. -func floatToValue(f float64) *Value { - intPart, fracPart := math.Modf(f) - if fracPart == 0 { - return FromInt(int64(intPart)) - } - return FromFloat(f) -} - -// negate negates a numeric value -func (v *Value) negate() { - switch x := v.value.(type) { - case float64: - v.value = -x - case int64: - v.value = -x - } -} - -// Value comparison functions: we do not expose them outside the -// module. Logical operators "<", ">", ">=", "<=" work on strings and -// numbers. Equality operators "=", "!=" work on strings, -// numbers and booleans. 
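The comparison helpers below are unexported, but the strict type-and-value matching they describe is visible through the exported surface of the Value type removed in this hunk (FromInt, FromFloat, FromString, Equals, SameTypeAs, ToFloat). A minimal usage sketch, assuming the pre-deletion import path github.com/minio/minio/pkg/s3select/sql:

package main

import (
	"fmt"

	"github.com/minio/minio/pkg/s3select/sql" // assumed import path for the package as it stood before this deletion
)

func main() {
	a := sql.FromInt(42)
	b := sql.FromFloat(42)
	c := sql.FromString("42")

	// Equals requires both type and value to match, so INT(42) does not equal FLOAT(42).
	fmt.Println(a.Equals(*b))     // false: INT vs FLOAT
	fmt.Println(a.SameTypeAs(*c)) // false: INT vs STRING

	// ToFloat is the widening accessor: it succeeds for both INT and FLOAT values.
	if f, ok := a.ToFloat(); ok {
		fmt.Println(f) // 42
	}
}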
- -// Supported comparison operators -const ( - opLt = "<" - opLte = "<=" - opGt = ">" - opGte = ">=" - opEq = "=" - opIneq = "!=" -) - -// InferBytesType will attempt to infer the data type of bytes. -// Will fail if value type is not bytes or it would result in invalid utf8. -// ORDER: int, float, bool, JSON (object or array), timestamp, string -// If the content is valid JSON, the type will still be bytes. -func (v *Value) InferBytesType() (err error) { - b, ok := v.ToBytes() - if !ok { - return fmt.Errorf("InferByteType: Input is not bytes, but %v", v.GetTypeString()) - } - - // Check for numeric inference - if x, ok := v.bytesToInt(); ok { - v.setInt(x) - return nil - } - if x, ok := v.bytesToFloat(); ok { - v.setFloat(x) - return nil - } - if x, ok := v.bytesToBool(); ok { - v.setBool(x) - return nil - } - - asString := strings.TrimSpace(v.bytesToString()) - if len(b) > 0 && - (strings.HasPrefix(asString, "{") || strings.HasPrefix(asString, "[")) { - return nil - } - - if t, err := parseSQLTimestamp(asString); err == nil { - v.setTimestamp(t) - return nil - } - if !utf8.Valid(b) { - return errors.New("value is not valid utf-8") - } - // Fallback to string - v.setString(asString) - return -} - -// When numeric types are compared, type promotions could happen. If -// values do not have types (e.g. when reading from CSV), for -// comparison operations, automatic type conversion happens by trying -// to check if the value is a number (first an integer, then a float), -// and falling back to string. -func (v *Value) compareOp(op string, a *Value) (res bool, err error) { - if !isValidComparisonOperator(op) { - return false, errArithInvalidOperator - } - - // Check if type conversion/inference is needed - it is needed - // if the Value is a byte-slice. - err = inferTypesForCmp(v, a) - if err != nil { - return false, err - } - - // Check if either is nil - if v.IsNull() || a.IsNull() { - // If one is, both must be. - return boolCompare(op, v.IsNull(), a.IsNull()) - } - - // Check array values - aArr, aOK := a.ToArray() - vArr, vOK := v.ToArray() - if aOK && vOK { - return arrayCompare(op, aArr, vArr) - } - - isNumeric := v.isNumeric() && a.isNumeric() - if isNumeric { - intV, ok1i := v.ToInt() - intA, ok2i := a.ToInt() - if ok1i && ok2i { - return intCompare(op, intV, intA), nil - } - - // If both values are numeric, then at least one is - // float since we got here, so we convert. - flV, _ := v.ToFloat() - flA, _ := a.ToFloat() - return floatCompare(op, flV, flA), nil - } - - strV, ok1s := v.ToString() - strA, ok2s := a.ToString() - if ok1s && ok2s { - return stringCompare(op, strV, strA), nil - } - - boolV, ok1b := v.ToBool() - boolA, ok2b := a.ToBool() - if ok1b && ok2b { - return boolCompare(op, boolV, boolA) - } - - timestampV, ok1t := v.ToTimestamp() - timestampA, ok2t := a.ToTimestamp() - if ok1t && ok2t { - return timestampCompare(op, timestampV, timestampA), nil - } - - // Types cannot be compared, they do not match. 
-	switch op {
-	case opEq:
-		return false, nil
-	case opIneq:
-		return true, nil
-	}
-	return false, errCmpInvalidBoolOperator
-}
-
-func inferTypesForCmp(a *Value, b *Value) error {
-	_, okA := a.ToBytes()
-	_, okB := b.ToBytes()
-	switch {
-	case !okA && !okB:
-		// Both Values already have types
-		return nil
-
-	case okA && okB:
-		// Both Values are untyped so try the types in order:
-		// int, float, bool, string
-
-		// Check for numeric inference
-		iA, okAi := a.bytesToInt()
-		iB, okBi := b.bytesToInt()
-		if okAi && okBi {
-			a.setInt(iA)
-			b.setInt(iB)
-			return nil
-		}
-
-		fA, okAf := a.bytesToFloat()
-		fB, okBf := b.bytesToFloat()
-		if okAf && okBf {
-			a.setFloat(fA)
-			b.setFloat(fB)
-			return nil
-		}
-
-		// Check if they are an int and float combination.
-		if okAi && okBf {
-			a.setInt(iA)
-			b.setFloat(fB)
-			return nil
-		}
-		if okBi && okAf {
-			a.setFloat(fA)
-			b.setInt(iB)
-			return nil
-		}
-
-		// Not numeric types at this point.
-
-		// Check for bool inference
-		bA, okAb := a.bytesToBool()
-		bB, okBb := b.bytesToBool()
-		if okAb && okBb {
-			a.setBool(bA)
-			b.setBool(bB)
-			return nil
-		}
-
-		// Fallback to string
-		sA := a.bytesToString()
-		sB := b.bytesToString()
-		a.setString(sA)
-		b.setString(sB)
-		return nil
-
-	case okA && !okB:
-		// Here `a` is untyped, but `b` has a fixed type.
-		switch b.value.(type) {
-		case string:
-			s := a.bytesToString()
-			a.setString(s)
-
-		case int64, float64:
-			if iA, ok := a.bytesToInt(); ok {
-				a.setInt(iA)
-			} else if fA, ok := a.bytesToFloat(); ok {
-				a.setFloat(fA)
-			} else {
-				return fmt.Errorf("Could not convert %s to a number", a.String())
-			}
-
-		case bool:
-			if bA, ok := a.bytesToBool(); ok {
-				a.setBool(bA)
-			} else {
-				return fmt.Errorf("Could not convert %s to a boolean", a.String())
-			}
-
-		default:
-			return errCmpMismatchedTypes
-		}
-		return nil
-
-	case !okA && okB:
-		// swap arguments to avoid repeating code
-		return inferTypesForCmp(b, a)
-
-	default:
-		// Does not happen
-		return nil
-	}
-}
-
-// Value arithmetic functions: we do not expose them outside the
-// module. All arithmetic works only on numeric values with automatic
-// promotion to the "larger" type that can represent the value. TODO:
-// Add support for large number arithmetic.
-
-// Supported arithmetic operators
-const (
-	opPlus     = "+"
-	opMinus    = "-"
-	opDivide   = "/"
-	opMultiply = "*"
-	opModulo   = "%"
-)
-
-// For arithmetic operations, if both values are numeric then the
-// operation shall succeed. If the types are unknown, automatic type
-// conversion to a number is attempted.
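Before the arithOp implementation that follows, the promotion order is worth spelling out on its own. The sketch below is a standalone, hypothetical helper (inferNumber is an invented name, not part of the package) that applies the same int-first, float-second conversion the unexported inference helpers use for untyped byte values:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// inferNumber mirrors the promotion order described above: an untyped byte
// value is parsed as int64 first, and only on failure is it parsed as float64.
func inferNumber(raw []byte) (interface{}, error) {
	s := strings.TrimSpace(string(raw))
	if i, err := strconv.ParseInt(s, 10, 64); err == nil {
		return i, nil // e.g. "42" stays an integer
	}
	if f, err := strconv.ParseFloat(s, 64); err == nil {
		return f, nil // e.g. "4.25" or "3e5" is promoted to a float
	}
	return nil, fmt.Errorf("could not convert %q to a number", s)
}

func main() {
	for _, in := range []string{" 42 ", "3e5", "abc"} {
		v, err := inferNumber([]byte(in))
		fmt.Println(v, err)
	}
}

With both operands reduced to int64 or float64 this way, arithOp below only has to choose between integer and floating-point arithmetic.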
-func (v *Value) arithOp(op string, a *Value) error { - err := inferTypeForArithOp(v) - if err != nil { - return err - } - - err = inferTypeForArithOp(a) - if err != nil { - return err - } - - if !v.isNumeric() || !a.isNumeric() { - return errInvalidDataType(errArithMismatchedTypes) - } - - if !isValidArithOperator(op) { - return errInvalidDataType(errArithMismatchedTypes) - } - - intV, ok1i := v.ToInt() - intA, ok2i := a.ToInt() - switch { - case ok1i && ok2i: - res, err := intArithOp(op, intV, intA) - v.setInt(res) - return err - - default: - // Convert arguments to float - flV, _ := v.ToFloat() - flA, _ := a.ToFloat() - res, err := floatArithOp(op, flV, flA) - v.setFloat(res) - return err - } -} - -func inferTypeForArithOp(a *Value) error { - if _, ok := a.ToBytes(); !ok { - return nil - } - - if i, ok := a.bytesToInt(); ok { - a.setInt(i) - return nil - } - - if f, ok := a.bytesToFloat(); ok { - a.setFloat(f) - return nil - } - - err := fmt.Errorf("Could not convert %q to a number", string(a.value.([]byte))) - return errInvalidDataType(err) -} - -// All the bytesTo* functions defined below assume the value is a byte-slice. - -// Converts untyped value into int. The bool return implies success - -// it returns false only if there is a conversion failure. -func (v Value) bytesToInt() (int64, bool) { - bytes, _ := v.ToBytes() - i, err := strconv.ParseInt(strings.TrimSpace(string(bytes)), 10, 64) - return i, err == nil -} - -// Converts untyped value into float. The bool return implies success -// - it returns false only if there is a conversion failure. -func (v Value) bytesToFloat() (float64, bool) { - bytes, _ := v.ToBytes() - i, err := strconv.ParseFloat(strings.TrimSpace(string(bytes)), 64) - return i, err == nil -} - -// Converts untyped value into bool. The second bool return implies -// success - it returns false in case of a conversion failure. -func (v Value) bytesToBool() (val bool, ok bool) { - bytes, _ := v.ToBytes() - ok = true - switch strings.ToLower(strings.TrimSpace(string(bytes))) { - case "t", "true", "1": - val = true - case "f", "false", "0": - val = false - default: - ok = false - } - return val, ok -} - -// bytesToString - never fails, but returns empty string if value is not bytes. -func (v Value) bytesToString() string { - bytes, _ := v.ToBytes() - return string(bytes) -} - -// Calculates minimum or maximum of v and a and assigns the result to -// v - it works only on numeric arguments, where `v` is already -// assumed to be numeric. Attempts conversion to numeric type for `a` -// (first int, then float) only if the underlying values do not have a -// type. -func (v *Value) minmax(a *Value, isMax, isFirstRow bool) error { - err := inferTypeForArithOp(a) - if err != nil { - return err - } - - if !a.isNumeric() { - return errArithMismatchedTypes - } - - // In case of first row, set v to a. 
- if isFirstRow { - intA, okI := a.ToInt() - if okI { - v.setInt(intA) - return nil - } - floatA, _ := a.ToFloat() - v.setFloat(floatA) - return nil - } - - intV, ok1i := v.ToInt() - intA, ok2i := a.ToInt() - if ok1i && ok2i { - result := intV - if !isMax { - if intA < result { - result = intA - } - } else { - if intA > result { - result = intA - } - } - v.setInt(result) - return nil - } - - floatV, _ := v.ToFloat() - floatA, _ := a.ToFloat() - var result float64 - if !isMax { - result = math.Min(floatV, floatA) - } else { - result = math.Max(floatV, floatA) - } - v.setFloat(result) - return nil -} - -func inferTypeAsTimestamp(v *Value) error { - if s, ok := v.ToString(); ok { - t, err := parseSQLTimestamp(s) - if err != nil { - return err - } - v.setTimestamp(t) - } else if b, ok := v.ToBytes(); ok { - s := string(b) - t, err := parseSQLTimestamp(s) - if err != nil { - return err - } - v.setTimestamp(t) - } - return nil -} - -// inferTypeAsString is used to convert untyped values to string - it -// is called when the caller requires a string context to proceed. -func inferTypeAsString(v *Value) { - b, ok := v.ToBytes() - if !ok { - return - } - - v.setString(string(b)) -} - -func isValidComparisonOperator(op string) bool { - switch op { - case opLt: - case opLte: - case opGt: - case opGte: - case opEq: - case opIneq: - default: - return false - } - return true -} - -func intCompare(op string, left, right int64) bool { - switch op { - case opLt: - return left < right - case opLte: - return left <= right - case opGt: - return left > right - case opGte: - return left >= right - case opEq: - return left == right - case opIneq: - return left != right - } - // This case does not happen - return false -} - -func floatCompare(op string, left, right float64) bool { - switch op { - case opLt: - return left < right - case opLte: - return left <= right - case opGt: - return left > right - case opGte: - return left >= right - case opEq: - return left == right - case opIneq: - return left != right - } - // This case does not happen - return false -} - -func stringCompare(op string, left, right string) bool { - switch op { - case opLt: - return left < right - case opLte: - return left <= right - case opGt: - return left > right - case opGte: - return left >= right - case opEq: - return left == right - case opIneq: - return left != right - } - // This case does not happen - return false -} - -func boolCompare(op string, left, right bool) (bool, error) { - switch op { - case opEq: - return left == right, nil - case opIneq: - return left != right, nil - default: - return false, errCmpInvalidBoolOperator - } -} - -func arrayCompare(op string, left, right []Value) (bool, error) { - switch op { - case opEq: - if len(left) != len(right) { - return false, nil - } - for i, l := range left { - eq, err := l.compareOp(op, &right[i]) - if !eq || err != nil { - return eq, err - } - } - return true, nil - case opIneq: - for i, l := range left { - eq, err := l.compareOp(op, &right[i]) - if eq || err != nil { - return eq, err - } - } - return false, nil - default: - return false, errCmpInvalidBoolOperator - } -} - -func timestampCompare(op string, left, right time.Time) bool { - switch op { - case opLt: - return left.Before(right) - case opLte: - return left.Before(right) || left.Equal(right) - case opGt: - return left.After(right) - case opGte: - return left.After(right) || left.Equal(right) - case opEq: - return left.Equal(right) - case opIneq: - return !left.Equal(right) - } - // This case does not happen - return 
false -} - -func isValidArithOperator(op string) bool { - switch op { - case opPlus: - case opMinus: - case opDivide: - case opMultiply: - case opModulo: - default: - return false - } - return true -} - -// Overflow errors are ignored. -func intArithOp(op string, left, right int64) (int64, error) { - switch op { - case opPlus: - return left + right, nil - case opMinus: - return left - right, nil - case opDivide: - if right == 0 { - return 0, errArithDivideByZero - } - return left / right, nil - case opMultiply: - return left * right, nil - case opModulo: - if right == 0 { - return 0, errArithDivideByZero - } - return left % right, nil - } - // This does not happen - return 0, nil -} - -// Overflow errors are ignored. -func floatArithOp(op string, left, right float64) (float64, error) { - switch op { - case opPlus: - return left + right, nil - case opMinus: - return left - right, nil - case opDivide: - if right == 0 { - return 0, errArithDivideByZero - } - return left / right, nil - case opMultiply: - return left * right, nil - case opModulo: - if right == 0 { - return 0, errArithDivideByZero - } - return math.Mod(left, right), nil - } - // This does not happen - return 0, nil -} diff --git a/pkg/s3select/sql/value_test.go b/pkg/s3select/sql/value_test.go deleted file mode 100644 index c373c57d..00000000 --- a/pkg/s3select/sql/value_test.go +++ /dev/null @@ -1,685 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sql - -import ( - "fmt" - "math" - "strconv" - "testing" - "time" -) - -// valueBuilders contains one constructor for each value type. -// Values should match if type is the same. -var valueBuilders = []func() *Value{ - func() *Value { - return FromNull() - }, - func() *Value { - return FromBool(true) - }, - func() *Value { - return FromBytes([]byte("byte contents")) - }, - func() *Value { - return FromFloat(math.Pi) - }, - func() *Value { - return FromInt(0x1337) - }, - func() *Value { - t, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") - if err != nil { - panic(err) - } - return FromTimestamp(t) - }, - func() *Value { - return FromString("string contents") - }, -} - -// altValueBuilders contains one constructor for each value type. -// Values are zero values and should NOT match the values in valueBuilders, except Null type. 
-var altValueBuilders = []func() *Value{ - func() *Value { - return FromNull() - }, - func() *Value { - return FromBool(false) - }, - func() *Value { - return FromBytes(nil) - }, - func() *Value { - return FromFloat(0) - }, - func() *Value { - return FromInt(0) - }, - func() *Value { - return FromTimestamp(time.Time{}) - }, - func() *Value { - return FromString("") - }, -} - -func TestValue_SameTypeAs(t *testing.T) { - type fields struct { - a, b Value - } - type test struct { - name string - fields fields - wantOk bool - } - var tests []test - for i := range valueBuilders { - a := valueBuilders[i]() - for j := range valueBuilders { - b := valueBuilders[j]() - tests = append(tests, test{ - name: fmt.Sprint(a.GetTypeString(), "==", b.GetTypeString()), - fields: fields{ - a: *a, b: *b, - }, - wantOk: i == j, - }) - } - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotOk := tt.fields.a.SameTypeAs(tt.fields.b); gotOk != tt.wantOk { - t.Errorf("SameTypeAs() = %v, want %v", gotOk, tt.wantOk) - } - }) - } -} - -func TestValue_Equals(t *testing.T) { - type fields struct { - a, b Value - } - type test struct { - name string - fields fields - wantOk bool - } - var tests []test - for i := range valueBuilders { - a := valueBuilders[i]() - for j := range valueBuilders { - b := valueBuilders[j]() - tests = append(tests, test{ - name: fmt.Sprint(a.GetTypeString(), "==", b.GetTypeString()), - fields: fields{ - a: *a, b: *b, - }, - wantOk: i == j, - }) - } - } - for i := range valueBuilders { - a := valueBuilders[i]() - for j := range altValueBuilders { - b := altValueBuilders[j]() - tests = append(tests, test{ - name: fmt.Sprint(a.GetTypeString(), "!=", b.GetTypeString()), - fields: fields{ - a: *a, b: *b, - }, - // Only Null == Null - wantOk: a.IsNull() && b.IsNull() && i == 0 && j == 0, - }) - } - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if gotOk := tt.fields.a.Equals(tt.fields.b); gotOk != tt.wantOk { - t.Errorf("Equals() = %v, want %v", gotOk, tt.wantOk) - } - }) - } -} - -func TestValue_CSVString(t *testing.T) { - type test struct { - name string - want string - wantAlt string - } - - tests := []test{ - { - name: valueBuilders[0]().String(), - want: "", - wantAlt: "", - }, - { - name: valueBuilders[1]().String(), - want: "true", - wantAlt: "false", - }, - { - name: valueBuilders[2]().String(), - want: "byte contents", - wantAlt: "", - }, - { - name: valueBuilders[3]().String(), - want: "3.141592653589793", - wantAlt: "0", - }, - { - name: valueBuilders[4]().String(), - want: "4919", - wantAlt: "0", - }, - { - name: valueBuilders[5]().String(), - want: "2006-01-02T15:04:05Z", - wantAlt: "0001T", - }, - { - name: valueBuilders[6]().String(), - want: "string contents", - wantAlt: "", - }, - } - - for i, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - v := valueBuilders[i]() - vAlt := altValueBuilders[i]() - if got := v.CSVString(); got != tt.want { - t.Errorf("CSVString() = %v, want %v", got, tt.want) - } - if got := vAlt.CSVString(); got != tt.wantAlt { - t.Errorf("CSVString() = %v, want %v", got, tt.wantAlt) - } - }) - } -} - -func TestValue_bytesToInt(t *testing.T) { - type fields struct { - value interface{} - } - tests := []struct { - name string - fields fields - want int64 - wantOK bool - }{ - { - name: "zero", - fields: fields{ - value: []byte("0"), - }, - want: 0, - wantOK: true, - }, - { - name: "minuszero", - fields: fields{ - value: []byte("-0"), - }, - want: 0, - wantOK: true, - }, - { - name: "one", - fields: fields{ - 
value: []byte("1"), - }, - want: 1, - wantOK: true, - }, - { - name: "minusone", - fields: fields{ - value: []byte("-1"), - }, - want: -1, - wantOK: true, - }, - { - name: "plusone", - fields: fields{ - value: []byte("+1"), - }, - want: 1, - wantOK: true, - }, - { - name: "max", - fields: fields{ - value: []byte(strconv.FormatInt(math.MaxInt64, 10)), - }, - want: math.MaxInt64, - wantOK: true, - }, - { - name: "min", - fields: fields{ - value: []byte(strconv.FormatInt(math.MinInt64, 10)), - }, - want: math.MinInt64, - wantOK: true, - }, - { - name: "max-overflow", - fields: fields{ - value: []byte("9223372036854775808"), - }, - // Seems to be what strconv.ParseInt returns - want: math.MaxInt64, - wantOK: false, - }, - { - name: "min-underflow", - fields: fields{ - value: []byte("-9223372036854775809"), - }, - // Seems to be what strconv.ParseInt returns - want: math.MinInt64, - wantOK: false, - }, - { - name: "zerospace", - fields: fields{ - value: []byte(" 0"), - }, - want: 0, - wantOK: true, - }, - { - name: "onespace", - fields: fields{ - value: []byte("1 "), - }, - want: 1, - wantOK: true, - }, - { - name: "minusonespace", - fields: fields{ - value: []byte(" -1 "), - }, - want: -1, - wantOK: true, - }, - { - name: "plusonespace", - fields: fields{ - value: []byte("\t+1\t"), - }, - want: 1, - wantOK: true, - }, - { - name: "scientific", - fields: fields{ - value: []byte("3e5"), - }, - want: 0, - wantOK: false, - }, - { - // No support for prefixes - name: "hex", - fields: fields{ - value: []byte("0xff"), - }, - want: 0, - wantOK: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - v := &Value{ - value: tt.fields.value, - } - got, got1 := v.bytesToInt() - if got != tt.want { - t.Errorf("bytesToInt() got = %v, want %v", got, tt.want) - } - if got1 != tt.wantOK { - t.Errorf("bytesToInt() got1 = %v, want %v", got1, tt.wantOK) - } - }) - } -} - -func TestValue_bytesToFloat(t *testing.T) { - type fields struct { - value interface{} - } - tests := []struct { - name string - fields fields - want float64 - wantOK bool - }{ - // Copied from TestValue_bytesToInt. 
- { - name: "zero", - fields: fields{ - value: []byte("0"), - }, - want: 0, - wantOK: true, - }, - { - name: "minuszero", - fields: fields{ - value: []byte("-0"), - }, - want: 0, - wantOK: true, - }, - { - name: "one", - fields: fields{ - value: []byte("1"), - }, - want: 1, - wantOK: true, - }, - { - name: "minusone", - fields: fields{ - value: []byte("-1"), - }, - want: -1, - wantOK: true, - }, - { - name: "plusone", - fields: fields{ - value: []byte("+1"), - }, - want: 1, - wantOK: true, - }, - { - name: "maxint", - fields: fields{ - value: []byte(strconv.FormatInt(math.MaxInt64, 10)), - }, - want: math.MaxInt64, - wantOK: true, - }, - { - name: "minint", - fields: fields{ - value: []byte(strconv.FormatInt(math.MinInt64, 10)), - }, - want: math.MinInt64, - wantOK: true, - }, - { - name: "max-overflow-int", - fields: fields{ - value: []byte("9223372036854775808"), - }, - // Seems to be what strconv.ParseInt returns - want: math.MaxInt64, - wantOK: true, - }, - { - name: "min-underflow-int", - fields: fields{ - value: []byte("-9223372036854775809"), - }, - // Seems to be what strconv.ParseInt returns - want: math.MinInt64, - wantOK: true, - }, - { - name: "max", - fields: fields{ - value: []byte(strconv.FormatFloat(math.MaxFloat64, 'g', -1, 64)), - }, - want: math.MaxFloat64, - wantOK: true, - }, - { - name: "min", - fields: fields{ - value: []byte(strconv.FormatFloat(-math.MaxFloat64, 'g', -1, 64)), - }, - want: -math.MaxFloat64, - wantOK: true, - }, - { - name: "max-overflow", - fields: fields{ - value: []byte("1.797693134862315708145274237317043567981e+309"), - }, - // Seems to be what strconv.ParseInt returns - want: math.Inf(1), - wantOK: false, - }, - { - name: "min-underflow", - fields: fields{ - value: []byte("-1.797693134862315708145274237317043567981e+309"), - }, - // Seems to be what strconv.ParseInt returns - want: math.Inf(-1), - wantOK: false, - }, - { - name: "smallest-pos", - fields: fields{ - value: []byte(strconv.FormatFloat(math.SmallestNonzeroFloat64, 'g', -1, 64)), - }, - want: math.SmallestNonzeroFloat64, - wantOK: true, - }, - { - name: "smallest-pos", - fields: fields{ - value: []byte(strconv.FormatFloat(-math.SmallestNonzeroFloat64, 'g', -1, 64)), - }, - want: -math.SmallestNonzeroFloat64, - wantOK: true, - }, - { - name: "zerospace", - fields: fields{ - value: []byte(" 0"), - }, - want: 0, - wantOK: true, - }, - { - name: "onespace", - fields: fields{ - value: []byte("1 "), - }, - want: 1, - wantOK: true, - }, - { - name: "minusonespace", - fields: fields{ - value: []byte(" -1 "), - }, - want: -1, - wantOK: true, - }, - { - name: "plusonespace", - fields: fields{ - value: []byte("\t+1\t"), - }, - want: 1, - wantOK: true, - }, - { - name: "scientific", - fields: fields{ - value: []byte("3e5"), - }, - want: 300000, - wantOK: true, - }, - { - // No support for prefixes - name: "hex", - fields: fields{ - value: []byte("0xff"), - }, - want: 0, - wantOK: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - v := Value{ - value: tt.fields.value, - } - got, got1 := v.bytesToFloat() - if got != tt.want { - t.Errorf("bytesToFloat() got = %v, want %v", got, tt.want) - } - if got1 != tt.wantOK { - t.Errorf("bytesToFloat() got1 = %v, want %v", got1, tt.wantOK) - } - }) - } -} - -func TestValue_bytesToBool(t *testing.T) { - type fields struct { - value interface{} - } - tests := []struct { - name string - fields fields - wantVal bool - wantOk bool - }{ - { - name: "true", - fields: fields{ - value: []byte("true"), - }, - wantVal: true, - wantOk: 
true, - }, - { - name: "false", - fields: fields{ - value: []byte("false"), - }, - wantVal: false, - wantOk: true, - }, - { - name: "t", - fields: fields{ - value: []byte("t"), - }, - wantVal: true, - wantOk: true, - }, - { - name: "f", - fields: fields{ - value: []byte("f"), - }, - wantVal: false, - wantOk: true, - }, - { - name: "1", - fields: fields{ - value: []byte("1"), - }, - wantVal: true, - wantOk: true, - }, - { - name: "0", - fields: fields{ - value: []byte("0"), - }, - wantVal: false, - wantOk: true, - }, - { - name: "truespace", - fields: fields{ - value: []byte(" true "), - }, - wantVal: true, - wantOk: true, - }, - { - name: "truetabs", - fields: fields{ - value: []byte("\ttrue\t"), - }, - wantVal: true, - wantOk: true, - }, - { - name: "TRUE", - fields: fields{ - value: []byte("TRUE"), - }, - wantVal: true, - wantOk: true, - }, - { - name: "FALSE", - fields: fields{ - value: []byte("FALSE"), - }, - wantVal: false, - wantOk: true, - }, - { - name: "invalid", - fields: fields{ - value: []byte("no"), - }, - wantVal: false, - wantOk: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - v := Value{ - value: tt.fields.value, - } - gotVal, gotOk := v.bytesToBool() - if gotVal != tt.wantVal { - t.Errorf("bytesToBool() gotVal = %v, want %v", gotVal, tt.wantVal) - } - if gotOk != tt.wantOk { - t.Errorf("bytesToBool() gotOk = %v, want %v", gotOk, tt.wantOk) - } - }) - } -} diff --git a/pkg/s3select/testdata.parquet b/pkg/s3select/testdata.parquet deleted file mode 100644 index 0128ad1a..00000000 Binary files a/pkg/s3select/testdata.parquet and /dev/null differ diff --git a/pkg/s3select/unused-errors.go b/pkg/s3select/unused-errors.go deleted file mode 100644 index a4cade84..00000000 --- a/pkg/s3select/unused-errors.go +++ /dev/null @@ -1,642 +0,0 @@ -// +build ignore - -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3select - -/////////////////////////////////////////////////////////////////////// -// -// Validation errors. 
-// -/////////////////////////////////////////////////////////////////////// -func errExpressionTooLong(err error) *s3Error { - return &s3Error{ - code: "ExpressionTooLong", - message: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", - statusCode: 400, - cause: err, - } -} - -func errColumnTooLong(err error) *s3Error { - return &s3Error{ - code: "ColumnTooLong", - message: "The length of a column in the result is greater than maxCharsPerColumn of 1 MB.", - statusCode: 400, - cause: err, - } -} - -func errOverMaxColumn(err error) *s3Error { - return &s3Error{ - code: "OverMaxColumn", - message: "The number of columns in the result is greater than the maximum allowable number of columns.", - statusCode: 400, - cause: err, - } -} - -func errOverMaxRecordSize(err error) *s3Error { - return &s3Error{ - code: "OverMaxRecordSize", - message: "The length of a record in the input or result is greater than maxCharsPerRecord of 1 MB.", - statusCode: 400, - cause: err, - } -} - -func errInvalidColumnIndex(err error) *s3Error { - return &s3Error{ - code: "InvalidColumnIndex", - message: "Column index in the SQL expression is invalid.", - statusCode: 400, - cause: err, - } -} - -func errInvalidTextEncoding(err error) *s3Error { - return &s3Error{ - code: "InvalidTextEncoding", - message: "Invalid encoding type. Only UTF-8 encoding is supported.", - statusCode: 400, - cause: err, - } -} - -func errInvalidTableAlias(err error) *s3Error { - return &s3Error{ - code: "InvalidTableAlias", - message: "The SQL expression contains an invalid table alias.", - statusCode: 400, - cause: err, - } -} - -func errUnsupportedSyntax(err error) *s3Error { - return &s3Error{ - code: "UnsupportedSyntax", - message: "Encountered invalid syntax.", - statusCode: 400, - cause: err, - } -} - -func errAmbiguousFieldName(err error) *s3Error { - return &s3Error{ - code: "AmbiguousFieldName", - message: "Field name matches to multiple fields in the file. Check the SQL expression and the file, and try again.", - statusCode: 400, - cause: err, - } -} - -func errIntegerOverflow(err error) *s3Error { - return &s3Error{ - code: "IntegerOverflow", - message: "Integer overflow or underflow in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errIllegalSQLFunctionArgument(err error) *s3Error { - return &s3Error{ - code: "IllegalSqlFunctionArgument", - message: "Illegal argument was used in the SQL function.", - statusCode: 400, - cause: err, - } -} - -func errMultipleDataSourcesUnsupported(err error) *s3Error { - return &s3Error{ - code: "MultipleDataSourcesUnsupported", - message: "Multiple data sources are not supported.", - statusCode: 400, - cause: err, - } -} - -func errMissingHeaders(err error) *s3Error { - return &s3Error{ - code: "MissingHeaders", - message: "Some headers in the query are missing from the file. Check the file and try again.", - statusCode: 400, - cause: err, - } -} - -func errUnrecognizedFormatException(err error) *s3Error { - return &s3Error{ - code: "UnrecognizedFormatException", - message: "Encountered an invalid record type.", - statusCode: 400, - cause: err, - } -} - -////////////////////////////////////////////////////////////////////////////////////// -// -// SQL parsing errors. 
-// -////////////////////////////////////////////////////////////////////////////////////// -func errLexerInvalidChar(err error) *s3Error { - return &s3Error{ - code: "LexerInvalidChar", - message: "The SQL expression contains an invalid character.", - statusCode: 400, - cause: err, - } -} - -func errLexerInvalidOperator(err error) *s3Error { - return &s3Error{ - code: "LexerInvalidOperator", - message: "The SQL expression contains an invalid literal.", - statusCode: 400, - cause: err, - } -} - -func errLexerInvalidLiteral(err error) *s3Error { - return &s3Error{ - code: "LexerInvalidLiteral", - message: "The SQL expression contains an invalid operator.", - statusCode: 400, - cause: err, - } -} - -func errLexerInvalidIONLiteral(err error) *s3Error { - return &s3Error{ - code: "LexerInvalidIONLiteral", - message: "The SQL expression contains an invalid operator.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedDatePart(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedDatePart", - message: "Did not find the expected date part in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedKeyword(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedKeyword", - message: "Did not find the expected keyword in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedTokenType(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedTokenType", - message: "Did not find the expected token in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpected2TokenTypes(err error) *s3Error { - return &s3Error{ - code: "ParseExpected2TokenTypes", - message: "Did not find the expected token in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedNumber(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedNumber", - message: "Did not find the expected number in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedRightParenBuiltinFunctionCall(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedRightParenBuiltinFunctionCall", - message: "Did not find the expected right parenthesis character in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedTypeName(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedTypeName", - message: "Did not find the expected type name in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedWhenClause(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedWhenClause", - message: "Did not find the expected WHEN clause in the SQL expression. 
CASE is not supported.", - statusCode: 400, - cause: err, - } -} - -func errParseUnsupportedToken(err error) *s3Error { - return &s3Error{ - code: "ParseUnsupportedToken", - message: "The SQL expression contains an unsupported token.", - statusCode: 400, - cause: err, - } -} - -func errParseUnsupportedLiteralsGroupBy(err error) *s3Error { - return &s3Error{ - code: "ParseUnsupportedLiteralsGroupBy", - message: "The SQL expression contains an unsupported use of GROUP BY.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedMember(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedMember", - message: "The SQL expression contains an unsupported use of MEMBER.", - statusCode: 400, - cause: err, - } -} - -func errParseUnsupportedCase(err error) *s3Error { - return &s3Error{ - code: "ParseUnsupportedCase", - message: "The SQL expression contains an unsupported use of CASE.", - statusCode: 400, - cause: err, - } -} - -func errParseUnsupportedCaseClause(err error) *s3Error { - return &s3Error{ - code: "ParseUnsupportedCaseClause", - message: "The SQL expression contains an unsupported use of CASE.", - statusCode: 400, - cause: err, - } -} - -func errParseUnsupportedAlias(err error) *s3Error { - return &s3Error{ - code: "ParseUnsupportedAlias", - message: "The SQL expression contains an unsupported use of ALIAS.", - statusCode: 400, - cause: err, - } -} - -func errParseInvalidPathComponent(err error) *s3Error { - return &s3Error{ - code: "ParseInvalidPathComponent", - message: "The SQL expression contains an invalid path component.", - statusCode: 400, - cause: err, - } -} - -func errParseMissingIdentAfterAt(err error) *s3Error { - return &s3Error{ - code: "ParseMissingIdentAfterAt", - message: "Did not find the expected identifier after the @ symbol in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseUnexpectedOperator(err error) *s3Error { - return &s3Error{ - code: "ParseUnexpectedOperator", - message: "The SQL expression contains an unexpected operator.", - statusCode: 400, - cause: err, - } -} - -func errParseUnexpectedTerm(err error) *s3Error { - return &s3Error{ - code: "ParseUnexpectedTerm", - message: "The SQL expression contains an unexpected term.", - statusCode: 400, - cause: err, - } -} - -func errParseUnexpectedToken(err error) *s3Error { - return &s3Error{ - code: "ParseUnexpectedToken", - message: "The SQL expression contains an unexpected token.", - statusCode: 400, - cause: err, - } -} - -func errParseUnExpectedKeyword(err error) *s3Error { - return &s3Error{ - code: "ParseUnExpectedKeyword", - message: "The SQL expression contains an unexpected keyword.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedExpression(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedExpression", - message: "Did not find the expected SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedLeftParenAfterCast(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedLeftParenAfterCast", - message: "Did not find the expected left parenthesis after CAST in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedLeftParenValueConstructor(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedLeftParenValueConstructor", - message: "Did not find expected the left parenthesis in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedLeftParenBuiltinFunctionCall(err error) *s3Error { - return &s3Error{ - code: 
"ParseExpectedLeftParenBuiltinFunctionCall", - message: "Did not find the expected left parenthesis in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedArgumentDelimiter(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedArgumentDelimiter", - message: "Did not find the expected argument delimiter in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseCastArity(err error) *s3Error { - return &s3Error{ - code: "ParseCastArity", - message: "The SQL expression CAST has incorrect arity.", - statusCode: 400, - cause: err, - } -} - -func errParseEmptySelect(err error) *s3Error { - return &s3Error{ - code: "ParseEmptySelect", - message: "The SQL expression contains an empty SELECT.", - statusCode: 400, - cause: err, - } -} - -func errParseSelectMissingFrom(err error) *s3Error { - return &s3Error{ - code: "ParseSelectMissingFrom", - message: "The SQL expression contains a missing FROM after SELECT list.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedIdentForGroupName(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedIdentForGroupName", - message: "GROUP is not supported in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedIdentForAlias(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedIdentForAlias", - message: "Did not find the expected identifier for the alias in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseUnsupportedCallWithStar(err error) *s3Error { - return &s3Error{ - code: "ParseUnsupportedCallWithStar", - message: "Only COUNT with (*) as a parameter is supported in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseMalformedJoin(err error) *s3Error { - return &s3Error{ - code: "ParseMalformedJoin", - message: "JOIN is not supported in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseExpectedIdentForAt(err error) *s3Error { - return &s3Error{ - code: "ParseExpectedIdentForAt", - message: "Did not find the expected identifier for AT name in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errParseCannotMixSqbAndWildcardInSelectList(err error) *s3Error { - return &s3Error{ - code: "ParseCannotMixSqbAndWildcardInSelectList", - message: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.", - statusCode: 400, - cause: err, - } -} - -////////////////////////////////////////////////////////////////////////////////////// -// -// CAST() related errors. 
-// -////////////////////////////////////////////////////////////////////////////////////// -func errCastFailed(err error) *s3Error { - return &s3Error{ - code: "CastFailed", - message: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errInvalidCast(err error) *s3Error { - return &s3Error{ - code: "InvalidCast", - message: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errEvaluatorInvalidTimestampFormatPattern(err error) *s3Error { - return &s3Error{ - code: "EvaluatorInvalidTimestampFormatPattern", - message: "Invalid time stamp format string in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errEvaluatorInvalidTimestampFormatPatternAdditionalFieldsRequired(err error) *s3Error { - return &s3Error{ - code: "EvaluatorInvalidTimestampFormatPattern", - message: "Time stamp format pattern requires additional fields in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errEvaluatorInvalidTimestampFormatPatternSymbolForParsing(err error) *s3Error { - return &s3Error{ - code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", - message: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errEvaluatorTimestampFormatPatternDuplicateFields(err error) *s3Error { - return &s3Error{ - code: "EvaluatorTimestampFormatPatternDuplicateFields", - message: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errEvaluatorTimestampFormatPatternHourClockAmPmMismatch(err error) *s3Error { - return &s3Error{ - code: "EvaluatorTimestampFormatPatternHourClockAmPmMismatch", - message: "Time stamp format pattern contains a 12-hour hour of day format symbol but doesn't also contain an AM/PM field, or it contains a 24-hour hour of day format specifier and contains an AM/PM field in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errEvaluatorUnterminatedTimestampFormatPatternToken(err error) *s3Error { - return &s3Error{ - code: "EvaluatorUnterminatedTimestampFormatPatternToken", - message: "Time stamp format pattern contains unterminated token in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errEvaluatorInvalidTimestampFormatPatternToken(err error) *s3Error { - return &s3Error{ - code: "EvaluatorInvalidTimestampFormatPatternToken", - message: "Time stamp format pattern contains an invalid token in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -func errEvaluatorInvalidTimestampFormatPatternSymbol(err error) *s3Error { - return &s3Error{ - code: "EvaluatorInvalidTimestampFormatPatternSymbol", - message: "Time stamp format pattern contains an invalid symbol in the SQL expression.", - statusCode: 400, - cause: err, - } -} - -//////////////////////////////////////////////////////////////////////// -// -// Generic S3 HTTP handler errors. -// -//////////////////////////////////////////////////////////////////////// -func errBusy(err error) *s3Error { - return &s3Error{ - code: "Busy", - message: "The service is unavailable. 
Please retry.", - statusCode: 503, - cause: err, - } -} - -func errUnauthorizedAccess(err error) *s3Error { - return &s3Error{ - code: "UnauthorizedAccess", - message: "You are not authorized to perform this operation", - statusCode: 401, - cause: err, - } -} - -func errEmptyRequestBody(err error) *s3Error { - return &s3Error{ - code: "EmptyRequestBody", - message: "Request body cannot be empty.", - statusCode: 400, - cause: err, - } -} - -func errUnsupportedRangeHeader(err error) *s3Error { - return &s3Error{ - code: "UnsupportedRangeHeader", - message: "Range header is not supported for this operation.", - statusCode: 400, - cause: err, - } -} - -func errUnsupportedStorageClass(err error) *s3Error { - return &s3Error{ - code: "UnsupportedStorageClass", - message: "Encountered an invalid storage class. Only STANDARD, STANDARD_IA, and ONEZONE_IA storage classes are supported.", - statusCode: 400, - cause: err, - } -} diff --git a/pkg/safe/safe.go b/pkg/safe/safe.go deleted file mode 100644 index 1e3ef460..00000000 --- a/pkg/safe/safe.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * MinIO Cloud Storage (C) 2015-2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// NOTE - Rename() not guaranteed to be safe on all filesystems which are not fully POSIX compatible - -package safe - -import ( - "errors" - "io/ioutil" - "os" - "path/filepath" -) - -// File represents safe file descriptor. -type File struct { - name string - tmpfile *os.File - closed bool - aborted bool -} - -// Write writes len(b) bytes to the temporary File. In case of error, the temporary file is removed. -func (file *File) Write(b []byte) (n int, err error) { - if file.closed { - err = errors.New("write on closed file") - return - } - if file.aborted { - err = errors.New("write on aborted file") - return - } - - defer func() { - if err != nil { - os.Remove(file.tmpfile.Name()) - file.aborted = true - } - }() - - n, err = file.tmpfile.Write(b) - return -} - -// Close closes the temporary File and renames to the named file. In case of error, the temporary file is removed. -func (file *File) Close() (err error) { - defer func() { - if err != nil { - os.Remove(file.tmpfile.Name()) - file.aborted = true - } - }() - - if file.closed { - err = errors.New("close on closed file") - return - } - if file.aborted { - err = errors.New("close on aborted file") - return - } - - if err = file.tmpfile.Close(); err != nil { - return - } - - err = os.Rename(file.tmpfile.Name(), file.name) - - file.closed = true - return -} - -// Abort aborts the temporary File by closing and removing the temporary file. -func (file *File) Abort() (err error) { - if file.closed { - err = errors.New("abort on closed file") - return - } - if file.aborted { - err = errors.New("abort on aborted file") - return - } - - file.tmpfile.Close() - err = os.Remove(file.tmpfile.Name()) - file.aborted = true - return -} - -// CreateFile creates the named file safely from unique temporary file. 
-// The temporary file is renamed to the named file upon successful close -// to safeguard intermediate state in the named file. The temporary file -// is created in the name of the named file with suffixed unique number -// and prefixed "$tmpfile" string. While creating the temporary file, -// missing parent directories are also created. The temporary file is -// removed if case of any intermediate failure. Not removed temporary -// files can be cleaned up by identifying them using "$tmpfile" prefix -// string. -func CreateFile(name string) (*File, error) { - // ioutil.TempFile() fails if parent directory is missing. - // Create parent directory to avoid such error. - dname := filepath.Dir(name) - if err := os.MkdirAll(dname, 0700); err != nil { - return nil, err - } - - fname := filepath.Base(name) - tmpfile, err := ioutil.TempFile(dname, "$tmpfile."+fname+".") - if err != nil { - return nil, err - } - - if err = os.Chmod(tmpfile.Name(), 0600); err != nil { - if rerr := os.Remove(tmpfile.Name()); rerr != nil { - err = rerr - } - return nil, err - } - - return &File{name: name, tmpfile: tmpfile}, nil -} diff --git a/pkg/safe/safe_test.go b/pkg/safe/safe_test.go deleted file mode 100644 index 8dd7c661..00000000 --- a/pkg/safe/safe_test.go +++ /dev/null @@ -1,192 +0,0 @@ -/* - * MinIO Client (C) 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package safe - -import ( - "io/ioutil" - "os" - "path" - "testing" -) - -type MySuite struct { - root string -} - -func (s *MySuite) SetUpSuite(t *testing.T) { - root, err := ioutil.TempDir(os.TempDir(), "safe_test.go.") - if err != nil { - t.Fatal(err) - } - s.root = root -} - -func (s *MySuite) TearDownSuite(t *testing.T) { - err := os.RemoveAll(s.root) - if err != nil { - t.Fatal(err) - } -} - -func TestSafeAbort(t *testing.T) { - s := &MySuite{} - s.SetUpSuite(t) - defer s.TearDownSuite(t) - - f, err := CreateFile(path.Join(s.root, "testfile-abort")) - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(path.Join(s.root, "testfile-abort")) - if !os.IsNotExist(err) { - t.Fatal(err) - } - err = f.Abort() - if err != nil { - t.Fatal(err) - } - err = f.Close() - if err != nil { - if err.Error() != "close on aborted file" { - t.Fatal(err) - } - } -} - -func TestSafeClose(t *testing.T) { - s := &MySuite{} - s.SetUpSuite(t) - defer s.TearDownSuite(t) - - f, err := CreateFile(path.Join(s.root, "testfile-close")) - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(path.Join(s.root, "testfile-close")) - if !os.IsNotExist(err) { - t.Fatal(err) - } - - err = f.Close() - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(path.Join(s.root, "testfile-close")) - if err != nil { - t.Fatal(err) - } - - err = os.Remove(path.Join(s.root, "testfile-close")) - if err != nil { - t.Fatal(err) - } - - err = f.Abort() - if err != nil { - if err.Error() != "abort on closed file" { - t.Fatal(err) - } - } -} - -func TestSafe(t *testing.T) { - s := &MySuite{} - s.SetUpSuite(t) - defer s.TearDownSuite(t) - - f, err := CreateFile(path.Join(s.root, "testfile-safe")) - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(path.Join(s.root, "testfile-safe")) - if !os.IsNotExist(err) { - t.Fatal(err) - } - - err = f.Close() - if err != nil { - t.Fatal(err) - } - - _, err = f.Write([]byte("Test")) - if err != nil { - if err.Error() != "write on closed file" { - t.Fatal(err) - } - } - - err = f.Close() - if err != nil { - if err.Error() != "close on closed file" { - t.Fatal(err) - } - } - - _, err = os.Stat(path.Join(s.root, "testfile-safe")) - if err != nil { - t.Fatal(err) - } - - err = os.Remove(path.Join(s.root, "testfile-safe")) - if err != nil { - t.Fatal(err) - } -} - -func TestSafeAbortWrite(t *testing.T) { - s := &MySuite{} - s.SetUpSuite(t) - defer s.TearDownSuite(t) - - f, err := CreateFile(path.Join(s.root, "purgefile-abort")) - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(path.Join(s.root, "purgefile-abort")) - if !os.IsNotExist(err) { - t.Fatal(err) - } - - err = f.Abort() - if err != nil { - t.Fatal(err) - } - - _, err = os.Stat(path.Join(s.root, "purgefile-abort")) - if !os.IsNotExist(err) { - t.Fatal(err) - } - - err = f.Abort() - if err != nil { - if err.Error() != "abort on aborted file" { - t.Fatal(err) - } - } - - _, err = f.Write([]byte("Test")) - if err != nil { - if err.Error() != "write on aborted file" { - t.Fatal(err) - } - } -} diff --git a/pkg/sync/errgroup/errgroup.go b/pkg/sync/errgroup/errgroup.go deleted file mode 100644 index bf0ce1c7..00000000 --- a/pkg/sync/errgroup/errgroup.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package errgroup - -import ( - "sync" -) - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid and does not cancel on error. -type Group struct { - wg sync.WaitGroup - errs []error -} - -// WithNErrs returns a new Group with length of errs slice upto nerrs, -// upon Wait() errors are returned collected from all tasks. -func WithNErrs(nerrs int) *Group { - return &Group{errs: make([]error, nerrs)} -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the slice of errors from all function calls. -func (g *Group) Wait() []error { - g.wg.Wait() - return g.errs -} - -// Go calls the given function in a new goroutine. -// -// The first call to return a non-nil error will be -// collected in errs slice and returned by Wait(). -func (g *Group) Go(f func() error, index int) { - g.wg.Add(1) - - go func() { - defer g.wg.Done() - - if err := f(); err != nil { - g.errs[index] = err - } - }() -} diff --git a/pkg/sync/errgroup/errgroup_test.go b/pkg/sync/errgroup/errgroup_test.go deleted file mode 100644 index 9816d985..00000000 --- a/pkg/sync/errgroup/errgroup_test.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package errgroup - -import ( - "fmt" - "reflect" - "testing" -) - -func TestGroupWithNErrs(t *testing.T) { - err1 := fmt.Errorf("errgroup_test: 1") - err2 := fmt.Errorf("errgroup_test: 2") - - cases := []struct { - errs []error - }{ - {errs: []error{nil}}, - {errs: []error{err1}}, - {errs: []error{err1, nil}}, - {errs: []error{err1, nil, err2}}, - } - - for j, tc := range cases { - t.Run(fmt.Sprintf("Test%d", j+1), func(t *testing.T) { - g := WithNErrs(len(tc.errs)) - for i, err := range tc.errs { - err := err - g.Go(func() error { return err }, i) - } - - gotErrs := g.Wait() - if !reflect.DeepEqual(gotErrs, tc.errs) { - t.Errorf("Expected %#v, got %#v", tc.errs, gotErrs) - } - }) - } -} diff --git a/pkg/sys/rlimit-file_bsd.go b/pkg/sys/rlimit-file_bsd.go deleted file mode 100644 index d30b365e..00000000 --- a/pkg/sys/rlimit-file_bsd.go +++ /dev/null @@ -1,40 +0,0 @@ -// +build freebsd dragonfly - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import ( - "syscall" -) - -// GetMaxOpenFileLimit - returns maximum file descriptor number that can be opened by this process. -func GetMaxOpenFileLimit() (curLimit, maxLimit uint64, err error) { - var rlimit syscall.Rlimit - if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err == nil { - curLimit = uint64(rlimit.Cur) - maxLimit = uint64(rlimit.Max) - } - - return curLimit, maxLimit, err -} - -// SetMaxOpenFileLimit - sets maximum file descriptor number that can be opened by this process. -func SetMaxOpenFileLimit(curLimit, maxLimit uint64) error { - rlimit := syscall.Rlimit{Cur: int64(curLimit), Max: int64(curLimit)} - return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit) -} diff --git a/pkg/sys/rlimit-file_nix.go b/pkg/sys/rlimit-file_nix.go deleted file mode 100644 index ac01be5f..00000000 --- a/pkg/sys/rlimit-file_nix.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build linux darwin openbsd netbsd solaris - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import ( - "runtime" - "syscall" -) - -// GetMaxOpenFileLimit - returns maximum file descriptor number that can be opened by this process. -func GetMaxOpenFileLimit() (curLimit, maxLimit uint64, err error) { - var rlimit syscall.Rlimit - if err = syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit); err == nil { - curLimit = rlimit.Cur - maxLimit = rlimit.Max - } - - return curLimit, maxLimit, err -} - -// SetMaxOpenFileLimit - sets maximum file descriptor number that can be opened by this process. -func SetMaxOpenFileLimit(curLimit, maxLimit uint64) error { - if runtime.GOOS == "darwin" && curLimit > 10240 { - // The max file limit is 10240, even though - // the max returned by Getrlimit is 1<<63-1. - // This is OPEN_MAX in sys/syslimits.h. - // refer https://github.com/golang/go/issues/30401 - curLimit = 10240 - } - rlimit := syscall.Rlimit{Cur: curLimit, Max: maxLimit} - return syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rlimit) -} diff --git a/pkg/sys/rlimit-file_test.go b/pkg/sys/rlimit-file_test.go deleted file mode 100644 index 1e5646d8..00000000 --- a/pkg/sys/rlimit-file_test.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
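// Editorial note (not part of the deleted files): a typical use of the
// rlimit-file helpers above is raising the soft file-descriptor limit to the
// hard limit at process startup. A minimal sketch under that assumption:

package main

import (
	"log"

	"github.com/minio/minio/pkg/sys"
)

func main() {
	cur, hard, err := sys.GetMaxOpenFileLimit()
	if err != nil {
		log.Fatalf("unable to read RLIMIT_NOFILE: %v", err)
	}
	if cur < hard {
		// Raise the soft limit up to the hard limit.
		if err := sys.SetMaxOpenFileLimit(hard, hard); err != nil {
			log.Fatalf("unable to raise RLIMIT_NOFILE: %v", err)
		}
	}
	log.Printf("open file limit: soft=%d hard=%d", hard, hard)
}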
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import "testing" - -// Test get max open file limit. -func TestGetMaxOpenFileLimit(t *testing.T) { - _, _, err := GetMaxOpenFileLimit() - if err != nil { - t.Errorf("expected: nil, got: %v", err) - } -} - -// Test set open file limit -func TestSetMaxOpenFileLimit(t *testing.T) { - curLimit, maxLimit, err := GetMaxOpenFileLimit() - if err != nil { - t.Fatalf("Unable to get max open file limit. %v", err) - } - - err = SetMaxOpenFileLimit(curLimit, maxLimit) - if err != nil { - t.Errorf("expected: nil, got: %v", err) - } -} diff --git a/pkg/sys/rlimit-file_windows.go b/pkg/sys/rlimit-file_windows.go deleted file mode 100644 index b78fc0a6..00000000 --- a/pkg/sys/rlimit-file_windows.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build windows - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -// GetMaxOpenFileLimit - returns maximum file descriptor number that can be opened by this process. -func GetMaxOpenFileLimit() (curLimit, maxLimit uint64, err error) { - // Nothing to do for windows. - return curLimit, maxLimit, err -} - -// SetMaxOpenFileLimit - sets maximum file descriptor number that can be opened by this process. -func SetMaxOpenFileLimit(curLimit, maxLimit uint64) error { - // Nothing to do for windows. - return nil -} diff --git a/pkg/sys/rlimit-memory_bsd.go b/pkg/sys/rlimit-memory_bsd.go deleted file mode 100644 index bb56f7b2..00000000 --- a/pkg/sys/rlimit-memory_bsd.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build freebsd dragonfly - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import "syscall" - -// GetMaxMemoryLimit - returns the maximum size of the process's virtual memory (address space) in bytes. 
-func GetMaxMemoryLimit() (curLimit, maxLimit uint64, err error) { - var rlimit syscall.Rlimit - if err = syscall.Getrlimit(syscall.RLIMIT_DATA, &rlimit); err == nil { - curLimit = uint64(rlimit.Cur) - maxLimit = uint64(rlimit.Max) - } - - return curLimit, maxLimit, err -} - -// SetMaxMemoryLimit - sets the maximum size of the process's virtual memory (address space) in bytes. -func SetMaxMemoryLimit(curLimit, maxLimit uint64) error { - rlimit := syscall.Rlimit{Cur: int64(curLimit), Max: int64(maxLimit)} - return syscall.Setrlimit(syscall.RLIMIT_DATA, &rlimit) -} diff --git a/pkg/sys/rlimit-memory_nix.go b/pkg/sys/rlimit-memory_nix.go deleted file mode 100644 index 85d0e2a5..00000000 --- a/pkg/sys/rlimit-memory_nix.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build linux darwin netbsd solaris - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import "syscall" - -// GetMaxMemoryLimit - returns the maximum size of the process's virtual memory (address space) in bytes. -func GetMaxMemoryLimit() (curLimit, maxLimit uint64, err error) { - var rlimit syscall.Rlimit - if err = syscall.Getrlimit(syscall.RLIMIT_AS, &rlimit); err == nil { - curLimit = rlimit.Cur - maxLimit = rlimit.Max - } - - return curLimit, maxLimit, err -} - -// SetMaxMemoryLimit - sets the maximum size of the process's virtual memory (address space) in bytes. -func SetMaxMemoryLimit(curLimit, maxLimit uint64) error { - rlimit := syscall.Rlimit{Cur: curLimit, Max: maxLimit} - return syscall.Setrlimit(syscall.RLIMIT_AS, &rlimit) -} diff --git a/pkg/sys/rlimit-memory_openbsd.go b/pkg/sys/rlimit-memory_openbsd.go deleted file mode 100644 index ced796fa..00000000 --- a/pkg/sys/rlimit-memory_openbsd.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build openbsd - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import "syscall" - -// GetMaxMemoryLimit - returns the maximum size of the process's virtual memory (address space) in bytes. -func GetMaxMemoryLimit() (curLimit, maxLimit uint64, err error) { - var rlimit syscall.Rlimit - if err = syscall.Getrlimit(syscall.RLIMIT_DATA, &rlimit); err == nil { - curLimit = rlimit.Cur - maxLimit = rlimit.Max - } - - return curLimit, maxLimit, err -} - -// SetMaxMemoryLimit - sets the maximum size of the process's virtual memory (address space) in bytes. 
-func SetMaxMemoryLimit(curLimit, maxLimit uint64) error { - rlimit := syscall.Rlimit{Cur: curLimit, Max: maxLimit} - return syscall.Setrlimit(syscall.RLIMIT_DATA, &rlimit) -} diff --git a/pkg/sys/rlimit-memory_test.go b/pkg/sys/rlimit-memory_test.go deleted file mode 100644 index 4836f561..00000000 --- a/pkg/sys/rlimit-memory_test.go +++ /dev/null @@ -1,40 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import "testing" - -// Test get max memory limit. -func TestGetMaxMemoryLimit(t *testing.T) { - _, _, err := GetMaxMemoryLimit() - if err != nil { - t.Errorf("expected: nil, got: %v", err) - } -} - -// Test set memory limit -func TestSetMaxMemoryLimit(t *testing.T) { - curLimit, maxLimit, err := GetMaxMemoryLimit() - if err != nil { - t.Fatalf("Unable to get max memory limit. %v", err) - } - - err = SetMaxMemoryLimit(curLimit, maxLimit) - if err != nil { - t.Errorf("expected: nil, got: %v", err) - } -} diff --git a/pkg/sys/rlimit-memory_windows.go b/pkg/sys/rlimit-memory_windows.go deleted file mode 100644 index b831737c..00000000 --- a/pkg/sys/rlimit-memory_windows.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build windows - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -// GetMaxMemoryLimit - returns the maximum size of the process's virtual memory (address space) in bytes. -func GetMaxMemoryLimit() (curLimit, maxLimit uint64, err error) { - // Nothing to do for windows. - return curLimit, maxLimit, err -} - -// SetMaxMemoryLimit - sets the maximum size of the process's virtual memory (address space) in bytes. -func SetMaxMemoryLimit(curLimit, maxLimit uint64) error { - // Nothing to do for windows. - return nil -} diff --git a/pkg/sys/stats.go b/pkg/sys/stats.go deleted file mode 100644 index 541015e1..00000000 --- a/pkg/sys/stats.go +++ /dev/null @@ -1,22 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
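// Editorial note (not part of the deleted files): the memory-rlimit helpers
// above report RLIMIT_AS (RLIMIT_DATA on the BSDs) and are no-ops on Windows.
// A minimal sketch that only reads the limits; the 1 GiB threshold shown for
// the sizing decision is hypothetical:

package main

import (
	"log"

	"github.com/minio/minio/pkg/sys"
)

func main() {
	cur, hard, err := sys.GetMaxMemoryLimit()
	if err != nil {
		log.Fatalf("unable to read memory rlimit: %v", err)
	}
	log.Printf("address-space limit: soft=%d hard=%d", cur, hard)
	if cur != 0 && cur < 1<<30 {
		log.Print("less than 1 GiB of address space available; shrinking caches")
	}
}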
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -// Stats - system statistics. -type Stats struct { - TotalRAM uint64 // Physical RAM size in bytes, -} diff --git a/pkg/sys/stats_bsd.go b/pkg/sys/stats_bsd.go deleted file mode 100644 index f1a25221..00000000 --- a/pkg/sys/stats_bsd.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build openbsd netbsd freebsd dragonfly - -/* - * MinIO Cloud Storage, (C) 2016,2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import ( - "encoding/binary" - "syscall" -) - -func getHwPhysmem() (uint64, error) { - totalString, err := syscall.Sysctl("hw.physmem") - if err != nil { - return 0, err - } - - // syscall.sysctl() helpfully assumes the result is a null-terminated string and - // removes the last byte of the result if it's 0 :/ - totalString += "\x00" - - total := uint64(binary.LittleEndian.Uint64([]byte(totalString))) - - return total, nil -} - -// GetStats - return system statistics for bsd. -func GetStats() (stats Stats, err error) { - stats.TotalRAM, err = getHwPhysmem() - return stats, err -} diff --git a/pkg/sys/stats_darwin.go b/pkg/sys/stats_darwin.go deleted file mode 100644 index 33c28338..00000000 --- a/pkg/sys/stats_darwin.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build darwin - -/* - * MinIO Cloud Storage, (C) 2016,2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import ( - "encoding/binary" - "syscall" -) - -func getHwMemsize() (uint64, error) { - totalString, err := syscall.Sysctl("hw.memsize") - if err != nil { - return 0, err - } - - // syscall.sysctl() helpfully assumes the result is a null-terminated string and - // removes the last byte of the result if it's 0 :/ - totalString += "\x00" - - total := uint64(binary.LittleEndian.Uint64([]byte(totalString))) - - return total, nil -} - -// GetStats - return system statistics for macOS. -func GetStats() (stats Stats, err error) { - stats.TotalRAM, err = getHwMemsize() - return stats, err -} diff --git a/pkg/sys/stats_linux.go b/pkg/sys/stats_linux.go deleted file mode 100644 index 643f30ce..00000000 --- a/pkg/sys/stats_linux.go +++ /dev/null @@ -1,87 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2016,2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import ( - "os" - "syscall" - - "github.com/minio/minio/pkg/cgroup" -) - -// Get the final system memory limit chosen by the user. -// by default without any configuration on a vanilla Linux -// system you would see physical RAM limit. If cgroup -// is configured at some point in time this function -// would return the memory limit chosen for the given pid. -func getMemoryLimit() (sysLimit uint64, err error) { - if sysLimit, err = getSysinfoMemoryLimit(); err != nil { - // Physical memory info is not accessible, just exit here. - return 0, err - } - - // Following code is deliberately ignoring the error. - cGroupLimit, gerr := cgroup.GetMemoryLimit(os.Getpid()) - if gerr != nil { - // Upon error just return system limit. - return sysLimit, nil - } - - // cgroup limit is lesser than system limit means - // user wants to limit the memory usage further - // treat cgroup limit as the system limit. - if cGroupLimit <= sysLimit { - sysLimit = cGroupLimit - } - - // Final system limit. - return sysLimit, nil - -} - -// Get physical RAM size of the node. -func getSysinfoMemoryLimit() (limit uint64, err error) { - var si syscall.Sysinfo_t - if err = syscall.Sysinfo(&si); err != nil { - return 0, err - } - - // Some fields in syscall.Sysinfo_t have different integer sizes - // in different platform architectures. Cast all fields to uint64. - totalRAM := uint64(si.Totalram) - unit := uint64(si.Unit) - - // Total RAM is always the multiplicative value - // of unit size and total ram. - limit = unit * totalRAM - return limit, nil -} - -// GetStats - return system statistics, currently only -// supported value is TotalRAM. -func GetStats() (stats Stats, err error) { - var limit uint64 - limit, err = getMemoryLimit() - if err != nil { - return Stats{}, err - } - - stats.TotalRAM = limit - return stats, nil -} diff --git a/pkg/sys/stats_solaris.go b/pkg/sys/stats_solaris.go deleted file mode 100644 index fded5619..00000000 --- a/pkg/sys/stats_solaris.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import "errors" - -// GetStats - stub implementation for Solaris, this will not give us -// complete functionality but will enable fs setups on Solaris. 
-func GetStats() (stats Stats, err error) { - return Stats{}, errors.New("Not implemented") -} diff --git a/pkg/sys/stats_test.go b/pkg/sys/stats_test.go deleted file mode 100644 index f9b2e4d4..00000000 --- a/pkg/sys/stats_test.go +++ /dev/null @@ -1,30 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import "testing" - -// Test get stats result. -func TestGetStats(t *testing.T) { - stats, err := GetStats() - if err != nil { - t.Errorf("Tests: Expected `nil`, Got %s", err) - } - if stats.TotalRAM == 0 { - t.Errorf("Tests: Expected `n > 0`, Got %d", stats.TotalRAM) - } -} diff --git a/pkg/sys/stats_windows.go b/pkg/sys/stats_windows.go deleted file mode 100644 index e75612c0..00000000 --- a/pkg/sys/stats_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -// +build windows - -/* - * MinIO Cloud Storage, (C) 2016,2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -type memoryStatusEx struct { - cbSize uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 // in bytes - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// GetStats - return system statistics for windows. -func GetStats() (stats Stats, err error) { - var memInfo memoryStatusEx - memInfo.cbSize = uint32(unsafe.Sizeof(memInfo)) - if mem, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(&memInfo))); mem == 0 { - err = syscall.GetLastError() - } else { - stats.TotalRAM = memInfo.ullTotalPhys - } - - return stats, err -} diff --git a/pkg/sys/threads.go b/pkg/sys/threads.go deleted file mode 100644 index 25cc5f6d..00000000 --- a/pkg/sys/threads.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build linux - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
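// Editorial note (not part of the deleted files): GetStats above reports the
// node's usable RAM, and on Linux that value is capped by the cgroup memory
// limit when one is configured for the process. A minimal sketch of a caller
// sizing itself from that value (the 50% heuristic is hypothetical):

package main

import (
	"log"

	"github.com/minio/minio/pkg/sys"
)

func main() {
	stats, err := sys.GetStats()
	if err != nil {
		log.Fatalf("unable to read system stats: %v", err)
	}
	cacheBudget := stats.TotalRAM / 2 // spend at most half of usable RAM on caches
	log.Printf("total RAM: %d bytes, cache budget: %d bytes", stats.TotalRAM, cacheBudget)
}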
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import ( - "io/ioutil" - "strconv" - "strings" -) - -// GetMaxThreads returns the maximum number of threads that the system can create. -func GetMaxThreads() (int, error) { - sysMaxThreadsStr, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") - if err != nil { - return 0, err - } - sysMaxThreads, err := strconv.Atoi(strings.TrimSpace(string(sysMaxThreadsStr))) - if err != nil { - return 0, err - } - return sysMaxThreads, nil -} diff --git a/pkg/sys/threads_other.go b/pkg/sys/threads_other.go deleted file mode 100644 index e0ee3883..00000000 --- a/pkg/sys/threads_other.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !linux - -/* - * MinIO Cloud Storage, (C) 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package sys - -import "errors" - -// GetMaxThreads returns the maximum number of threads that the system can create. -func GetMaxThreads() (int, error) { - return 0, errors.New("getting max threads is not supported") -} diff --git a/pkg/trace/trace.go b/pkg/trace/trace.go deleted file mode 100644 index 5cc87578..00000000 --- a/pkg/trace/trace.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2019 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package trace - -import ( - "net/http" - "time" -) - -// Info - represents a trace record, additionally -// also reports errors if any while listening on trace. 
-type Info struct { - NodeName string `json:"nodename"` - FuncName string `json:"funcname"` - ReqInfo RequestInfo `json:"request"` - RespInfo ResponseInfo `json:"response"` - CallStats CallStats `json:"stats"` -} - -// CallStats records request stats -type CallStats struct { - InputBytes int `json:"inputbytes"` - OutputBytes int `json:"outputbytes"` - Latency time.Duration `json:"latency"` - TimeToFirstByte time.Duration `json:"timetofirstbyte"` -} - -// RequestInfo represents trace of http request -type RequestInfo struct { - Time time.Time `json:"time"` - Method string `json:"method"` - Path string `json:"path,omitempty"` - RawQuery string `json:"rawquery,omitempty"` - Headers http.Header `json:"headers,omitempty"` - Body []byte `json:"body,omitempty"` - Client string `json:"client"` -} - -// ResponseInfo represents trace of http request -type ResponseInfo struct { - Time time.Time `json:"time"` - Headers http.Header `json:"headers,omitempty"` - Body []byte `json:"body,omitempty"` - StatusCode int `json:"statuscode,omitempty"` -} diff --git a/pkg/trie/trie.go b/pkg/trie/trie.go deleted file mode 100644 index d41d4d38..00000000 --- a/pkg/trie/trie.go +++ /dev/null @@ -1,118 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2014, 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// Package trie implements a simple trie tree for minio server/tools borrows -// idea from - https://godoc.org/golang.org/x/text/internal/triegen. -package trie - -// Node trie tree node container carries value and children. -type Node struct { - exists bool - value interface{} - child map[rune]*Node // runes as child. -} - -// newNode create a new trie node. -func newNode() *Node { - return &Node{ - exists: false, - value: nil, - child: make(map[rune]*Node), - } -} - -// Trie is a trie container. -type Trie struct { - root *Node - size int -} - -// Root returns root node. -func (t *Trie) Root() *Node { - return t.root -} - -// Insert insert a key. -func (t *Trie) Insert(key string) { - curNode := t.root - for _, v := range key { - if curNode.child[v] == nil { - curNode.child[v] = newNode() - } - curNode = curNode.child[v] - } - - if !curNode.exists { - // increment when new rune child is added. - t.size++ - curNode.exists = true - } - // value is stored for retrieval in future. - curNode.value = key -} - -// PrefixMatch - prefix match. -func (t *Trie) PrefixMatch(key string) []interface{} { - node, _ := t.findNode(key) - if node != nil { - return t.Walk(node) - } - return []interface{}{} -} - -// Walk the tree. -func (t *Trie) Walk(node *Node) (ret []interface{}) { - if node.exists { - ret = append(ret, node.value) - } - for _, v := range node.child { - ret = append(ret, t.Walk(v)...) - } - return -} - -// find nodes corresponding to key. 
-func (t *Trie) findNode(key string) (node *Node, index int) { - curNode := t.root - f := false - for k, v := range key { - if f { - index = k - f = false - } - if curNode.child[v] == nil { - return nil, index - } - curNode = curNode.child[v] - if curNode.exists { - f = true - } - } - - if curNode.exists { - index = len(key) - } - - return curNode, index -} - -// NewTrie create a new trie. -func NewTrie() *Trie { - return &Trie{ - root: newNode(), - size: 0, - } -} diff --git a/pkg/trie/trie_test.go b/pkg/trie/trie_test.go deleted file mode 100644 index 0c84328d..00000000 --- a/pkg/trie/trie_test.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package trie - -import ( - "testing" -) - -// Simply make sure creating a new tree works. -func TestNewTrie(t *testing.T) { - trie := NewTrie() - - if trie.size != 0 { - t.Errorf("expected size 0, got: %d", trie.size) - } -} - -// Ensure that we can insert new keys into the tree, then check the size. -func TestInsert(t *testing.T) { - trie := NewTrie() - - // We need to have an empty tree to begin with. - if trie.size != 0 { - t.Errorf("expected size 0, got: %d", trie.size) - } - - trie.Insert("key") - trie.Insert("keyy") - - // After inserting, we should have a size of two. - if trie.size != 2 { - t.Errorf("expected size 2, got: %d", trie.size) - } -} - -// Ensure that PrefixMatch gives us the correct two keys in the tree. -func TestPrefixMatch(t *testing.T) { - trie := NewTrie() - - // Feed it some fodder: only 'minio' and 'miny-os' should trip the matcher. - trie.Insert("minio") - trie.Insert("amazon") - trie.Insert("cheerio") - trie.Insert("miny-o's") - - matches := trie.PrefixMatch("min") - if len(matches) != 2 { - t.Errorf("expected two matches, got: %d", len(matches)) - } - - if matches[0] != "minio" && matches[1] != "minio" { - t.Errorf("expected one match to be 'minio', got: '%s' and '%s'", matches[0], matches[1]) - } -} diff --git a/pkg/wildcard/match.go b/pkg/wildcard/match.go deleted file mode 100644 index 358d21e0..00000000 --- a/pkg/wildcard/match.go +++ /dev/null @@ -1,67 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2015, 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wildcard - -// MatchSimple - finds whether the text matches/satisfies the pattern string. -// supports only '*' wildcard in the pattern. -// considers a file system path as a flat name space. 
-func MatchSimple(pattern, name string) bool { - if pattern == "" { - return name == pattern - } - if pattern == "*" { - return true - } - // Does only wildcard '*' match. - return deepMatchRune([]rune(name), []rune(pattern), true) -} - -// Match - finds whether the text matches/satisfies the pattern string. -// supports '*' and '?' wildcards in the pattern string. -// unlike path.Match(), considers a path as a flat name space while matching the pattern. -// The difference is illustrated in the example here https://play.golang.org/p/Ega9qgD4Qz . -func Match(pattern, name string) (matched bool) { - if pattern == "" { - return name == pattern - } - if pattern == "*" { - return true - } - // Does extended wildcard '*' and '?' match. - return deepMatchRune([]rune(name), []rune(pattern), false) -} - -func deepMatchRune(str, pattern []rune, simple bool) bool { - for len(pattern) > 0 { - switch pattern[0] { - default: - if len(str) == 0 || str[0] != pattern[0] { - return false - } - case '?': - if len(str) == 0 && !simple { - return false - } - case '*': - return deepMatchRune(str, pattern[1:], simple) || - (len(str) > 0 && deepMatchRune(str[1:], pattern, simple)) - } - str = str[1:] - pattern = pattern[1:] - } - return len(str) == 0 && len(pattern) == 0 -} diff --git a/pkg/wildcard/match_test.go b/pkg/wildcard/match_test.go deleted file mode 100644 index 5eb305c6..00000000 --- a/pkg/wildcard/match_test.go +++ /dev/null @@ -1,542 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2015, 2016 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package wildcard_test - -import ( - "testing" - - "github.com/minio/minio/pkg/wildcard" -) - -// TestMatch - Tests validate the logic of wild card matching. -// `Match` supports '*' and '?' wildcards. -// Sample usage: In resource matching for bucket policy validation. -func TestMatch(t *testing.T) { - testCases := []struct { - pattern string - text string - matched bool - }{ - // Test case - 1. - // Test case with pattern "*". Expected to match any text. - { - pattern: "*", - text: "s3:GetObject", - matched: true, - }, - // Test case - 2. - // Test case with empty pattern. This only matches empty string. - { - pattern: "", - text: "s3:GetObject", - matched: false, - }, - // Test case - 3. - // Test case with empty pattern. This only matches empty string. - { - pattern: "", - text: "", - matched: true, - }, - // Test case - 4. - // Test case with single "*" at the end. - { - pattern: "s3:*", - text: "s3:ListMultipartUploadParts", - matched: true, - }, - // Test case - 5. - // Test case with a no "*". In this case the pattern and text should be the same. - { - pattern: "s3:ListBucketMultipartUploads", - text: "s3:ListBucket", - matched: false, - }, - // Test case - 6. - // Test case with a no "*". In this case the pattern and text should be the same. - { - pattern: "s3:ListBucket", - text: "s3:ListBucket", - matched: true, - }, - // Test case - 7. - // Test case with a no "*". In this case the pattern and text should be the same. 
- { - pattern: "s3:ListBucketMultipartUploads", - text: "s3:ListBucketMultipartUploads", - matched: true, - }, - // Test case - 8. - // Test case with pattern containing key name with a prefix. Should accept the same text without a "*". - { - pattern: "my-bucket/oo*", - text: "my-bucket/oo", - matched: true, - }, - // Test case - 9. - // Test case with "*" at the end of the pattern. - { - pattern: "my-bucket/In*", - text: "my-bucket/India/Karnataka/", - matched: true, - }, - // Test case - 10. - // Test case with prefixes shuffled. - // This should fail. - { - pattern: "my-bucket/In*", - text: "my-bucket/Karnataka/India/", - matched: false, - }, - // Test case - 11. - // Test case with text expanded to the wildcards in the pattern. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/Karnataka/Ban", - matched: true, - }, - // Test case - 12. - // Test case with the keyname part is repeated as prefix several times. - // This is valid. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/Karnataka/Ban/Ban/Ban/Ban/Ban", - matched: true, - }, - // Test case - 13. - // Test case to validate that `*` can be expanded into multiple prefixes. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/Karnataka/Area1/Area2/Area3/Ban", - matched: true, - }, - // Test case - 14. - // Test case to validate that `*` can be expanded into multiple prefixes. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/State1/State2/Karnataka/Area1/Area2/Area3/Ban", - matched: true, - }, - // Test case - 15. - // Test case where the keyname part of the pattern is expanded in the text. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/Karnataka/Bangalore", - matched: false, - }, - // Test case - 16. - // Test case with prefixes and wildcard expanded for all "*". - { - pattern: "my-bucket/In*/Ka*/Ban*", - text: "my-bucket/India/Karnataka/Bangalore", - matched: true, - }, - // Test case - 17. - // Test case with keyname part being a wildcard in the pattern. - { - pattern: "my-bucket/*", - text: "my-bucket/India", - matched: true, - }, - // Test case - 18. - { - pattern: "my-bucket/oo*", - text: "my-bucket/odo", - matched: false, - }, - - // Test case with pattern containing wildcard '?'. - // Test case - 19. - // "my-bucket?/" matches "my-bucket1/", "my-bucket2/", "my-bucket3" etc... - // doesn't match "mybucket/". - { - pattern: "my-bucket?/abc*", - text: "mybucket/abc", - matched: false, - }, - // Test case - 20. - { - pattern: "my-bucket?/abc*", - text: "my-bucket1/abc", - matched: true, - }, - // Test case - 21. - { - pattern: "my-?-bucket/abc*", - text: "my--bucket/abc", - matched: false, - }, - // Test case - 22. - { - pattern: "my-?-bucket/abc*", - text: "my-1-bucket/abc", - matched: true, - }, - // Test case - 23. - { - pattern: "my-?-bucket/abc*", - text: "my-k-bucket/abc", - matched: true, - }, - // Test case - 24. - { - pattern: "my??bucket/abc*", - text: "mybucket/abc", - matched: false, - }, - // Test case - 25. - { - pattern: "my??bucket/abc*", - text: "my4abucket/abc", - matched: true, - }, - // Test case - 26. - { - pattern: "my-bucket?abc*", - text: "my-bucket/abc", - matched: true, - }, - // Test case 27-28. - // '?' matches '/' too. (works with s3). - // This is because the namespace is considered flat. - // "abc?efg" matches both "abcdefg" and "abc/efg". 
- { - pattern: "my-bucket/abc?efg", - text: "my-bucket/abcdefg", - matched: true, - }, - { - pattern: "my-bucket/abc?efg", - text: "my-bucket/abc/efg", - matched: true, - }, - // Test case - 29. - { - pattern: "my-bucket/abc????", - text: "my-bucket/abc", - matched: false, - }, - // Test case - 30. - { - pattern: "my-bucket/abc????", - text: "my-bucket/abcde", - matched: false, - }, - // Test case - 31. - { - pattern: "my-bucket/abc????", - text: "my-bucket/abcdefg", - matched: true, - }, - // Test case 32-34. - // test case with no '*'. - { - pattern: "my-bucket/abc?", - text: "my-bucket/abc", - matched: false, - }, - { - pattern: "my-bucket/abc?", - text: "my-bucket/abcd", - matched: true, - }, - { - pattern: "my-bucket/abc?", - text: "my-bucket/abcde", - matched: false, - }, - // Test case 35. - { - pattern: "my-bucket/mnop*?", - text: "my-bucket/mnop", - matched: false, - }, - // Test case 36. - { - pattern: "my-bucket/mnop*?", - text: "my-bucket/mnopqrst/mnopqr", - matched: true, - }, - // Test case 37. - { - pattern: "my-bucket/mnop*?", - text: "my-bucket/mnopqrst/mnopqrs", - matched: true, - }, - // Test case 38. - { - pattern: "my-bucket/mnop*?", - text: "my-bucket/mnop", - matched: false, - }, - // Test case 39. - { - pattern: "my-bucket/mnop*?", - text: "my-bucket/mnopq", - matched: true, - }, - // Test case 40. - { - pattern: "my-bucket/mnop*?", - text: "my-bucket/mnopqr", - matched: true, - }, - // Test case 41. - { - pattern: "my-bucket/mnop*?and", - text: "my-bucket/mnopqand", - matched: true, - }, - // Test case 42. - { - pattern: "my-bucket/mnop*?and", - text: "my-bucket/mnopand", - matched: false, - }, - // Test case 43. - { - pattern: "my-bucket/mnop*?and", - text: "my-bucket/mnopqand", - matched: true, - }, - // Test case 44. - { - pattern: "my-bucket/mnop*?", - text: "my-bucket/mn", - matched: false, - }, - // Test case 45. - { - pattern: "my-bucket/mnop*?", - text: "my-bucket/mnopqrst/mnopqrs", - matched: true, - }, - // Test case 46. - { - pattern: "my-bucket/mnop*??", - text: "my-bucket/mnopqrst", - matched: true, - }, - // Test case 47. - { - pattern: "my-bucket/mnop*qrst", - text: "my-bucket/mnopabcdegqrst", - matched: true, - }, - // Test case 48. - { - pattern: "my-bucket/mnop*?and", - text: "my-bucket/mnopqand", - matched: true, - }, - // Test case 49. - { - pattern: "my-bucket/mnop*?and", - text: "my-bucket/mnopand", - matched: false, - }, - // Test case 50. - { - pattern: "my-bucket/mnop*?and?", - text: "my-bucket/mnopqanda", - matched: true, - }, - // Test case 51. - { - pattern: "my-bucket/mnop*?and", - text: "my-bucket/mnopqanda", - matched: false, - }, - // Test case 52. - - { - pattern: "my-?-bucket/abc*", - text: "my-bucket/mnopqanda", - matched: false, - }, - } - // Iterating over the test cases, call the function under test and asert the output. - for i, testCase := range testCases { - actualResult := wildcard.Match(testCase.pattern, testCase.text) - if testCase.matched != actualResult { - t.Errorf("Test %d: Expected the result to be `%v`, but instead found it to be `%v`", i+1, testCase.matched, actualResult) - } - } -} - -// TestMatchSimple - Tests validate the logic of wild card matching. -// `MatchSimple` supports matching for only '*' in the pattern string. -func TestMatchSimple(t *testing.T) { - testCases := []struct { - pattern string - text string - matched bool - }{ - // Test case - 1. - // Test case with pattern "*". Expected to match any text. - { - pattern: "*", - text: "s3:GetObject", - matched: true, - }, - // Test case - 2. 
- // Test case with empty pattern. This only matches empty string. - { - pattern: "", - text: "s3:GetObject", - matched: false, - }, - // Test case - 3. - // Test case with empty pattern. This only matches empty string. - { - pattern: "", - text: "", - matched: true, - }, - // Test case - 4. - // Test case with single "*" at the end. - { - pattern: "s3:*", - text: "s3:ListMultipartUploadParts", - matched: true, - }, - // Test case - 5. - // Test case with a no "*". In this case the pattern and text should be the same. - { - pattern: "s3:ListBucketMultipartUploads", - text: "s3:ListBucket", - matched: false, - }, - // Test case - 6. - // Test case with a no "*". In this case the pattern and text should be the same. - { - pattern: "s3:ListBucket", - text: "s3:ListBucket", - matched: true, - }, - // Test case - 7. - // Test case with a no "*". In this case the pattern and text should be the same. - { - pattern: "s3:ListBucketMultipartUploads", - text: "s3:ListBucketMultipartUploads", - matched: true, - }, - // Test case - 8. - // Test case with pattern containing key name with a prefix. Should accept the same text without a "*". - { - pattern: "my-bucket/oo*", - text: "my-bucket/oo", - matched: true, - }, - // Test case - 9. - // Test case with "*" at the end of the pattern. - { - pattern: "my-bucket/In*", - text: "my-bucket/India/Karnataka/", - matched: true, - }, - // Test case - 10. - // Test case with prefixes shuffled. - // This should fail. - { - pattern: "my-bucket/In*", - text: "my-bucket/Karnataka/India/", - matched: false, - }, - // Test case - 11. - // Test case with text expanded to the wildcards in the pattern. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/Karnataka/Ban", - matched: true, - }, - // Test case - 12. - // Test case with the keyname part is repeated as prefix several times. - // This is valid. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/Karnataka/Ban/Ban/Ban/Ban/Ban", - matched: true, - }, - // Test case - 13. - // Test case to validate that `*` can be expanded into multiple prefixes. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/Karnataka/Area1/Area2/Area3/Ban", - matched: true, - }, - // Test case - 14. - // Test case to validate that `*` can be expanded into multiple prefixes. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/State1/State2/Karnataka/Area1/Area2/Area3/Ban", - matched: true, - }, - // Test case - 15. - // Test case where the keyname part of the pattern is expanded in the text. - { - pattern: "my-bucket/In*/Ka*/Ban", - text: "my-bucket/India/Karnataka/Bangalore", - matched: false, - }, - // Test case - 16. - // Test case with prefixes and wildcard expanded for all "*". - { - pattern: "my-bucket/In*/Ka*/Ban*", - text: "my-bucket/India/Karnataka/Bangalore", - matched: true, - }, - // Test case - 17. - // Test case with keyname part being a wildcard in the pattern. - { - pattern: "my-bucket/*", - text: "my-bucket/India", - matched: true, - }, - // Test case - 18. - { - pattern: "my-bucket/oo*", - text: "my-bucket/odo", - matched: false, - }, - // Test case - 11. - { - pattern: "my-bucket/oo?*", - text: "my-bucket/oo???", - matched: true, - }, - // Test case - 12: - { - pattern: "my-bucket/oo??*", - text: "my-bucket/odo", - matched: false, - }, - // Test case - 13: - { - pattern: "?h?*", - text: "?h?hello", - matched: true, - }, - } - // Iterating over the test cases, call the function under test and asert the output. 
- for i, testCase := range testCases { - actualResult := wildcard.MatchSimple(testCase.pattern, testCase.text) - if testCase.matched != actualResult { - t.Errorf("Test %d: Expected the result to be `%v`, but instead found it to be `%v`", i+1, testCase.matched, actualResult) - } - } -} diff --git a/pkg/words/damerau-levenshtein.go b/pkg/words/damerau-levenshtein.go deleted file mode 100644 index 4edf0c2b..00000000 --- a/pkg/words/damerau-levenshtein.go +++ /dev/null @@ -1,64 +0,0 @@ -/* - * MinIO Client (C) 2014, 2015, 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package words - -import "math" - -// Returns the minimum value of a slice of integers -func minimum(integers []int) (minVal int) { - minVal = math.MaxInt32 - for _, v := range integers { - if v < minVal { - minVal = v - } - } - return -} - -// DamerauLevenshteinDistance calculates distance between two strings using an algorithm -// described in https://en.wikipedia.org/wiki/Damerau-Levenshtein_distance -func DamerauLevenshteinDistance(a string, b string) int { - var cost int - d := make([][]int, len(a)+1) - for i := 1; i <= len(a)+1; i++ { - d[i-1] = make([]int, len(b)+1) - } - for i := 0; i <= len(a); i++ { - d[i][0] = i - } - for j := 0; j <= len(b); j++ { - d[0][j] = j - } - for i := 1; i <= len(a); i++ { - for j := 1; j <= len(b); j++ { - if a[i-1] == b[j-1] { - cost = 0 - } else { - cost = 1 - } - d[i][j] = minimum([]int{ - d[i-1][j] + 1, - d[i][j-1] + 1, - d[i-1][j-1] + cost, - }) - if i > 1 && j > 1 && a[i-1] == b[j-2] && a[i-2] == b[j-1] { - d[i][j] = minimum([]int{d[i][j], d[i-2][j-2] + cost}) // transposition - } - } - } - return d[len(a)][len(b)] -} diff --git a/pkg/words/damerau-levenshtein_test.go b/pkg/words/damerau-levenshtein_test.go deleted file mode 100644 index e072ce08..00000000 --- a/pkg/words/damerau-levenshtein_test.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - * MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package words - -import ( - "math" - "testing" -) - -// Test minimum function which calculates the minimal value in a list of integers -func TestMinimum(t *testing.T) { - type testCase struct { - listval []int - expected int - } - testCases := []testCase{ - {listval: []int{3, 4, 15}, expected: 3}, - {listval: []int{}, expected: math.MaxInt32}, - } - // Validate all the test cases. 
- for i, tt := range testCases { - val := minimum(tt.listval) - if val != tt.expected { - t.Errorf("Test %d:, Expected %d, got %d", i+1, tt.expected, val) - } - } -} - -// Test DamerauLevenshtein which calculates the difference distance between two words -func TestDamerauLevenshtein(t *testing.T) { - type testCase struct { - word1 string - word2 string - distance int - } - testCases := []testCase{ - {word1: "", word2: "", distance: 0}, - {word1: "a", word2: "a", distance: 0}, - {word1: "a", word2: "b", distance: 1}, - {word1: "rm", word2: "tm", distance: 1}, - {word1: "version", word2: "evrsion", distance: 1}, - {word1: "version", word2: "bersio", distance: 2}, - } - // Validate all the test cases. - for i, tt := range testCases { - d := DamerauLevenshteinDistance(tt.word1, tt.word2) - if d != tt.distance { - t.Errorf("Test %d:, Expected %d, got %d", i+1, tt.distance, d) - } - } -}
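// Editorial note (not part of the deleted files): DamerauLevenshteinDistance
// above is the usual "did you mean?" helper. A minimal sketch that picks the
// closest known command for a mistyped one (the command list and the distance
// threshold of 2 are hypothetical):

package main

import (
	"fmt"
	"math"

	"github.com/minio/minio/pkg/words"
)

func main() {
	commands := []string{"server", "gateway", "version", "update"}
	typed := "verison"

	best, bestDist := "", math.MaxInt
	for _, cmd := range commands {
		if d := words.DamerauLevenshteinDistance(typed, cmd); d < bestDist {
			best, bestDist = cmd, d
		}
	}
	if bestDist <= 2 {
		fmt.Printf("unknown command %q, did you mean %q?\n", typed, best)
	}
}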