forked from TrueCloudLab/frostfs-s3-gw
Remove pkg-legacy subpackage
This commit is contained in:
parent c49d2824a1
commit 28fa75fb69
368 changed files with 0 additions and 62905 deletions
@@ -1,258 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package auth

import (
	"crypto/rand"
	"crypto/subtle"
	"encoding/base64"
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
	"strings"
	"time"

	jwtgo "github.com/dgrijalva/jwt-go"
)

const (
	// Minimum length for MinIO access key.
	accessKeyMinLen = 3

	// Maximum length for MinIO access key.
	// There is no max length enforcement for access keys
	accessKeyMaxLen = 20

	// Minimum length for MinIO secret key for both server and gateway mode.
	secretKeyMinLen = 8

	// Maximum secret key length for MinIO, this
	// is used when autogenerating new credentials.
	// There is no max length enforcement for secret keys
	secretKeyMaxLen = 40

	// Alpha numeric table used for generating access keys.
	alphaNumericTable = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"

	// Total length of the alpha numeric table.
	alphaNumericTableLen = byte(len(alphaNumericTable))
)

// Common errors generated for access and secret key validation.
var (
	ErrInvalidAccessKeyLength = fmt.Errorf("access key must be minimum %v or more characters long", accessKeyMinLen)
	ErrInvalidSecretKeyLength = fmt.Errorf("secret key must be minimum %v or more characters long", secretKeyMinLen)
)

// IsAccessKeyValid - validate access key for right length.
func IsAccessKeyValid(accessKey string) bool {
	return len(accessKey) >= accessKeyMinLen
}

// IsSecretKeyValid - validate secret key for right length.
func IsSecretKeyValid(secretKey string) bool {
	return len(secretKey) >= secretKeyMinLen
}

// Default access and secret keys.
const (
	DefaultAccessKey = "minioadmin"
	DefaultSecretKey = "minioadmin"
)

// Default access credentials
var (
	DefaultCredentials = Credentials{
		AccessKey: DefaultAccessKey,
		SecretKey: DefaultSecretKey,
	}
)

// Credentials holds access and secret keys.
type Credentials struct {
	AccessKey    string    `xml:"AccessKeyId" json:"accessKey,omitempty"`
	SecretKey    string    `xml:"SecretAccessKey" json:"secretKey,omitempty"`
	Expiration   time.Time `xml:"Expiration" json:"expiration,omitempty"`
	SessionToken string    `xml:"SessionToken" json:"sessionToken,omitempty"`
	Status       string    `xml:"-" json:"status,omitempty"`
	ParentUser   string    `xml:"-" json:"parentUser,omitempty"`
	Groups       []string  `xml:"-" json:"groups,omitempty"`
}

func (cred Credentials) String() string {
	var s strings.Builder
	s.WriteString(cred.AccessKey)
	s.WriteString(":")
	s.WriteString(cred.SecretKey)
	if cred.SessionToken != "" {
		s.WriteString("\n")
		s.WriteString(cred.SessionToken)
	}
	if !cred.Expiration.IsZero() && !cred.Expiration.Equal(timeSentinel) {
		s.WriteString("\n")
		s.WriteString(cred.Expiration.String())
	}
	return s.String()
}

// IsExpired - returns whether Credential is expired or not.
func (cred Credentials) IsExpired() bool {
	if cred.Expiration.IsZero() || cred.Expiration.Equal(timeSentinel) {
		return false
	}

	return cred.Expiration.Before(time.Now().UTC())
}

// IsTemp - returns whether credential is temporary or not.
func (cred Credentials) IsTemp() bool {
	return cred.SessionToken != "" && !cred.Expiration.IsZero() && !cred.Expiration.Equal(timeSentinel)
}

// IsServiceAccount - returns whether credential is a service account or not
func (cred Credentials) IsServiceAccount() bool {
	return cred.ParentUser != "" && (cred.Expiration.IsZero() || cred.Expiration.Equal(timeSentinel))
}

// IsValid - returns whether credential is valid or not.
func (cred Credentials) IsValid() bool {
	// Verify credentials if its enabled or not set.
	if cred.Status == "off" {
		return false
	}
	return IsAccessKeyValid(cred.AccessKey) && IsSecretKeyValid(cred.SecretKey) && !cred.IsExpired()
}

// Equal - returns whether two credentials are equal or not.
func (cred Credentials) Equal(ccred Credentials) bool {
	if !ccred.IsValid() {
		return false
	}
	return (cred.AccessKey == ccred.AccessKey && subtle.ConstantTimeCompare([]byte(cred.SecretKey), []byte(ccred.SecretKey)) == 1 &&
		subtle.ConstantTimeCompare([]byte(cred.SessionToken), []byte(ccred.SessionToken)) == 1)
}

var timeSentinel = time.Unix(0, 0).UTC()

// ErrInvalidDuration invalid token expiry
var ErrInvalidDuration = errors.New("invalid token expiry")

// ExpToInt64 - convert input interface value to int64.
func ExpToInt64(expI interface{}) (expAt int64, err error) {
	switch exp := expI.(type) {
	case string:
		expAt, err = strconv.ParseInt(exp, 10, 64)
	case float64:
		expAt, err = int64(exp), nil
	case int64:
		expAt, err = exp, nil
	case int:
		expAt, err = int64(exp), nil
	case uint64:
		expAt, err = int64(exp), nil
	case uint:
		expAt, err = int64(exp), nil
	case json.Number:
		expAt, err = exp.Int64()
	case time.Duration:
		expAt, err = time.Now().UTC().Add(exp).Unix(), nil
	case nil:
		expAt, err = 0, nil
	default:
		expAt, err = 0, ErrInvalidDuration
	}
	if expAt < 0 {
		return 0, ErrInvalidDuration
	}
	return expAt, err
}

// GetNewCredentialsWithMetadata generates and returns new credential with expiry.
func GetNewCredentialsWithMetadata(m map[string]interface{}, tokenSecret string) (cred Credentials, err error) {
	readBytes := func(size int) (data []byte, err error) {
		data = make([]byte, size)
		var n int
		if n, err = rand.Read(data); err != nil {
			return nil, err
		} else if n != size {
			return nil, fmt.Errorf("Not enough data. Expected to read: %v bytes, got: %v bytes", size, n)
		}
		return data, nil
	}

	// Generate access key.
	keyBytes, err := readBytes(accessKeyMaxLen)
	if err != nil {
		return cred, err
	}
	for i := 0; i < accessKeyMaxLen; i++ {
		keyBytes[i] = alphaNumericTable[keyBytes[i]%alphaNumericTableLen]
	}
	cred.AccessKey = string(keyBytes)

	// Generate secret key.
	keyBytes, err = readBytes(secretKeyMaxLen)
	if err != nil {
		return cred, err
	}
	cred.SecretKey = strings.Replace(string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen]),
		"/", "+", -1)
	cred.Status = "on"

	if tokenSecret == "" {
		cred.Expiration = timeSentinel
		return cred, nil
	}

	expiry, err := ExpToInt64(m["exp"])
	if err != nil {
		return cred, err
	}

	m["accessKey"] = cred.AccessKey
	jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m))

	cred.Expiration = time.Unix(expiry, 0).UTC()
	cred.SessionToken, err = jwt.SignedString([]byte(tokenSecret))
	if err != nil {
		return cred, err
	}

	return cred, nil
}

// GetNewCredentials generates and returns new credential.
func GetNewCredentials() (cred Credentials, err error) {
	return GetNewCredentialsWithMetadata(map[string]interface{}{}, "")
}

// CreateCredentials returns new credential with the given access key and secret key.
// Error is returned if given access key or secret key are invalid length.
func CreateCredentials(accessKey, secretKey string) (cred Credentials, err error) {
	if !IsAccessKeyValid(accessKey) {
		return cred, ErrInvalidAccessKeyLength
	}
	if !IsSecretKeyValid(secretKey) {
		return cred, ErrInvalidSecretKeyLength
	}
	cred.AccessKey = accessKey
	cred.SecretKey = secretKey
	cred.Expiration = timeSentinel
	cred.Status = "on"
	return cred, nil
}
@@ -1,181 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2017 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package auth

import (
	"encoding/json"
	"testing"
	"time"
)

func TestExpToInt64(t *testing.T) {
	testCases := []struct {
		exp             interface{}
		expectedFailure bool
	}{
		{"", true},
		{"-1", true},
		{"1574812326", false},
		{1574812326, false},
		{int64(1574812326), false},
		{int(1574812326), false},
		{uint(1574812326), false},
		{uint64(1574812326), false},
		{json.Number("1574812326"), false},
		{1574812326.000, false},
		{time.Duration(3) * time.Minute, false},
	}

	for _, testCase := range testCases {
		testCase := testCase
		t.Run("", func(t *testing.T) {
			_, err := ExpToInt64(testCase.exp)
			if err != nil && !testCase.expectedFailure {
				t.Errorf("Expected success but got failure %s", err)
			}
			if err == nil && testCase.expectedFailure {
				t.Error("Expected failure but got success")
			}
		})
	}
}

func TestIsAccessKeyValid(t *testing.T) {
	testCases := []struct {
		accessKey      string
		expectedResult bool
	}{
		{alphaNumericTable[:accessKeyMinLen], true},
		{alphaNumericTable[:accessKeyMinLen+1], true},
		{alphaNumericTable[:accessKeyMinLen-1], false},
	}

	for i, testCase := range testCases {
		result := IsAccessKeyValid(testCase.accessKey)
		if result != testCase.expectedResult {
			t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
		}
	}
}

func TestIsSecretKeyValid(t *testing.T) {
	testCases := []struct {
		secretKey      string
		expectedResult bool
	}{
		{alphaNumericTable[:secretKeyMinLen], true},
		{alphaNumericTable[:secretKeyMinLen+1], true},
		{alphaNumericTable[:secretKeyMinLen-1], false},
	}

	for i, testCase := range testCases {
		result := IsSecretKeyValid(testCase.secretKey)
		if result != testCase.expectedResult {
			t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
		}
	}
}

func TestGetNewCredentials(t *testing.T) {
	cred, err := GetNewCredentials()
	if err != nil {
		t.Fatalf("Failed to get a new credential")
	}
	if !cred.IsValid() {
		t.Fatalf("Failed to get new valid credential")
	}
	if len(cred.AccessKey) != accessKeyMaxLen {
		t.Fatalf("access key length: expected: %v, got: %v", secretKeyMaxLen, len(cred.AccessKey))
	}
	if len(cred.SecretKey) != secretKeyMaxLen {
		t.Fatalf("secret key length: expected: %v, got: %v", secretKeyMaxLen, len(cred.SecretKey))
	}
}

func TestCreateCredentials(t *testing.T) {
	testCases := []struct {
		accessKey   string
		secretKey   string
		valid       bool
		expectedErr error
	}{
		// Valid access and secret keys with minimum length.
		{alphaNumericTable[:accessKeyMinLen], alphaNumericTable[:secretKeyMinLen], true, nil},
		// Valid access and/or secret keys are longer than minimum length.
		{alphaNumericTable[:accessKeyMinLen+1], alphaNumericTable[:secretKeyMinLen+1], true, nil},
		// Smaller access key.
		{alphaNumericTable[:accessKeyMinLen-1], alphaNumericTable[:secretKeyMinLen], false, ErrInvalidAccessKeyLength},
		// Smaller secret key.
		{alphaNumericTable[:accessKeyMinLen], alphaNumericTable[:secretKeyMinLen-1], false, ErrInvalidSecretKeyLength},
	}

	for i, testCase := range testCases {
		cred, err := CreateCredentials(testCase.accessKey, testCase.secretKey)

		if err != nil {
			if testCase.expectedErr == nil {
				t.Fatalf("test %v: error: expected = <nil>, got = %v", i+1, err)
			}
			if testCase.expectedErr.Error() != err.Error() {
				t.Fatalf("test %v: error: expected = %v, got = %v", i+1, testCase.expectedErr, err)
			}
		} else {
			if testCase.expectedErr != nil {
				t.Fatalf("test %v: error: expected = %v, got = <nil>", i+1, testCase.expectedErr)
			}
			if !cred.IsValid() {
				t.Fatalf("test %v: got invalid credentials", i+1)
			}
		}
	}
}

func TestCredentialsEqual(t *testing.T) {
	cred, err := GetNewCredentials()
	if err != nil {
		t.Fatalf("Failed to get a new credential")
	}
	cred2, err := GetNewCredentials()
	if err != nil {
		t.Fatalf("Failed to get a new credential")
	}
	testCases := []struct {
		cred           Credentials
		ccred          Credentials
		expectedResult bool
	}{
		// Same credentials.
		{cred, cred, true},
		// Empty credentials to compare.
		{cred, Credentials{}, false},
		// Empty credentials.
		{Credentials{}, cred, false},
		// Two different credentials.
		{cred, cred2, false},
		// Access key is different in credentials to compare.
		{cred, Credentials{AccessKey: "myuser", SecretKey: cred.SecretKey}, false},
		// Secret key is different in credentials to compare.
		{cred, Credentials{AccessKey: cred.AccessKey, SecretKey: "mypassword"}, false},
	}

	for i, testCase := range testCases {
		result := testCase.cred.Equal(testCase.ccred)
		if result != testCase.expectedResult {
			t.Fatalf("test %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
		}
	}
}
@@ -1,77 +0,0 @@
// Original work https://github.com/oxtoacart/bpool borrowed
// only bpool.go licensed under Apache 2.0.

// This file modifies original bpool.go to add one more option
// to provide []byte capacity for better GC management.

/*
 * MinIO Cloud Storage (C) 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package bpool

// BytePoolCap implements a leaky pool of []byte in the form of a bounded channel.
type BytePoolCap struct {
	c    chan []byte
	w    int
	wcap int
}

// NewBytePoolCap creates a new BytePool bounded to the given maxSize, with new
// byte arrays sized based on width.
func NewBytePoolCap(maxSize int, width int, capwidth int) (bp *BytePoolCap) {
	return &BytePoolCap{
		c:    make(chan []byte, maxSize),
		w:    width,
		wcap: capwidth,
	}
}

// Get gets a []byte from the BytePool, or creates a new one if none are
// available in the pool.
func (bp *BytePoolCap) Get() (b []byte) {
	select {
	case b = <-bp.c:
		// reuse existing buffer
	default:
		// create new buffer
		if bp.wcap > 0 {
			b = make([]byte, bp.w, bp.wcap)
		} else {
			b = make([]byte, bp.w)
		}
	}
	return
}

// Put returns the given Buffer to the BytePool.
func (bp *BytePoolCap) Put(b []byte) {
	select {
	case bp.c <- b:
		// buffer went back into pool
	default:
		// buffer didn't go back into pool, just discard
	}
}

// Width returns the width of the byte arrays in this pool.
func (bp *BytePoolCap) Width() (n int) {
	return bp.w
}

// WidthCap returns the cap width of the byte arrays in this pool.
func (bp *BytePoolCap) WidthCap() (n int) {
	return bp.wcap
}
@@ -1,96 +0,0 @@
// Original work https://github.com/oxtoacart/bpool borrowed
// only bpool.go licensed under Apache 2.0.

// This file modifies original bpool.go to add one more option
// to provide []byte capacity for better GC management.

/*
 * MinIO Cloud Storage (C) 2018 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package bpool

import "testing"

// Tests - bytePool functionality.
func TestBytePool(t *testing.T) {
	var size = 4
	var width = 10
	var capWidth = 16

	bufPool := NewBytePoolCap(size, width, capWidth)

	// Check the width
	if bufPool.Width() != width {
		t.Fatalf("bytepool width invalid: got %v want %v", bufPool.Width(), width)
	}

	// Check with width cap
	if bufPool.WidthCap() != capWidth {
		t.Fatalf("bytepool capWidth invalid: got %v want %v", bufPool.WidthCap(), capWidth)
	}

	// Check that retrieved buffer are of the expected width
	b := bufPool.Get()
	if len(b) != width {
		t.Fatalf("bytepool length invalid: got %v want %v", len(b), width)
	}
	if cap(b) != capWidth {
		t.Fatalf("bytepool length invalid: got %v want %v", cap(b), capWidth)
	}

	bufPool.Put(b)

	// Fill the pool beyond the capped pool size.
	for i := 0; i < size*2; i++ {
		bufPool.Put(make([]byte, bufPool.w))
	}

	b = bufPool.Get()
	if len(b) != width {
		t.Fatalf("bytepool length invalid: got %v want %v", len(b), width)
	}
	if cap(b) != capWidth {
		t.Fatalf("bytepool length invalid: got %v want %v", cap(b), capWidth)
	}

	bufPool.Put(b)

	// Close the channel so we can iterate over it.
	close(bufPool.c)

	// Check the size of the pool.
	if len(bufPool.c) != size {
		t.Fatalf("bytepool size invalid: got %v want %v", len(bufPool.c), size)
	}

	bufPoolNoCap := NewBytePoolCap(size, width, 0)
	// Check the width
	if bufPoolNoCap.Width() != width {
		t.Fatalf("bytepool width invalid: got %v want %v", bufPool.Width(), width)
	}

	// Check with width cap
	if bufPoolNoCap.WidthCap() != 0 {
		t.Fatalf("bytepool capWidth invalid: got %v want %v", bufPool.WidthCap(), 0)
	}
	b = bufPoolNoCap.Get()
	if len(b) != width {
		t.Fatalf("bytepool length invalid: got %v want %v", len(b), width)
	}
	if cap(b) != width {
		t.Fatalf("bytepool length invalid: got %v want %v", cap(b), width)
	}
}
@@ -1,111 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"encoding/xml"
	"errors"
	"io"
)

const (
	// AES256 is used with SSE-S3
	AES256 SSEAlgorithm = "AES256"
	// AWSKms is used with SSE-KMS
	AWSKms SSEAlgorithm = "aws:kms"
)

// SSEAlgorithm - represents valid SSE algorithms supported; currently only AES256 is supported
type SSEAlgorithm string

// UnmarshalXML - Unmarshals XML tag to valid SSE algorithm
func (alg *SSEAlgorithm) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
	var s string
	if err := d.DecodeElement(&s, &start); err != nil {
		return err
	}

	switch s {
	case string(AES256):
		*alg = AES256
	case string(AWSKms):
		*alg = AWSKms
	default:
		return errors.New("Unknown SSE algorithm")
	}

	return nil
}

// MarshalXML - Marshals given SSE algorithm to valid XML
func (alg *SSEAlgorithm) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	return e.EncodeElement(string(*alg), start)
}

// EncryptionAction - for ApplyServerSideEncryptionByDefault XML tag
type EncryptionAction struct {
	Algorithm   SSEAlgorithm `xml:"SSEAlgorithm,omitempty"`
	MasterKeyID string       `xml:"KMSMasterKeyID,omitempty"`
}

// SSERule - for ServerSideEncryptionConfiguration XML tag
type SSERule struct {
	DefaultEncryptionAction EncryptionAction `xml:"ApplyServerSideEncryptionByDefault"`
}

const xmlNS = "http://s3.amazonaws.com/doc/2006-03-01/"

// BucketSSEConfig - represents default bucket encryption configuration
type BucketSSEConfig struct {
	XMLNS   string    `xml:"xmlns,attr,omitempty"`
	XMLName xml.Name  `xml:"ServerSideEncryptionConfiguration"`
	Rules   []SSERule `xml:"Rule"`
}

// ParseBucketSSEConfig - Decodes given XML to a valid default bucket encryption config
func ParseBucketSSEConfig(r io.Reader) (*BucketSSEConfig, error) {
	var config BucketSSEConfig
	err := xml.NewDecoder(r).Decode(&config)
	if err != nil {
		return nil, err
	}

	// Validates server-side encryption config rules
	// Only one rule is allowed on AWS S3
	if len(config.Rules) != 1 {
		return nil, errors.New("only one server-side encryption rule is allowed at a time")
	}

	for _, rule := range config.Rules {
		switch rule.DefaultEncryptionAction.Algorithm {
		case AES256:
			if rule.DefaultEncryptionAction.MasterKeyID != "" {
				return nil, errors.New("MasterKeyID is allowed with aws:kms only")
			}
		case AWSKms:
			if rule.DefaultEncryptionAction.MasterKeyID == "" {
				return nil, errors.New("MasterKeyID is missing with aws:kms")
			}
		}
	}

	if config.XMLNS == "" {
		config.XMLNS = xmlNS
	}

	return &config, nil
}
@@ -1,140 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"encoding/xml"
	"errors"
	"testing"
)

// TestParseBucketSSEConfig performs basic sanity tests on ParseBucketSSEConfig
func TestParseBucketSSEConfig(t *testing.T) {
	actualAES256NoNSConfig := &BucketSSEConfig{
		XMLName: xml.Name{
			Local: "ServerSideEncryptionConfiguration",
		},
		Rules: []SSERule{
			{
				DefaultEncryptionAction: EncryptionAction{
					Algorithm: AES256,
				},
			},
		},
	}

	actualAES256Config := &BucketSSEConfig{
		XMLNS: xmlNS,
		XMLName: xml.Name{
			Local: "ServerSideEncryptionConfiguration",
		},
		Rules: []SSERule{
			{
				DefaultEncryptionAction: EncryptionAction{
					Algorithm: AES256,
				},
			},
		},
	}

	actualKMSConfig := &BucketSSEConfig{
		XMLNS: xmlNS,
		XMLName: xml.Name{
			Local: "ServerSideEncryptionConfiguration",
		},
		Rules: []SSERule{
			{
				DefaultEncryptionAction: EncryptionAction{
					Algorithm:   AWSKms,
					MasterKeyID: "arn:aws:kms:us-east-1:1234/5678example",
				},
			},
		},
	}

	testCases := []struct {
		inputXML       string
		expectedErr    error
		shouldPass     bool
		expectedConfig *BucketSSEConfig
	}{
		// 1. Valid XML SSE-S3
		{
			inputXML:       `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`,
			expectedErr:    nil,
			shouldPass:     true,
			expectedConfig: actualAES256Config,
		},
		// 2. Valid XML SSE-KMS
		{
			inputXML:       `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>aws:kms</SSEAlgorithm><KMSMasterKeyID>arn:aws:kms:us-east-1:1234/5678example</KMSMasterKeyID></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`,
			expectedErr:    nil,
			shouldPass:     true,
			expectedConfig: actualKMSConfig,
		},
		// 3. Invalid - more than one rule
		{
			inputXML:    `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`,
			expectedErr: errors.New("only one server-side encryption rule is allowed at a time"),
			shouldPass:  false,
		},
		// 4. Invalid XML - master key ID present along with AES256
		{
			inputXML:    `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm><KMSMasterKeyID>arn:aws:kms:us-east-1:1234/5678example</KMSMasterKeyID></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`,
			expectedErr: errors.New("MasterKeyID is allowed with aws:kms only"),
			shouldPass:  false,
		},
		// 5. Invalid XML - master key ID not provided when algorithm is set to aws:kms algorithm
		{
			inputXML:    `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>aws:kms</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`,
			expectedErr: errors.New("MasterKeyID is missing with aws:kms"),
			shouldPass:  false,
		},
		// 6. Invalid Algorithm
		{
			inputXML:    `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>InvalidAlgorithm</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`,
			expectedErr: errors.New("Unknown SSE algorithm"),
			shouldPass:  false,
		},
		// 7. Valid XML without the namespace set
		{
			inputXML:       `<ServerSideEncryptionConfiguration><Rule><ApplyServerSideEncryptionByDefault><SSEAlgorithm>AES256</SSEAlgorithm></ApplyServerSideEncryptionByDefault></Rule></ServerSideEncryptionConfiguration>`,
			expectedErr:    nil,
			shouldPass:     true,
			expectedConfig: actualAES256NoNSConfig,
		},
	}

	for i, tc := range testCases {
		_, err := ParseBucketSSEConfig(bytes.NewReader([]byte(tc.inputXML)))
		if tc.shouldPass && err != nil {
			t.Fatalf("Test case %d: Expected to succeed but got %s", i+1, err)
		}

		if !tc.shouldPass {
			if err == nil || err != nil && err.Error() != tc.expectedErr.Error() {
				t.Fatalf("Test case %d: Expected %s but got %s", i+1, tc.expectedErr, err)
			}
			continue
		}

		if expectedXML, err := xml.Marshal(tc.expectedConfig); err != nil || !bytes.Equal(expectedXML, []byte(tc.inputXML)) {
			t.Fatalf("Test case %d: Expected bucket encryption XML %s but got %s", i+1, string(expectedXML), tc.inputXML)
		}
	}
}
@@ -1,24 +0,0 @@
// Code generated by "stringer -type Action lifecycle.go"; DO NOT EDIT.

package lifecycle

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[NoneAction-0]
	_ = x[DeleteAction-1]
}

const _Action_name = "NoneActionDeleteAction"

var _Action_index = [...]uint8{0, 10, 22}

func (i Action) String() string {
	if i < 0 || i >= Action(len(_Action_index)-1) {
		return "Action(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Action_name[_Action_index[i]:_Action_index[i+1]]
}
@@ -1,62 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package lifecycle

import (
	"encoding/xml"
)

// And - a tag to combine a prefix and multiple tags for lifecycle configuration rule.
type And struct {
	XMLName xml.Name `xml:"And"`
	Prefix  string   `xml:"Prefix,omitempty"`
	Tags    []Tag    `xml:"Tag,omitempty"`
}

var errDuplicateTagKey = Errorf("Duplicate Tag Keys are not allowed")

// isEmpty returns true if Tags field is null
func (a And) isEmpty() bool {
	return len(a.Tags) == 0 && a.Prefix == ""
}

// Validate - validates the And field
func (a And) Validate() error {
	if a.ContainsDuplicateTag() {
		return errDuplicateTagKey
	}
	for _, t := range a.Tags {
		if err := t.Validate(); err != nil {
			return err
		}
	}
	return nil
}

// ContainsDuplicateTag - returns true if duplicate keys are present in And
func (a And) ContainsDuplicateTag() bool {
	x := make(map[string]struct{}, len(a.Tags))

	for _, t := range a.Tags {
		if _, has := x[t.Key]; has {
			return true
		}
		x[t.Key] = struct{}{}
	}

	return false
}
@@ -1,44 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package lifecycle

import (
	"fmt"
)

// Error is the generic type for any error happening during tag
// parsing.
type Error struct {
	err error
}

// Errorf - formats according to a format specifier and returns
// the string as a value that satisfies error of type tagging.Error
func Errorf(format string, a ...interface{}) error {
	return Error{err: fmt.Errorf(format, a...)}
}

// Unwrap the internal error.
func (e Error) Unwrap() error { return e.err }

// Error 'error' compatible method.
func (e Error) Error() string {
	if e.err == nil {
		return "lifecycle: cause <nil>"
	}
	return e.err.Error()
}
@@ -1,133 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package lifecycle

import (
	"encoding/xml"
	"time"
)

var (
	errLifecycleInvalidDate       = Errorf("Date must be provided in ISO 8601 format")
	errLifecycleInvalidDays       = Errorf("Days must be positive integer when used with Expiration")
	errLifecycleInvalidExpiration = Errorf("At least one of Days or Date should be present inside Expiration")
	errLifecycleDateNotMidnight   = Errorf("'Date' must be at midnight GMT")
)

// ExpirationDays is a type alias to unmarshal Days in Expiration
type ExpirationDays int

// UnmarshalXML parses number of days from Expiration and validates if
// greater than zero
func (eDays *ExpirationDays) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
	var numDays int
	err := d.DecodeElement(&numDays, &startElement)
	if err != nil {
		return err
	}
	if numDays <= 0 {
		return errLifecycleInvalidDays
	}
	*eDays = ExpirationDays(numDays)
	return nil
}

// MarshalXML encodes number of days to expire if it is non-zero and
// encodes empty string otherwise
func (eDays *ExpirationDays) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
	if *eDays == ExpirationDays(0) {
		return nil
	}
	return e.EncodeElement(int(*eDays), startElement)
}

// ExpirationDate is an embedded type containing time.Time to unmarshal
// Date in Expiration
type ExpirationDate struct {
	time.Time
}

// UnmarshalXML parses date from Expiration and validates date format
func (eDate *ExpirationDate) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
	var dateStr string
	err := d.DecodeElement(&dateStr, &startElement)
	if err != nil {
		return err
	}
	// While AWS documentation mentions that the date specified
	// must be present in ISO 8601 format, in reality they allow
	// users to provide RFC 3339 compliant dates.
	expDate, err := time.Parse(time.RFC3339, dateStr)
	if err != nil {
		return errLifecycleInvalidDate
	}
	// Allow only date timestamp specifying midnight GMT
	hr, min, sec := expDate.Clock()
	nsec := expDate.Nanosecond()
	loc := expDate.Location()
	if !(hr == 0 && min == 0 && sec == 0 && nsec == 0 && loc.String() == time.UTC.String()) {
		return errLifecycleDateNotMidnight
	}

	*eDate = ExpirationDate{expDate}
	return nil
}

// MarshalXML encodes expiration date if it is non-zero and encodes
// empty string otherwise
func (eDate *ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
	if *eDate == (ExpirationDate{time.Time{}}) {
		return nil
	}
	return e.EncodeElement(eDate.Format(time.RFC3339), startElement)
}

// Expiration - expiration actions for a rule in lifecycle configuration.
type Expiration struct {
	XMLName xml.Name       `xml:"Expiration"`
	Days    ExpirationDays `xml:"Days,omitempty"`
	Date    ExpirationDate `xml:"Date,omitempty"`
}

// Validate - validates the "Expiration" element
func (e Expiration) Validate() error {
	// Neither expiration days nor date is specified
	if e.IsDaysNull() && e.IsDateNull() {
		return errLifecycleInvalidExpiration
	}

	// Both expiration days and date are specified
	if !e.IsDaysNull() && !e.IsDateNull() {
		return errLifecycleInvalidExpiration
	}
	return nil
}

// IsDaysNull returns true if days field is null
func (e Expiration) IsDaysNull() bool {
	return e.Days == ExpirationDays(0)
}

// IsDateNull returns true if date field is null
func (e Expiration) IsDateNull() bool {
	return e.Date.Time.IsZero()
}

// IsNull returns true if both date and days fields are null
func (e Expiration) IsNull() bool {
	return e.IsDaysNull() && e.IsDateNull()
}
@@ -1,105 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package lifecycle

import (
	"encoding/xml"
	"fmt"
	"testing"
)

// TestInvalidExpiration checks if parsing invalid Expiration elements returns
// appropriate errors on validation
func TestInvalidExpiration(t *testing.T) {
	testCases := []struct {
		inputXML    string
		expectedErr error
	}{
		{ // Expiration with zero days
			inputXML: `<Expiration>
				<Days>0</Days>
				</Expiration>`,
			expectedErr: errLifecycleInvalidDays,
		},
		{ // Expiration with invalid date
			inputXML: `<Expiration>
				<Date>invalid date</Date>
				</Expiration>`,
			expectedErr: errLifecycleInvalidDate,
		},
		{ // Expiration with a date that is not at midnight GMT
			inputXML: `<Expiration>
				<Date>2019-04-20T00:01:00Z</Date>
				</Expiration>`,
			expectedErr: errLifecycleDateNotMidnight,
		},
	}

	for i, tc := range testCases {
		t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
			var expiration Expiration
			err := xml.Unmarshal([]byte(tc.inputXML), &expiration)
			if err != tc.expectedErr {
				t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err)
			}
		})
	}

	validationTestCases := []struct {
		inputXML    string
		expectedErr error
	}{
		{ // Expiration with a valid ISO 8601 date
			inputXML: `<Expiration>
				<Date>2019-04-20T00:00:00Z</Date>
				</Expiration>`,
			expectedErr: nil,
		},
		{ // Expiration with a valid number of days
			inputXML: `<Expiration>
				<Days>3</Days>
				</Expiration>`,
			expectedErr: nil,
		},
		{ // Expiration with neither number of days nor a date
			inputXML: `<Expiration>
				</Expiration>`,
			expectedErr: errLifecycleInvalidExpiration,
		},
		{ // Expiration with both number of days and a date
			inputXML: `<Expiration>
				<Days>3</Days>
				<Date>2019-04-20T00:00:00Z</Date>
				</Expiration>`,
			expectedErr: errLifecycleInvalidExpiration,
		},
	}
	for i, tc := range validationTestCases {
		t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
			var expiration Expiration
			err := xml.Unmarshal([]byte(tc.inputXML), &expiration)
			if err != nil {
				t.Fatalf("%d: %v", i+1, err)
			}

			err = expiration.Validate()
			if err != tc.expectedErr {
				t.Fatalf("%d: %v", i+1, err)
			}
		})
	}
}
@@ -1,116 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package lifecycle

import (
	"encoding/xml"
)

var (
	errInvalidFilter = Errorf("Filter must have exactly one of Prefix, Tag, or And specified")
)

// Filter - a filter for a lifecycle configuration Rule.
type Filter struct {
	XMLName xml.Name `xml:"Filter"`
	Prefix  string
	And     And
	Tag     Tag

	// Caching tags, only once
	cachedTags []string
}

// MarshalXML - produces the xml representation of the Filter struct
// only one of Prefix, And and Tag should be present in the output.
func (f Filter) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	if err := e.EncodeToken(start); err != nil {
		return err
	}

	switch {
	case !f.And.isEmpty():
		if err := e.EncodeElement(f.And, xml.StartElement{Name: xml.Name{Local: "And"}}); err != nil {
			return err
		}
	case !f.Tag.IsEmpty():
		if err := e.EncodeElement(f.Tag, xml.StartElement{Name: xml.Name{Local: "Tag"}}); err != nil {
			return err
		}
	default:
		// Always print Prefix field when both And & Tag are empty
		if err := e.EncodeElement(f.Prefix, xml.StartElement{Name: xml.Name{Local: "Prefix"}}); err != nil {
			return err
		}
	}

	return e.EncodeToken(xml.EndElement{Name: start.Name})
}

// Validate - validates the filter element
func (f Filter) Validate() error {
	// A Filter must have exactly one of Prefix, Tag, or And specified.
	if !f.And.isEmpty() {
		if f.Prefix != "" {
			return errInvalidFilter
		}
		if !f.Tag.IsEmpty() {
			return errInvalidFilter
		}
		if err := f.And.Validate(); err != nil {
			return err
		}
	}
	if f.Prefix != "" {
		if !f.Tag.IsEmpty() {
			return errInvalidFilter
		}
	}
	if !f.Tag.IsEmpty() {
		if err := f.Tag.Validate(); err != nil {
			return err
		}
	}
	return nil
}

// TestTags tests if the object tags satisfy the Filter tags requirement,
// it returns true if there is no tags in the underlying Filter.
func (f Filter) TestTags(tags []string) bool {
	if f.cachedTags == nil {
		tags := make([]string, 0)
		for _, t := range append(f.And.Tags, f.Tag) {
			if !t.IsEmpty() {
				tags = append(tags, t.String())
			}
		}
		f.cachedTags = tags
	}
	for _, ct := range f.cachedTags {
		foundTag := false
		for _, t := range tags {
			if ct == t {
				foundTag = true
				break
			}
		}
		if !foundTag {
			return false
		}
	}
	return true
}
@@ -1,124 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package lifecycle

import (
	"encoding/xml"
	"fmt"
	"testing"
)

// TestUnsupportedFilters checks if parsing Filter xml with
// unsupported elements returns appropriate errors
func TestUnsupportedFilters(t *testing.T) {
	testCases := []struct {
		inputXML    string
		expectedErr error
	}{
		{ // Filter with And tags
			inputXML: `<Filter>
				<And>
					<Prefix>key-prefix</Prefix>
				</And>
				</Filter>`,
			expectedErr: nil,
		},
		{ // Filter with Tag tags
			inputXML: `<Filter>
				<Tag>
					<Key>key1</Key>
					<Value>value1</Value>
				</Tag>
				</Filter>`,
			expectedErr: nil,
		},
		{ // Filter with Prefix tag
			inputXML: `<Filter>
				<Prefix>key-prefix</Prefix>
				</Filter>`,
			expectedErr: nil,
		},
		{ // Filter without And and multiple Tag tags
			inputXML: `<Filter>
				<Prefix>key-prefix</Prefix>
				<Tag>
					<Key>key1</Key>
					<Value>value1</Value>
				</Tag>
				<Tag>
					<Key>key2</Key>
					<Value>value2</Value>
				</Tag>
				</Filter>`,
			expectedErr: errInvalidFilter,
		},
		{ // Filter with And, Prefix & multiple Tag tags
			inputXML: `<Filter>
				<And>
					<Prefix>key-prefix</Prefix>
					<Tag>
						<Key>key1</Key>
						<Value>value1</Value>
					</Tag>
					<Tag>
						<Key>key2</Key>
						<Value>value2</Value>
					</Tag>
				</And>
				</Filter>`,
			expectedErr: nil,
		},
		{ // Filter with And and multiple Tag tags
			inputXML: `<Filter>
				<And>
					<Tag>
						<Key>key1</Key>
						<Value>value1</Value>
					</Tag>
					<Tag>
						<Key>key2</Key>
						<Value>value2</Value>
					</Tag>
				</And>
				</Filter>`,
			expectedErr: nil,
		},
		{ // Filter without And and single Tag tag
			inputXML: `<Filter>
				<Prefix>key-prefix</Prefix>
				<Tag>
					<Key>key1</Key>
					<Value>value1</Value>
				</Tag>
				</Filter>`,
			expectedErr: errInvalidFilter,
		},
	}
	for i, tc := range testCases {
		t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
			var filter Filter
			err := xml.Unmarshal([]byte(tc.inputXML), &filter)
			if err != nil {
				t.Fatalf("%d: Expected no error but got %v", i+1, err)
			}
			err = filter.Validate()
			if err != tc.expectedErr {
				t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err)
			}
		})
	}
}
@ -1,204 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
errLifecycleTooManyRules = Errorf("Lifecycle configuration allows a maximum of 1000 rules")
|
||||
errLifecycleNoRule = Errorf("Lifecycle configuration should have at least one rule")
|
||||
errLifecycleOverlappingPrefix = Errorf("Lifecycle configuration has rules with overlapping prefix")
|
||||
)
|
||||
|
||||
// Action represents a delete action or other transition
|
||||
// actions that will be implemented later.
|
||||
type Action int
|
||||
|
||||
//go:generate stringer -type Action $GOFILE
|
||||
|
||||
const (
|
||||
// NoneAction means no action required after evaluting lifecycle rules
|
||||
NoneAction Action = iota
|
||||
// DeleteAction means the object needs to be removed after evaluting lifecycle rules
|
||||
DeleteAction
|
||||
)
|
||||
|
||||
// Lifecycle - Configuration for bucket lifecycle.
|
||||
type Lifecycle struct {
|
||||
XMLName xml.Name `xml:"LifecycleConfiguration"`
|
||||
Rules []Rule `xml:"Rule"`
|
||||
}
|
||||
|
||||
// HasActiveRules - returns whether policy has active rules for.
|
||||
// Optionally a prefix can be supplied.
|
||||
// If recursive is specified the function will also return true if any level below the
|
||||
// prefix has active rules. If no prefix is specified recursive is effectively true.
|
||||
func (lc Lifecycle) HasActiveRules(prefix string, recursive bool) bool {
|
||||
if len(lc.Rules) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, rule := range lc.Rules {
|
||||
if rule.Status == Disabled {
|
||||
continue
|
||||
}
|
||||
if len(prefix) > 0 && len(rule.Filter.Prefix) > 0 {
|
||||
// incoming prefix must be in rule prefix
|
||||
if !recursive && !strings.HasPrefix(prefix, rule.Filter.Prefix) {
|
||||
continue
|
||||
}
|
||||
// If recursive, we can skip this rule if it doesn't match the tested prefix.
|
||||
if recursive && !strings.HasPrefix(rule.Filter.Prefix, prefix) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if rule.NoncurrentVersionExpiration.NoncurrentDays > 0 {
|
||||
return true
|
||||
}
|
||||
if rule.NoncurrentVersionTransition.NoncurrentDays > 0 {
|
||||
return true
|
||||
}
|
||||
if rule.Expiration.IsNull() {
|
||||
continue
|
||||
}
|
||||
if !rule.Expiration.IsDateNull() && rule.Expiration.Date.After(time.Now()) {
|
||||
continue
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// ParseLifecycleConfig - parses data in given reader to Lifecycle.
|
||||
func ParseLifecycleConfig(reader io.Reader) (*Lifecycle, error) {
|
||||
var lc Lifecycle
|
||||
if err := xml.NewDecoder(reader).Decode(&lc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &lc, nil
|
||||
}
|
||||
|
||||
// Validate - validates the lifecycle configuration
|
||||
func (lc Lifecycle) Validate() error {
|
||||
// Lifecycle config can't have more than 1000 rules
|
||||
if len(lc.Rules) > 1000 {
|
||||
return errLifecycleTooManyRules
|
||||
}
|
||||
// Lifecycle config should have at least one rule
|
||||
if len(lc.Rules) == 0 {
|
||||
return errLifecycleNoRule
|
||||
}
|
||||
// Validate all the rules in the lifecycle config
|
||||
for _, r := range lc.Rules {
|
||||
if err := r.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Compare every rule's prefix with every other rule's prefix
|
||||
for i := range lc.Rules {
|
||||
if i == len(lc.Rules)-1 {
|
||||
break
|
||||
}
|
||||
// N B Empty prefixes overlap with all prefixes
|
||||
otherRules := lc.Rules[i+1:]
|
||||
for _, otherRule := range otherRules {
|
||||
if strings.HasPrefix(lc.Rules[i].Prefix(), otherRule.Prefix()) ||
|
||||
strings.HasPrefix(otherRule.Prefix(), lc.Rules[i].Prefix()) {
|
||||
return errLifecycleOverlappingPrefix
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// FilterActionableRules returns the rules actions that need to be executed
|
||||
// after evaluating prefix/tag filtering
|
||||
func (lc Lifecycle) FilterActionableRules(objName, objTags string) []Rule {
|
||||
if objName == "" {
|
||||
return nil
|
||||
}
|
||||
var rules []Rule
|
||||
for _, rule := range lc.Rules {
|
||||
if rule.Status == Disabled {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(objName, rule.Prefix()) {
|
||||
continue
|
||||
}
|
||||
tags := strings.Split(objTags, "&")
|
||||
if rule.Filter.TestTags(tags) {
|
||||
rules = append(rules, rule)
|
||||
}
|
||||
}
|
||||
return rules
|
||||
}
|
||||
|
||||
// ComputeAction returns the action to perform by evaluating all lifecycle rules
|
||||
// against the object name and its modification time.
|
||||
func (lc Lifecycle) ComputeAction(objName, objTags string, modTime time.Time) (action Action) {
|
||||
action = NoneAction
|
||||
if modTime.IsZero() {
|
||||
return
|
||||
}
|
||||
|
||||
_, expiryTime := lc.PredictExpiryTime(objName, modTime, objTags)
|
||||
if !expiryTime.IsZero() && time.Now().After(expiryTime) {
|
||||
return DeleteAction
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// expectedExpiryTime calculates the expiry date/time based on an object modtime.
// The expected expiry time is always a midnight time following the object
// modification time plus the number of expiration days.
// e.g. If the object modtime is `Thu May 21 13:42:50 GMT 2020` and the object should
// expire in 1 day, then the expected expiry time is `Sat, 23 May 2020 00:00:00 GMT`
func expectedExpiryTime(modTime time.Time, days ExpirationDays) time.Time {
	t := modTime.UTC().Add(time.Duration(days+1) * 24 * time.Hour)
	return t.Truncate(24 * time.Hour)
}
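// Worked example (illustrative only): with days = 1 and a modtime of
// 2020-05-21T13:42:50Z, Add((1+1)*24h) yields 2020-05-23T13:42:50Z and
// Truncate(24 * time.Hour) rounds that down to midnight, 2020-05-23T00:00:00Z:
//
//	modTime := time.Date(2020, time.May, 21, 13, 42, 50, 0, time.UTC)
//	_ = expectedExpiryTime(modTime, 1) // 2020-05-23 00:00:00 +0000 UTC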
|
||||
|
||||
// PredictExpiryTime returns the expiry date/time of a given object
// after evaluating the current lifecycle document.
|
||||
func (lc Lifecycle) PredictExpiryTime(objName string, modTime time.Time, objTags string) (string, time.Time) {
|
||||
var finalExpiryDate time.Time
|
||||
var finalExpiryRuleID string
|
||||
|
||||
// Iterate over all actionable rules and find the earliest
|
||||
// expiration date and its associated rule ID.
|
||||
for _, rule := range lc.FilterActionableRules(objName, objTags) {
|
||||
if !rule.Expiration.IsDateNull() {
|
||||
if finalExpiryDate.IsZero() || finalExpiryDate.After(rule.Expiration.Date.Time) {
|
||||
finalExpiryRuleID = rule.ID
|
||||
finalExpiryDate = rule.Expiration.Date.Time
|
||||
}
|
||||
}
|
||||
if !rule.Expiration.IsDaysNull() {
|
||||
expectedExpiry := expectedExpiryTime(modTime, rule.Expiration.Days)
|
||||
if finalExpiryDate.IsZero() || finalExpiryDate.After(expectedExpiry) {
|
||||
finalExpiryRuleID = rule.ID
|
||||
finalExpiryDate = expectedExpiry
|
||||
}
|
||||
}
|
||||
}
|
||||
return finalExpiryRuleID, finalExpiryDate
|
||||
}
|
|
@ -1,348 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestParseAndValidateLifecycleConfig(t *testing.T) {
|
||||
// Test for lifecycle config with more than 1000 rules
|
||||
var manyRules []Rule
|
||||
rule := Rule{
|
||||
Status: "Enabled",
|
||||
Expiration: Expiration{Days: ExpirationDays(3)},
|
||||
}
|
||||
for i := 0; i < 1001; i++ {
|
||||
manyRules = append(manyRules, rule)
|
||||
}
|
||||
|
||||
manyRuleLcConfig, err := xml.Marshal(Lifecycle{Rules: manyRules})
|
||||
if err != nil {
|
||||
t.Fatal("Failed to marshal lifecycle config with more than 1000 rules")
|
||||
}
|
||||
|
||||
// Test for lifecycle config with rules containing overlapping prefixes
|
||||
rule1 := Rule{
|
||||
Status: "Enabled",
|
||||
Expiration: Expiration{Days: ExpirationDays(3)},
|
||||
Filter: Filter{
|
||||
Prefix: "/a/b",
|
||||
},
|
||||
}
|
||||
rule2 := Rule{
|
||||
Status: "Enabled",
|
||||
Expiration: Expiration{Days: ExpirationDays(3)},
|
||||
Filter: Filter{
|
||||
And: And{
|
||||
Prefix: "/a/b/c",
|
||||
},
|
||||
},
|
||||
}
|
||||
overlappingRules := []Rule{rule1, rule2}
|
||||
overlappingLcConfig, err := xml.Marshal(Lifecycle{Rules: overlappingRules})
|
||||
if err != nil {
|
||||
t.Fatal("Failed to marshal lifecycle config with rules having overlapping prefix")
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
inputConfig string
|
||||
expectedParsingErr error
|
||||
expectedValidationErr error
|
||||
}{
|
||||
{ // Valid lifecycle config
|
||||
inputConfig: `<LifecycleConfiguration>
|
||||
<Rule>
|
||||
<Filter>
|
||||
<Prefix>prefix</Prefix>
|
||||
</Filter>
|
||||
<Status>Enabled</Status>
|
||||
<Expiration><Days>3</Days></Expiration>
|
||||
</Rule>
|
||||
<Rule>
|
||||
<Filter>
|
||||
<Prefix>another-prefix</Prefix>
|
||||
</Filter>
|
||||
<Status>Enabled</Status>
|
||||
<Expiration><Days>3</Days></Expiration>
|
||||
</Rule>
|
||||
</LifecycleConfiguration>`,
|
||||
expectedParsingErr: nil,
|
||||
expectedValidationErr: nil,
|
||||
},
|
||||
{ // Valid lifecycle config
|
||||
inputConfig: `<LifecycleConfiguration>
|
||||
<Rule>
|
||||
<Filter>
|
||||
<And><Tag><Key>key1</Key><Value>val1</Value><Key>key2</Key><Value>val2</Value></Tag></And>
|
||||
</Filter>
|
||||
<Expiration><Days>3</Days></Expiration>
|
||||
</Rule>
|
||||
</LifecycleConfiguration>`,
|
||||
expectedParsingErr: errDuplicatedXMLTag,
|
||||
expectedValidationErr: nil,
|
||||
},
|
||||
{ // lifecycle config with no rules
|
||||
inputConfig: `<LifecycleConfiguration>
|
||||
</LifecycleConfiguration>`,
|
||||
expectedParsingErr: nil,
|
||||
expectedValidationErr: errLifecycleNoRule,
|
||||
},
|
||||
{ // lifecycle config with more than 1000 rules
|
||||
inputConfig: string(manyRuleLcConfig),
|
||||
expectedParsingErr: nil,
|
||||
expectedValidationErr: errLifecycleTooManyRules,
|
||||
},
|
||||
{ // lifecycle config with rules having overlapping prefix
|
||||
inputConfig: string(overlappingLcConfig),
|
||||
expectedParsingErr: nil,
|
||||
expectedValidationErr: errLifecycleOverlappingPrefix,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
|
||||
lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig)))
|
||||
if err != tc.expectedParsingErr {
|
||||
t.Fatalf("%d: Expected %v during parsing but got %v", i+1, tc.expectedParsingErr, err)
|
||||
}
|
||||
if tc.expectedParsingErr != nil {
|
||||
// We already expect a parsing error,
|
||||
// no need to continue this test.
|
||||
return
|
||||
}
|
||||
err = lc.Validate()
|
||||
if err != tc.expectedValidationErr {
|
||||
t.Fatalf("%d: Expected %v during parsing but got %v", i+1, tc.expectedValidationErr, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestMarshalLifecycleConfig checks if lifecycle config XML
// marshaling and unmarshaling can handle each other's output
|
||||
func TestMarshalLifecycleConfig(t *testing.T) {
|
||||
// Time at midnight UTC
|
||||
midnightTS := ExpirationDate{time.Date(2019, time.April, 20, 0, 0, 0, 0, time.UTC)}
|
||||
lc := Lifecycle{
|
||||
Rules: []Rule{
|
||||
{
|
||||
Status: "Enabled",
|
||||
Filter: Filter{Prefix: "prefix-1"},
|
||||
Expiration: Expiration{Days: ExpirationDays(3)},
|
||||
},
|
||||
{
|
||||
Status: "Enabled",
|
||||
Filter: Filter{Prefix: "prefix-1"},
|
||||
Expiration: Expiration{Date: ExpirationDate(midnightTS)},
|
||||
},
|
||||
},
|
||||
}
|
||||
b, err := xml.MarshalIndent(&lc, "", "\t")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var lc1 Lifecycle
|
||||
err = xml.Unmarshal(b, &lc1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
ruleSet := make(map[string]struct{})
|
||||
for _, rule := range lc.Rules {
|
||||
ruleBytes, err := xml.Marshal(rule)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ruleSet[string(ruleBytes)] = struct{}{}
|
||||
}
|
||||
for _, rule := range lc1.Rules {
|
||||
ruleBytes, err := xml.Marshal(rule)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if _, ok := ruleSet[string(ruleBytes)]; !ok {
|
||||
t.Fatalf("Expected %v to be equal to %v, %v missing", lc, lc1, rule)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestExpectedExpiryTime(t *testing.T) {
|
||||
testCases := []struct {
|
||||
modTime time.Time
|
||||
days ExpirationDays
|
||||
expected time.Time
|
||||
}{
|
||||
{
|
||||
time.Date(2020, time.March, 15, 10, 10, 10, 0, time.UTC),
|
||||
4,
|
||||
time.Date(2020, time.March, 20, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
{
|
||||
time.Date(2020, time.March, 15, 0, 0, 0, 0, time.UTC),
|
||||
1,
|
||||
time.Date(2020, time.March, 17, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
|
||||
got := expectedExpiryTime(tc.modTime, tc.days)
|
||||
if got != tc.expected {
|
||||
t.Fatalf("Expected %v to be equal to %v", got, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestComputeActions(t *testing.T) {
|
||||
testCases := []struct {
|
||||
inputConfig string
|
||||
objectName string
|
||||
objectTags string
|
||||
objectModTime time.Time
|
||||
expectedAction Action
|
||||
}{
|
||||
// Empty object name (unexpected case) should always return NoneAction
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>prefix</Prefix></Filter><Status>Enabled</Status><Expiration><Days>5</Days></Expiration></Rule></LifecycleConfiguration>`,
|
||||
expectedAction: NoneAction,
|
||||
},
|
||||
// Disabled should always return NoneAction
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Disabled</Status><Expiration><Days>5</Days></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foodir/fooobject",
|
||||
objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago
|
||||
expectedAction: NoneAction,
|
||||
},
|
||||
// No modTime, should be none-action
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>5</Days></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foodir/fooobject",
|
||||
expectedAction: NoneAction,
|
||||
},
|
||||
// Prefix not matched
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>5</Days></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foxdir/fooobject",
|
||||
objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago
|
||||
expectedAction: NoneAction,
|
||||
},
|
||||
// Too early to remove (test Days)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>5</Days></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foxdir/fooobject",
|
||||
objectModTime: time.Now().UTC().Add(-10 * 24 * time.Hour), // Created 10 days ago
|
||||
expectedAction: NoneAction,
|
||||
},
|
||||
// Should remove (test Days)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>5</Days></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foodir/fooobject",
|
||||
objectModTime: time.Now().UTC().Add(-6 * 24 * time.Hour), // Created 6 days ago
|
||||
expectedAction: DeleteAction,
|
||||
},
|
||||
// Too early to remove (test Date)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foodir/fooobject",
|
||||
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
|
||||
expectedAction: NoneAction,
|
||||
},
|
||||
// Should remove (test Date)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><Prefix>foodir/</Prefix></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foodir/fooobject",
|
||||
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
|
||||
expectedAction: DeleteAction,
|
||||
},
|
||||
// Should remove (Tags match)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Prefix>foodir/</Prefix><Tag><Key>tag1</Key><Value>value1</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foodir/fooobject",
|
||||
objectTags: "tag1=value1&tag2=value2",
|
||||
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
|
||||
expectedAction: DeleteAction,
|
||||
},
|
||||
// Should remove (Multiple Rules, Tags match)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Prefix>foodir/</Prefix><Tag><Key>tag1</Key><Value>value1</Value></Tag><Tag><Key>tag2</Key><Value>value2</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule><Rule><Filter><And><Prefix>abc/</Prefix><Tag><Key>tag2</Key><Value>value</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foodir/fooobject",
|
||||
objectTags: "tag1=value1&tag2=value2",
|
||||
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
|
||||
expectedAction: DeleteAction,
|
||||
},
|
||||
// Should remove (Tags match)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Prefix>foodir/</Prefix><Tag><Key>tag1</Key><Value>value1</Value></Tag><Tag><Key>tag2</Key><Value>value2</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foodir/fooobject",
|
||||
objectTags: "tag1=value1&tag2=value2",
|
||||
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
|
||||
expectedAction: DeleteAction,
|
||||
},
|
||||
// Should remove (Tags match with inverted order)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Tag><Key>factory</Key><Value>true</Value></Tag><Tag><Key>storeforever</Key><Value>false</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "fooobject",
|
||||
objectTags: "storeforever=false&factory=true",
|
||||
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
|
||||
expectedAction: DeleteAction,
|
||||
},
|
||||
|
||||
// Should not remove (Tags don't match)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Prefix>foodir/</Prefix><Tag><Key>tag</Key><Value>value1</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foodir/fooobject",
|
||||
objectTags: "tag1=value1",
|
||||
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
|
||||
expectedAction: NoneAction,
|
||||
},
|
||||
// Should not remove (Tags match, but prefix doesn't match)
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Filter><And><Prefix>foodir/</Prefix><Tag><Key>tag1</Key><Value>value1</Value></Tag></And></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foxdir/fooobject",
|
||||
objectTags: "tag1=value1",
|
||||
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
|
||||
expectedAction: NoneAction,
|
||||
},
|
||||
// Should remove, the second rule has expiration kicked in
|
||||
{
|
||||
inputConfig: `<LifecycleConfiguration><Rule><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule><Rule><Filter><Prefix>foxdir/</Prefix></Filter><Status>Enabled</Status><Expiration><Date>` + time.Now().Truncate(24*time.Hour).UTC().Add(-24*time.Hour).Format(time.RFC3339) + `</Date></Expiration></Rule></LifecycleConfiguration>`,
|
||||
objectName: "foxdir/fooobject",
|
||||
objectModTime: time.Now().UTC().Add(-24 * time.Hour), // Created 1 day ago
|
||||
expectedAction: DeleteAction,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
|
||||
lc, err := ParseLifecycleConfig(bytes.NewReader([]byte(tc.inputConfig)))
|
||||
if err != nil {
|
||||
t.Fatalf("%d: Got unexpected error: %v", i+1, err)
|
||||
}
|
||||
if resultAction := lc.ComputeAction(tc.objectName, tc.objectTags, tc.objectModTime); resultAction != tc.expectedAction {
|
||||
t.Fatalf("%d: Expected action: `%v`, got: `%v`", i+1, tc.expectedAction, resultAction)
|
||||
}
|
||||
})
|
||||
|
||||
}
|
||||
}
|
|
@ -1,64 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
)
|
||||
|
||||
// NoncurrentVersionExpiration - an action for lifecycle configuration rule.
|
||||
type NoncurrentVersionExpiration struct {
|
||||
XMLName xml.Name `xml:"NoncurrentVersionExpiration"`
|
||||
NoncurrentDays int `xml:"NoncurrentDays,omitempty"`
|
||||
}
|
||||
|
||||
// NoncurrentVersionTransition - an action for lifecycle configuration rule.
|
||||
type NoncurrentVersionTransition struct {
|
||||
NoncurrentDays int `xml:"NoncurrentDays"`
|
||||
StorageClass string `xml:"StorageClass"`
|
||||
}
|
||||
|
||||
var (
|
||||
errNoncurrentVersionExpirationUnsupported = Errorf("Specifying <NoncurrentVersionExpiration></NoncurrentVersionExpiration> is not supported")
|
||||
errNoncurrentVersionTransitionUnsupported = Errorf("Specifying <NoncurrentVersionTransition></NoncurrentVersionTransition> is not supported")
|
||||
)
|
||||
|
||||
// UnmarshalXML is extended to indicate lack of support for
|
||||
// NoncurrentVersionExpiration xml tag in object lifecycle
|
||||
// configuration
|
||||
func (n NoncurrentVersionExpiration) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
|
||||
return errNoncurrentVersionExpirationUnsupported
|
||||
}
|
||||
|
||||
// UnmarshalXML is extended to indicate lack of support for
|
||||
// NoncurrentVersionTransition xml tag in object lifecycle
|
||||
// configuration
|
||||
func (n NoncurrentVersionTransition) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
|
||||
return errNoncurrentVersionTransitionUnsupported
|
||||
}
|
||||
|
||||
// MarshalXML is extended to leave out
|
||||
// <NoncurrentVersionTransition></NoncurrentVersionTransition> tags
|
||||
func (n NoncurrentVersionTransition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalXML is extended to leave out
|
||||
// <NoncurrentVersionExpiration></NoncurrentVersionExpiration> tags
|
||||
func (n NoncurrentVersionExpiration) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
return nil
|
||||
}
|
|
@ -1,135 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
)
|
||||
|
||||
// Status represents lifecycle configuration status
|
||||
type Status string
|
||||
|
||||
// Supported status types
|
||||
const (
|
||||
Enabled Status = "Enabled"
|
||||
Disabled Status = "Disabled"
|
||||
)
|
||||
|
||||
// Rule - a rule for lifecycle configuration.
|
||||
type Rule struct {
|
||||
XMLName xml.Name `xml:"Rule"`
|
||||
ID string `xml:"ID,omitempty"`
|
||||
Status Status `xml:"Status"`
|
||||
Filter Filter `xml:"Filter,omitempty"`
|
||||
Expiration Expiration `xml:"Expiration,omitempty"`
|
||||
Transition Transition `xml:"Transition,omitempty"`
|
||||
// FIXME: add a type to catch unsupported AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
|
||||
NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
|
||||
NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
|
||||
}
|
||||
|
||||
var (
|
||||
errInvalidRuleID = Errorf("ID must be less than 255 characters")
|
||||
errEmptyRuleStatus = Errorf("Status should not be empty")
|
||||
errInvalidRuleStatus = Errorf("Status must be set to either Enabled or Disabled")
|
||||
errMissingExpirationAction = Errorf("No expiration action found")
|
||||
)
|
||||
|
||||
// validateID - checks if ID is valid or not.
|
||||
func (r Rule) validateID() error {
|
||||
// cannot be longer than 255 characters
|
||||
if len(string(r.ID)) > 255 {
|
||||
return errInvalidRuleID
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateStatus - checks if status is valid or not.
|
||||
func (r Rule) validateStatus() error {
|
||||
// Status can't be empty
|
||||
if len(r.Status) == 0 {
|
||||
return errEmptyRuleStatus
|
||||
}
|
||||
|
||||
// Status must be one of Enabled or Disabled
|
||||
if r.Status != Enabled && r.Status != Disabled {
|
||||
return errInvalidRuleStatus
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r Rule) validateAction() error {
|
||||
if r.Expiration == (Expiration{}) {
|
||||
return errMissingExpirationAction
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r Rule) validateFilter() error {
|
||||
return r.Filter.Validate()
|
||||
}
|
||||
|
||||
// Prefix - a rule can either have prefix under <filter></filter> or under
|
||||
// <filter><and></and></filter>. This method returns the prefix from the
|
||||
// location where it is available
|
||||
func (r Rule) Prefix() string {
|
||||
if r.Filter.Prefix != "" {
|
||||
return r.Filter.Prefix
|
||||
}
|
||||
if r.Filter.And.Prefix != "" {
|
||||
return r.Filter.And.Prefix
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Tags - a rule can either have tag under <filter></filter> or under
|
||||
// <filter><and></and></filter>. This method returns all the tags from the
|
||||
// rule in the format tag1=value1&tag2=value2
|
||||
func (r Rule) Tags() string {
|
||||
if !r.Filter.Tag.IsEmpty() {
|
||||
return r.Filter.Tag.String()
|
||||
}
|
||||
if len(r.Filter.And.Tags) != 0 {
|
||||
var buf bytes.Buffer
|
||||
for _, t := range r.Filter.And.Tags {
|
||||
if buf.Len() > 0 {
|
||||
buf.WriteString("&")
|
||||
}
|
||||
buf.WriteString(t.String())
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Validate - validates the rule element
|
||||
func (r Rule) Validate() error {
|
||||
if err := r.validateID(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := r.validateStatus(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := r.validateAction(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := r.validateFilter(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,112 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// TestUnsupportedRules checks if Rule XML with unsupported tags returns
// appropriate errors on parsing
|
||||
func TestUnsupportedRules(t *testing.T) {
|
||||
// NoncurrentVersionTransition, NoncurrentVersionExpiration
|
||||
// and Transition tags aren't supported
|
||||
unsupportedTestCases := []struct {
|
||||
inputXML string
|
||||
expectedErr error
|
||||
}{
|
||||
{ // Rule with unsupported NoncurrentVersionTransition
|
||||
inputXML: ` <Rule>
|
||||
<NoncurrentVersionTransition></NoncurrentVersionTransition>
|
||||
</Rule>`,
|
||||
expectedErr: errNoncurrentVersionTransitionUnsupported,
|
||||
},
|
||||
{ // Rule with unsupported NoncurrentVersionExpiration
|
||||
|
||||
inputXML: ` <Rule>
|
||||
<NoncurrentVersionExpiration></NoncurrentVersionExpiration>
|
||||
</Rule>`,
|
||||
expectedErr: errNoncurrentVersionExpirationUnsupported,
|
||||
},
|
||||
{ // Rule with unsupported Transition action
|
||||
inputXML: ` <Rule>
|
||||
<Transition></Transition>
|
||||
</Rule>`,
|
||||
expectedErr: errTransitionUnsupported,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range unsupportedTestCases {
|
||||
t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
|
||||
var rule Rule
|
||||
err := xml.Unmarshal([]byte(tc.inputXML), &rule)
|
||||
if err != tc.expectedErr {
|
||||
t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestInvalidRules checks if Rule xml with invalid elements returns
|
||||
// appropriate errors on validation
|
||||
func TestInvalidRules(t *testing.T) {
|
||||
invalidTestCases := []struct {
|
||||
inputXML string
|
||||
expectedErr error
|
||||
}{
|
||||
{ // Rule without expiration action
|
||||
inputXML: ` <Rule>
|
||||
<Status>Enabled</Status>
|
||||
</Rule>`,
|
||||
expectedErr: errMissingExpirationAction,
|
||||
},
|
||||
{ // Rule with ID longer than 255 characters
|
||||
inputXML: ` <Rule>
|
||||
<ID> babababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababababab </ID>
|
||||
</Rule>`,
|
||||
expectedErr: errInvalidRuleID,
|
||||
},
|
||||
{ // Rule with empty status
|
||||
inputXML: ` <Rule>
|
||||
<Status></Status>
|
||||
</Rule>`,
|
||||
expectedErr: errEmptyRuleStatus,
|
||||
},
|
||||
{ // Rule with invalid status
|
||||
inputXML: ` <Rule>
|
||||
<Status>OK</Status>
|
||||
</Rule>`,
|
||||
expectedErr: errInvalidRuleStatus,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range invalidTestCases {
|
||||
t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
|
||||
var rule Rule
|
||||
err := xml.Unmarshal([]byte(tc.inputXML), &rule)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := rule.Validate(); err != tc.expectedErr {
|
||||
t.Fatalf("%d: Expected %v but got %v", i+1, tc.expectedErr, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Tag - a tag for a lifecycle configuration Rule filter.
|
||||
type Tag struct {
|
||||
XMLName xml.Name `xml:"Tag"`
|
||||
Key string `xml:"Key,omitempty"`
|
||||
Value string `xml:"Value,omitempty"`
|
||||
}
|
||||
|
||||
var (
|
||||
errInvalidTagKey = Errorf("The TagKey you have provided is invalid")
|
||||
errInvalidTagValue = Errorf("The TagValue you have provided is invalid")
|
||||
|
||||
errDuplicatedXMLTag = Errorf("duplicated XML Tag")
|
||||
errUnknownXMLTag = Errorf("unknown XML Tag")
|
||||
)
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (tag *Tag) UnmarshalXML(d *xml.Decoder, start xml.StartElement) (err error) {
|
||||
var keyAlreadyParsed, valueAlreadyParsed bool
|
||||
for {
|
||||
// Read tokens from the XML document in a stream.
|
||||
t, err := d.Token()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
switch se := t.(type) {
|
||||
case xml.StartElement:
|
||||
var s string
|
||||
if err = d.DecodeElement(&s, &se); err != nil {
|
||||
return err
|
||||
}
|
||||
switch se.Name.Local {
|
||||
case "Key":
|
||||
if keyAlreadyParsed {
|
||||
return errDuplicatedXMLTag
|
||||
}
|
||||
tag.Key = s
|
||||
keyAlreadyParsed = true
|
||||
case "Value":
|
||||
if valueAlreadyParsed {
|
||||
return errDuplicatedXMLTag
|
||||
}
|
||||
tag.Value = s
|
||||
valueAlreadyParsed = true
|
||||
default:
|
||||
return errUnknownXMLTag
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (tag Tag) String() string {
|
||||
return tag.Key + "=" + tag.Value
|
||||
}
|
||||
|
||||
// IsEmpty returns whether this tag is empty or not.
|
||||
func (tag Tag) IsEmpty() bool {
|
||||
return tag.Key == ""
|
||||
}
|
||||
|
||||
// Validate checks this tag.
|
||||
func (tag Tag) Validate() error {
|
||||
if len(tag.Key) == 0 || utf8.RuneCountInString(tag.Key) > 128 {
|
||||
return errInvalidTagKey
|
||||
}
|
||||
|
||||
if utf8.RuneCountInString(tag.Value) > 256 {
|
||||
return errInvalidTagValue
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -1,42 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package lifecycle
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
)
|
||||
|
||||
// Transition - transition actions for a rule in lifecycle configuration.
|
||||
type Transition struct {
|
||||
XMLName xml.Name `xml:"Transition"`
|
||||
Days int `xml:"Days,omitempty"`
|
||||
Date string `xml:"Date,omitempty"`
|
||||
StorageClass string `xml:"StorageClass"`
|
||||
}
|
||||
|
||||
var errTransitionUnsupported = Errorf("Specifying <Transition></Transition> tag is not supported")
|
||||
|
||||
// UnmarshalXML is extended to indicate lack of support for Transition
|
||||
// xml tag in object lifecycle configuration
|
||||
func (t Transition) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
return errTransitionUnsupported
|
||||
}
|
||||
|
||||
// MarshalXML is extended to leave out <Transition></Transition> tags
|
||||
func (t Transition) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
|
||||
return nil
|
||||
}
|
|
@ -1,529 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/beevik/ntp"
|
||||
"github.com/minio/minio/legacy/logger"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
)
|
||||
|
||||
// RetMode - object retention mode.
|
||||
type RetMode string
|
||||
|
||||
const (
|
||||
// RetGovernance - governance mode.
|
||||
RetGovernance RetMode = "GOVERNANCE"
|
||||
|
||||
// RetCompliance - compliance mode.
|
||||
RetCompliance RetMode = "COMPLIANCE"
|
||||
)
|
||||
|
||||
// Valid - returns true if the retention mode is valid
|
||||
func (r RetMode) Valid() bool {
|
||||
switch r {
|
||||
case RetGovernance, RetCompliance:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func parseRetMode(modeStr string) (mode RetMode) {
|
||||
switch strings.ToUpper(modeStr) {
|
||||
case "GOVERNANCE":
|
||||
mode = RetGovernance
|
||||
case "COMPLIANCE":
|
||||
mode = RetCompliance
|
||||
}
|
||||
return mode
|
||||
}
|
||||
|
||||
// LegalHoldStatus - object legal hold status.
|
||||
type LegalHoldStatus string
|
||||
|
||||
const (
|
||||
// LegalHoldOn - legal hold is on.
|
||||
LegalHoldOn LegalHoldStatus = "ON"
|
||||
|
||||
// LegalHoldOff - legal hold is off.
|
||||
LegalHoldOff LegalHoldStatus = "OFF"
|
||||
)
|
||||
|
||||
// Valid - returns true if legal hold status has valid values
|
||||
func (l LegalHoldStatus) Valid() bool {
|
||||
switch l {
|
||||
case LegalHoldOn, LegalHoldOff:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func parseLegalHoldStatus(holdStr string) (st LegalHoldStatus) {
|
||||
switch strings.ToUpper(holdStr) {
|
||||
case "ON":
|
||||
st = LegalHoldOn
|
||||
case "OFF":
|
||||
st = LegalHoldOff
|
||||
}
|
||||
return st
|
||||
}
|
||||
|
||||
// Bypass retention governance header.
|
||||
const (
|
||||
AmzObjectLockBypassRetGovernance = "X-Amz-Bypass-Governance-Retention"
|
||||
AmzObjectLockRetainUntilDate = "X-Amz-Object-Lock-Retain-Until-Date"
|
||||
AmzObjectLockMode = "X-Amz-Object-Lock-Mode"
|
||||
AmzObjectLockLegalHold = "X-Amz-Object-Lock-Legal-Hold"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrMalformedBucketObjectConfig - indicates that the bucket object lock config is malformed
|
||||
ErrMalformedBucketObjectConfig = errors.New("invalid bucket object lock config")
|
||||
// ErrInvalidRetentionDate - indicates that retention date needs to be in ISO 8601 format
|
||||
ErrInvalidRetentionDate = errors.New("date must be provided in ISO 8601 format")
|
||||
// ErrPastObjectLockRetainDate - indicates that retention date must be in the future
|
||||
ErrPastObjectLockRetainDate = errors.New("the retain until date must be in the future")
|
||||
// ErrUnknownWORMModeDirective - indicates that the retention mode is invalid
|
||||
ErrUnknownWORMModeDirective = errors.New("unknown WORM mode directive")
|
||||
// ErrObjectLockMissingContentMD5 - indicates missing Content-MD5 header for put object requests with locking
|
||||
ErrObjectLockMissingContentMD5 = errors.New("content-MD5 HTTP header is required for Put Object requests with Object Lock parameters")
|
||||
// ErrObjectLockInvalidHeaders indicates that object lock headers are missing
|
||||
ErrObjectLockInvalidHeaders = errors.New("x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied")
|
||||
// ErrMalformedXML - generic error indicating malformed XML
|
||||
ErrMalformedXML = errors.New("the XML you provided was not well-formed or did not validate against our published schema")
|
||||
)
|
||||
|
||||
const (
|
||||
ntpServerEnv = "MINIO_NTP_SERVER"
|
||||
)
|
||||
|
||||
var (
|
||||
ntpServer = env.Get(ntpServerEnv, "")
|
||||
)
|
||||
|
||||
// UTCNowNTP - is similar in functionality to UTCNow()
// but only used when we do not wish to rely on system
// time.
func UTCNowNTP() (time.Time, error) {
	// ntp server is disabled
	if ntpServer == "" {
		return time.Now().UTC(), nil
	}
	return ntp.Time(ntpServer)
}
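// Illustrative note (assumption): when the MINIO_NTP_SERVER environment variable
// is set, e.g. MINIO_NTP_SERVER=pool.ntp.org, the retention checks below query
// that NTP server instead of trusting the local clock; pool.ntp.org is only an
// example hostname.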
|
||||
|
||||
// Retention - bucket level retention configuration.
|
||||
type Retention struct {
|
||||
Mode RetMode
|
||||
Validity time.Duration
|
||||
LockEnabled bool
|
||||
}
|
||||
|
||||
// Retain - check whether given date is retainable by validity time.
|
||||
func (r Retention) Retain(created time.Time) bool {
|
||||
t, err := UTCNowNTP()
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
// Retain
|
||||
return true
|
||||
}
|
||||
return created.Add(r.Validity).After(t)
|
||||
}
|
||||
|
||||
// DefaultRetention - default retention configuration.
|
||||
type DefaultRetention struct {
|
||||
XMLName xml.Name `xml:"DefaultRetention"`
|
||||
Mode RetMode `xml:"Mode"`
|
||||
Days *uint64 `xml:"Days"`
|
||||
Years *uint64 `xml:"Years"`
|
||||
}
|
||||
|
||||
// Maximum retention days and years supported by AWS S3.
|
||||
const (
|
||||
// This was tested by using the `mc lock` command
|
||||
maximumRetentionDays = 36500
|
||||
maximumRetentionYears = 100
|
||||
)
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (dr *DefaultRetention) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
// Make subtype to avoid recursive UnmarshalXML().
|
||||
type defaultRetention DefaultRetention
|
||||
retention := defaultRetention{}
|
||||
|
||||
if err := d.DecodeElement(&retention, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch retention.Mode {
|
||||
case RetGovernance, RetCompliance:
|
||||
default:
|
||||
return fmt.Errorf("unknown retention mode %v", retention.Mode)
|
||||
}
|
||||
|
||||
if retention.Days == nil && retention.Years == nil {
|
||||
return fmt.Errorf("either Days or Years must be specified")
|
||||
}
|
||||
|
||||
if retention.Days != nil && retention.Years != nil {
|
||||
return fmt.Errorf("either Days or Years must be specified, not both")
|
||||
}
|
||||
|
||||
if retention.Days != nil {
|
||||
if *retention.Days == 0 {
|
||||
return fmt.Errorf("Default retention period must be a positive integer value for 'Days'")
|
||||
}
|
||||
if *retention.Days > maximumRetentionDays {
|
||||
return fmt.Errorf("Default retention period too large for 'Days' %d", *retention.Days)
|
||||
}
|
||||
} else if *retention.Years == 0 {
|
||||
return fmt.Errorf("Default retention period must be a positive integer value for 'Years'")
|
||||
} else if *retention.Years > maximumRetentionYears {
|
||||
return fmt.Errorf("Default retention period too large for 'Years' %d", *retention.Years)
|
||||
}
|
||||
|
||||
*dr = DefaultRetention(retention)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Config - object lock configuration specified in
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/Type_API_ObjectLockConfiguration.html
|
||||
type Config struct {
|
||||
XMLNS string `xml:"xmlns,attr,omitempty"`
|
||||
XMLName xml.Name `xml:"ObjectLockConfiguration"`
|
||||
ObjectLockEnabled string `xml:"ObjectLockEnabled"`
|
||||
Rule *struct {
|
||||
DefaultRetention DefaultRetention `xml:"DefaultRetention"`
|
||||
} `xml:"Rule,omitempty"`
|
||||
}
|
||||
|
||||
// UnmarshalXML - decodes XML data.
|
||||
func (config *Config) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error {
|
||||
// Make subtype to avoid recursive UnmarshalXML().
|
||||
type objectLockConfig Config
|
||||
parsedConfig := objectLockConfig{}
|
||||
|
||||
if err := d.DecodeElement(&parsedConfig, &start); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if parsedConfig.ObjectLockEnabled != "Enabled" {
|
||||
return fmt.Errorf("only 'Enabled' value is allowed to ObjectLockEnabled element")
|
||||
}
|
||||
|
||||
*config = Config(parsedConfig)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ToRetention - convert to Retention type.
|
||||
func (config *Config) ToRetention() Retention {
|
||||
r := Retention{
|
||||
LockEnabled: config.ObjectLockEnabled == "Enabled",
|
||||
}
|
||||
if config.Rule != nil {
|
||||
r.Mode = config.Rule.DefaultRetention.Mode
|
||||
|
||||
t, err := UTCNowNTP()
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
// Do not change any configuration
|
||||
// upon NTP failure.
|
||||
return r
|
||||
}
|
||||
|
||||
if config.Rule.DefaultRetention.Days != nil {
|
||||
r.Validity = t.AddDate(0, 0, int(*config.Rule.DefaultRetention.Days)).Sub(t)
|
||||
} else {
|
||||
r.Validity = t.AddDate(int(*config.Rule.DefaultRetention.Years), 0, 0).Sub(t)
|
||||
}
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// Maximum 4KiB size per object lock config.
|
||||
const maxObjectLockConfigSize = 1 << 12
|
||||
|
||||
// ParseObjectLockConfig parses ObjectLockConfig from xml
func ParseObjectLockConfig(reader io.Reader) (*Config, error) {
	config := Config{}
	if err := xml.NewDecoder(io.LimitReader(reader, maxObjectLockConfigSize)).Decode(&config); err != nil {
		return nil, err
	}

	return &config, nil
}
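// Hypothetical usage sketch (not part of the original file): decoding an object
// lock configuration sent in a request body and deriving the bucket's default
// retention from it. The body variable is an illustrative assumption.
//
//	cfg, err := ParseObjectLockConfig(body)
//	if err != nil {
//		return ErrMalformedBucketObjectConfig
//	}
//	defaultRetention := cfg.ToRetention()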
|
||||
|
||||
// NewObjectLockConfig returns an initialized lock.Config struct
|
||||
func NewObjectLockConfig() *Config {
|
||||
return &Config{
|
||||
ObjectLockEnabled: "Enabled",
|
||||
}
|
||||
}
|
||||
|
||||
// RetentionDate is an embedded type containing time.Time to unmarshal
// Date in Retention
|
||||
type RetentionDate struct {
|
||||
time.Time
|
||||
}
|
||||
|
||||
// UnmarshalXML parses date from Retention and validates date format
|
||||
func (rDate *RetentionDate) UnmarshalXML(d *xml.Decoder, startElement xml.StartElement) error {
|
||||
var dateStr string
|
||||
err := d.DecodeElement(&dateStr, &startElement)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// While AWS documentation mentions that the date specified
|
||||
// must be present in ISO 8601 format, in reality they allow
|
||||
// users to provide RFC 3339 compliant dates.
|
||||
retDate, err := time.Parse(time.RFC3339, dateStr)
|
||||
if err != nil {
|
||||
return ErrInvalidRetentionDate
|
||||
}
|
||||
|
||||
*rDate = RetentionDate{retDate}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalXML encodes the retention date if it is non-zero and omits
// the element otherwise
|
||||
func (rDate *RetentionDate) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
|
||||
if *rDate == (RetentionDate{time.Time{}}) {
|
||||
return nil
|
||||
}
|
||||
return e.EncodeElement(rDate.Format(time.RFC3339), startElement)
|
||||
}
|
||||
|
||||
// ObjectRetention specified in
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectRetention.html
|
||||
type ObjectRetention struct {
|
||||
XMLNS string `xml:"xmlns,attr,omitempty"`
|
||||
XMLName xml.Name `xml:"Retention"`
|
||||
Mode RetMode `xml:"Mode,omitempty"`
|
||||
RetainUntilDate RetentionDate `xml:"RetainUntilDate,omitempty"`
|
||||
}
|
||||
|
||||
// Maximum 4KiB size per object retention config.
|
||||
const maxObjectRetentionSize = 1 << 12
|
||||
|
||||
// ParseObjectRetention constructs ObjectRetention struct from xml input
|
||||
func ParseObjectRetention(reader io.Reader) (*ObjectRetention, error) {
|
||||
ret := ObjectRetention{}
|
||||
if err := xml.NewDecoder(io.LimitReader(reader, maxObjectRetentionSize)).Decode(&ret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if ret.Mode != "" && !ret.Mode.Valid() {
|
||||
return &ret, ErrUnknownWORMModeDirective
|
||||
}
|
||||
|
||||
t, err := UTCNowNTP()
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return &ret, ErrPastObjectLockRetainDate
|
||||
}
|
||||
|
||||
if !ret.RetainUntilDate.IsZero() && ret.RetainUntilDate.Before(t) {
|
||||
return &ret, ErrPastObjectLockRetainDate
|
||||
}
|
||||
|
||||
return &ret, nil
|
||||
}
|
||||
|
||||
// IsObjectLockRetentionRequested returns true if object lock retention headers are set.
|
||||
func IsObjectLockRetentionRequested(h http.Header) bool {
|
||||
if _, ok := h[AmzObjectLockMode]; ok {
|
||||
return true
|
||||
}
|
||||
if _, ok := h[AmzObjectLockRetainUntilDate]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsObjectLockLegalHoldRequested returns true if object lock legal hold header is set.
|
||||
func IsObjectLockLegalHoldRequested(h http.Header) bool {
|
||||
_, ok := h[AmzObjectLockLegalHold]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsObjectLockGovernanceBypassSet returns true if object lock governance bypass header is set.
|
||||
func IsObjectLockGovernanceBypassSet(h http.Header) bool {
|
||||
return strings.ToLower(h.Get(AmzObjectLockBypassRetGovernance)) == "true"
|
||||
}
|
||||
|
||||
// IsObjectLockRequested returns true if legal hold or object lock retention headers are requested.
|
||||
func IsObjectLockRequested(h http.Header) bool {
|
||||
return IsObjectLockLegalHoldRequested(h) || IsObjectLockRetentionRequested(h)
|
||||
}
|
||||
|
||||
// ParseObjectLockRetentionHeaders parses http headers to extract retention mode and retention date
|
||||
func ParseObjectLockRetentionHeaders(h http.Header) (rmode RetMode, r RetentionDate, err error) {
|
||||
retMode := h.Get(AmzObjectLockMode)
|
||||
dateStr := h.Get(AmzObjectLockRetainUntilDate)
|
||||
if len(retMode) == 0 || len(dateStr) == 0 {
|
||||
return rmode, r, ErrObjectLockInvalidHeaders
|
||||
}
|
||||
|
||||
rmode = parseRetMode(retMode)
|
||||
if !rmode.Valid() {
|
||||
return rmode, r, ErrUnknownWORMModeDirective
|
||||
}
|
||||
|
||||
var retDate time.Time
|
||||
// While AWS documentation mentions that the date specified
|
||||
// must be present in ISO 8601 format, in reality they allow
|
||||
// users to provide RFC 3339 compliant dates.
|
||||
retDate, err = time.Parse(time.RFC3339, dateStr)
|
||||
if err != nil {
|
||||
return rmode, r, ErrInvalidRetentionDate
|
||||
}
|
||||
|
||||
t, err := UTCNowNTP()
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return rmode, r, ErrPastObjectLockRetainDate
|
||||
}
|
||||
|
||||
if retDate.Before(t) {
|
||||
return rmode, r, ErrPastObjectLockRetainDate
|
||||
}
|
||||
|
||||
return rmode, RetentionDate{retDate}, nil
|
||||
|
||||
}
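// Illustrative sketch (assumption): extracting the retention directives from an
// incoming request before applying them to an object; req is a hypothetical
// *http.Request.
//
//	mode, retainUntil, err := ParseObjectLockRetentionHeaders(req.Header)
//	if err != nil {
//		return err // e.g. ErrObjectLockInvalidHeaders or ErrPastObjectLockRetainDate
//	}
//	_, _ = mode, retainUntil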
|
||||
|
||||
// GetObjectRetentionMeta constructs ObjectRetention from metadata
|
||||
func GetObjectRetentionMeta(meta map[string]string) ObjectRetention {
|
||||
var mode RetMode
|
||||
var retainTill RetentionDate
|
||||
|
||||
var modeStr, tillStr string
|
||||
ok := false
|
||||
|
||||
modeStr, ok = meta[strings.ToLower(AmzObjectLockMode)]
|
||||
if !ok {
|
||||
modeStr, ok = meta[AmzObjectLockMode]
|
||||
}
|
||||
if ok {
|
||||
mode = parseRetMode(modeStr)
|
||||
}
|
||||
tillStr, ok = meta[strings.ToLower(AmzObjectLockRetainUntilDate)]
|
||||
if !ok {
|
||||
tillStr, ok = meta[AmzObjectLockRetainUntilDate]
|
||||
}
|
||||
if ok {
|
||||
if t, e := time.Parse(time.RFC3339, tillStr); e == nil {
|
||||
retainTill = RetentionDate{t.UTC()}
|
||||
}
|
||||
}
|
||||
return ObjectRetention{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Mode: mode, RetainUntilDate: retainTill}
|
||||
}
|
||||
|
||||
// GetObjectLegalHoldMeta constructs ObjectLegalHold from metadata
|
||||
func GetObjectLegalHoldMeta(meta map[string]string) ObjectLegalHold {
|
||||
holdStr, ok := meta[strings.ToLower(AmzObjectLockLegalHold)]
|
||||
if !ok {
|
||||
holdStr, ok = meta[AmzObjectLockLegalHold]
|
||||
}
|
||||
if ok {
|
||||
return ObjectLegalHold{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Status: parseLegalHoldStatus(holdStr)}
|
||||
}
|
||||
return ObjectLegalHold{}
|
||||
}
|
||||
|
||||
// ParseObjectLockLegalHoldHeaders parses request headers to construct ObjectLegalHold
|
||||
func ParseObjectLockLegalHoldHeaders(h http.Header) (lhold ObjectLegalHold, err error) {
|
||||
holdStatus, ok := h[AmzObjectLockLegalHold]
|
||||
if ok {
|
||||
lh := parseLegalHoldStatus(holdStatus[0])
|
||||
if !lh.Valid() {
|
||||
return lhold, ErrUnknownWORMModeDirective
|
||||
}
|
||||
lhold = ObjectLegalHold{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", Status: lh}
|
||||
}
|
||||
return lhold, nil
|
||||
|
||||
}
|
||||
|
||||
// ObjectLegalHold specified in
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html
|
||||
type ObjectLegalHold struct {
|
||||
XMLNS string `xml:"xmlns,attr,omitempty"`
|
||||
XMLName xml.Name `xml:"LegalHold"`
|
||||
Status LegalHoldStatus `xml:"Status,omitempty"`
|
||||
}
|
||||
|
||||
// IsEmpty returns true if struct is empty
|
||||
func (l *ObjectLegalHold) IsEmpty() bool {
|
||||
return !l.Status.Valid()
|
||||
}
|
||||
|
||||
// ParseObjectLegalHold decodes the XML into ObjectLegalHold
|
||||
func ParseObjectLegalHold(reader io.Reader) (hold *ObjectLegalHold, err error) {
|
||||
hold = &ObjectLegalHold{}
|
||||
if err = xml.NewDecoder(reader).Decode(hold); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !hold.Status.Valid() {
|
||||
return nil, ErrMalformedXML
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// FilterObjectLockMetadata filters object lock metadata if s3:GetObjectRetention permission is denied or if the isCopy flag is set.
|
||||
func FilterObjectLockMetadata(metadata map[string]string, filterRetention, filterLegalHold bool) map[string]string {
|
||||
// Copy on write
|
||||
dst := metadata
|
||||
var copied bool
|
||||
delKey := func(key string) {
|
||||
if _, ok := metadata[key]; !ok {
|
||||
return
|
||||
}
|
||||
if !copied {
|
||||
dst = make(map[string]string, len(metadata))
|
||||
for k, v := range metadata {
|
||||
dst[k] = v
|
||||
}
|
||||
copied = true
|
||||
}
|
||||
delete(dst, key)
|
||||
}
|
||||
legalHold := GetObjectLegalHoldMeta(metadata)
|
||||
if !legalHold.Status.Valid() || filterLegalHold {
|
||||
delKey(AmzObjectLockLegalHold)
|
||||
}
|
||||
|
||||
ret := GetObjectRetentionMeta(metadata)
|
||||
if !ret.Mode.Valid() || filterRetention {
|
||||
delKey(AmzObjectLockMode)
|
||||
delKey(AmzObjectLockRetainUntilDate)
|
||||
return dst
|
||||
}
|
||||
return dst
|
||||
}
|
|
@ -1,567 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package lock
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
xhttp "github.com/minio/minio/legacy/http"
|
||||
)
|
||||
|
||||
func TestParseMode(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value string
|
||||
expectedMode RetMode
|
||||
}{
|
||||
{
|
||||
value: "governance",
|
||||
expectedMode: RetGovernance,
|
||||
},
|
||||
{
|
||||
value: "complIAnce",
|
||||
expectedMode: RetCompliance,
|
||||
},
|
||||
{
|
||||
value: "gce",
|
||||
expectedMode: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
if parseRetMode(tc.value) != tc.expectedMode {
|
||||
t.Errorf("Expected Mode %s, got %s", tc.expectedMode, parseRetMode(tc.value))
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestParseLegalHoldStatus(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
expectedStatus LegalHoldStatus
|
||||
}{
|
||||
{
|
||||
value: "ON",
|
||||
expectedStatus: LegalHoldOn,
|
||||
},
|
||||
{
|
||||
value: "Off",
|
||||
expectedStatus: LegalHoldOff,
|
||||
},
|
||||
{
|
||||
value: "x",
|
||||
expectedStatus: "",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
actualStatus := parseLegalHoldStatus(tt.value)
|
||||
if actualStatus != tt.expectedStatus {
|
||||
t.Errorf("Expected legal hold status %s, got %s", tt.expectedStatus, actualStatus)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestUnmarshalDefaultRetention checks if default retention
|
||||
// marshaling and unmarshaling work as expected
|
||||
func TestUnmarshalDefaultRetention(t *testing.T) {
|
||||
days := uint64(4)
|
||||
years := uint64(1)
|
||||
zerodays := uint64(0)
|
||||
invalidDays := uint64(maximumRetentionDays + 1)
|
||||
tests := []struct {
|
||||
value DefaultRetention
|
||||
expectedErr error
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
value: DefaultRetention{Mode: "retain"},
|
||||
expectedErr: fmt.Errorf("unknown retention mode retain"),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
value: DefaultRetention{Mode: RetGovernance},
|
||||
expectedErr: fmt.Errorf("either Days or Years must be specified"),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
value: DefaultRetention{Mode: RetGovernance, Days: &days},
|
||||
expectedErr: nil,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
value: DefaultRetention{Mode: RetGovernance, Years: &years},
|
||||
expectedErr: nil,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
value: DefaultRetention{Mode: RetGovernance, Days: &days, Years: &years},
|
||||
expectedErr: fmt.Errorf("either Days or Years must be specified, not both"),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
value: DefaultRetention{Mode: RetGovernance, Days: &zerodays},
|
||||
expectedErr: fmt.Errorf("Default retention period must be a positive integer value for 'Days'"),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
value: DefaultRetention{Mode: RetGovernance, Days: &invalidDays},
|
||||
expectedErr: fmt.Errorf("Default retention period too large for 'Days' %d", invalidDays),
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
d, err := xml.MarshalIndent(&tt.value, "", "\t")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var dr DefaultRetention
|
||||
err = xml.Unmarshal(d, &dr)
|
||||
if tt.expectedErr == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||
}
|
||||
} else if err == nil {
|
||||
t.Fatalf("error: expected = %v, got = <nil>", tt.expectedErr)
|
||||
} else if tt.expectedErr.Error() != err.Error() {
|
||||
t.Fatalf("error: expected = %v, got = %v", tt.expectedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseObjectLockConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
expectedErr error
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
value: `<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>yes</ObjectLockEnabled></ObjectLockConfiguration>`,
|
||||
expectedErr: fmt.Errorf("only 'Enabled' value is allowed to ObjectLockEnabled element"),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
value: `<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled><Rule><DefaultRetention><Mode>COMPLIANCE</Mode><Days>0</Days></DefaultRetention></Rule></ObjectLockConfiguration>`,
|
||||
expectedErr: fmt.Errorf("Default retention period must be a positive integer value for 'Days'"),
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
value: `<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled><Rule><DefaultRetention><Mode>COMPLIANCE</Mode><Days>30</Days></DefaultRetention></Rule></ObjectLockConfiguration>`,
|
||||
expectedErr: nil,
|
||||
expectErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
_, err := ParseObjectLockConfig(strings.NewReader(tt.value))
|
||||
if tt.expectedErr == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||
}
|
||||
} else if err == nil {
|
||||
t.Fatalf("error: expected = %v, got = <nil>", tt.expectedErr)
|
||||
} else if tt.expectedErr.Error() != err.Error() {
|
||||
t.Fatalf("error: expected = %v, got = %v", tt.expectedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseObjectRetention(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
expectedErr error
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
value: `<?xml version="1.0" encoding="UTF-8"?><Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Mode>string</Mode><RetainUntilDate>2020-01-02T15:04:05Z</RetainUntilDate></Retention>`,
|
||||
expectedErr: ErrUnknownWORMModeDirective,
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
value: `<?xml version="1.0" encoding="UTF-8"?><Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Mode>COMPLIANCE</Mode><RetainUntilDate>2017-01-02T15:04:05Z</RetainUntilDate></Retention>`,
|
||||
expectedErr: ErrPastObjectLockRetainDate,
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
value: `<?xml version="1.0" encoding="UTF-8"?><Retention xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Mode>GOVERNANCE</Mode><RetainUntilDate>2057-01-02T15:04:05Z</RetainUntilDate></Retention>`,
|
||||
expectedErr: nil,
|
||||
expectErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
_, err := ParseObjectRetention(strings.NewReader(tt.value))
|
||||
if tt.expectedErr == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("error: expected = <nil>, got = %v", err)
|
||||
}
|
||||
} else if err == nil {
|
||||
t.Fatalf("error: expected = %v, got = <nil>", tt.expectedErr)
|
||||
} else if tt.expectedErr.Error() != err.Error() {
|
||||
t.Fatalf("error: expected = %v, got = %v", tt.expectedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsObjectLockRequested(t *testing.T) {
|
||||
tests := []struct {
|
||||
header http.Header
|
||||
expectedVal bool
|
||||
}{
|
||||
{
|
||||
header: http.Header{
|
||||
"Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
|
||||
"X-Amz-Content-Sha256": []string{""},
|
||||
"Content-Encoding": []string{""},
|
||||
},
|
||||
expectedVal: false,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
AmzObjectLockLegalHold: []string{""},
|
||||
},
|
||||
expectedVal: true,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
AmzObjectLockRetainUntilDate: []string{""},
|
||||
AmzObjectLockMode: []string{""},
|
||||
},
|
||||
expectedVal: true,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
AmzObjectLockBypassRetGovernance: []string{""},
|
||||
},
|
||||
expectedVal: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
actualVal := IsObjectLockRequested(tt.header)
|
||||
if actualVal != tt.expectedVal {
|
||||
t.Fatalf("error: expected %v, actual %v", tt.expectedVal, actualVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsObjectLockGovernanceBypassSet(t *testing.T) {
|
||||
tests := []struct {
|
||||
header http.Header
|
||||
expectedVal bool
|
||||
}{
|
||||
{
|
||||
header: http.Header{
|
||||
"Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
|
||||
"X-Amz-Content-Sha256": []string{""},
|
||||
"Content-Encoding": []string{""},
|
||||
},
|
||||
expectedVal: false,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
AmzObjectLockLegalHold: []string{""},
|
||||
},
|
||||
expectedVal: false,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
AmzObjectLockRetainUntilDate: []string{""},
|
||||
AmzObjectLockMode: []string{""},
|
||||
},
|
||||
expectedVal: false,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
AmzObjectLockBypassRetGovernance: []string{""},
|
||||
},
|
||||
expectedVal: false,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
AmzObjectLockBypassRetGovernance: []string{"true"},
|
||||
},
|
||||
expectedVal: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
actualVal := IsObjectLockGovernanceBypassSet(tt.header)
|
||||
if actualVal != tt.expectedVal {
|
||||
t.Fatalf("error: expected %v, actual %v", tt.expectedVal, actualVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseObjectLockRetentionHeaders(t *testing.T) {
|
||||
tests := []struct {
|
||||
header http.Header
|
||||
expectedErr error
|
||||
}{
|
||||
{
|
||||
header: http.Header{
|
||||
"Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
|
||||
"X-Amz-Content-Sha256": []string{""},
|
||||
"Content-Encoding": []string{""},
|
||||
},
|
||||
expectedErr: ErrObjectLockInvalidHeaders,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
xhttp.AmzObjectLockMode: []string{"lock"},
|
||||
xhttp.AmzObjectLockRetainUntilDate: []string{"2017-01-02"},
|
||||
},
|
||||
expectedErr: ErrUnknownWORMModeDirective,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
xhttp.AmzObjectLockMode: []string{"governance"},
|
||||
},
|
||||
expectedErr: ErrObjectLockInvalidHeaders,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
xhttp.AmzObjectLockRetainUntilDate: []string{"2017-01-02"},
|
||||
xhttp.AmzObjectLockMode: []string{"governance"},
|
||||
},
|
||||
expectedErr: ErrInvalidRetentionDate,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
xhttp.AmzObjectLockRetainUntilDate: []string{"2017-01-02T15:04:05Z"},
|
||||
xhttp.AmzObjectLockMode: []string{"governance"},
|
||||
},
|
||||
expectedErr: ErrPastObjectLockRetainDate,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
xhttp.AmzObjectLockMode: []string{"governance"},
|
||||
xhttp.AmzObjectLockRetainUntilDate: []string{"2017-01-02T15:04:05Z"},
|
||||
},
|
||||
expectedErr: ErrPastObjectLockRetainDate,
|
||||
},
|
||||
{
|
||||
header: http.Header{
|
||||
xhttp.AmzObjectLockMode: []string{"governance"},
|
||||
xhttp.AmzObjectLockRetainUntilDate: []string{"2087-01-02T15:04:05Z"},
|
||||
},
|
||||
expectedErr: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
_, _, err := ParseObjectLockRetentionHeaders(tt.header)
|
||||
if tt.expectedErr == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err)
|
||||
}
|
||||
} else if err == nil {
|
||||
t.Fatalf("Case %d error: expected = %v, got = <nil>", i, tt.expectedErr)
|
||||
} else if tt.expectedErr.Error() != err.Error() {
|
||||
t.Fatalf("Case %d error: expected = %v, got = %v", i, tt.expectedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetObjectRetentionMeta(t *testing.T) {
|
||||
tests := []struct {
|
||||
metadata map[string]string
|
||||
expected ObjectRetention
|
||||
}{
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"Authorization": "AWS4-HMAC-SHA256 <cred_string>",
|
||||
"X-Amz-Content-Sha256": "",
|
||||
"Content-Encoding": "",
|
||||
},
|
||||
expected: ObjectRetention{},
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-mode": "governance",
|
||||
},
|
||||
expected: ObjectRetention{Mode: RetGovernance},
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-retain-until-date": "2020-02-01",
|
||||
},
|
||||
expected: ObjectRetention{RetainUntilDate: RetentionDate{time.Date(2020, 2, 1, 12, 0, 0, 0, time.UTC)}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
o := GetObjectRetentionMeta(tt.metadata)
|
||||
if o.Mode != tt.expected.Mode {
|
||||
t.Fatalf("Case %d expected %v, got %v", i, tt.expected.Mode, o.Mode)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetObjectLegalHoldMeta(t *testing.T) {
|
||||
tests := []struct {
|
||||
metadata map[string]string
|
||||
expected ObjectLegalHold
|
||||
}{
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-mode": "governance",
|
||||
},
|
||||
expected: ObjectLegalHold{},
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-legal-hold": "on",
|
||||
},
|
||||
expected: ObjectLegalHold{Status: LegalHoldOn},
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-legal-hold": "off",
|
||||
},
|
||||
expected: ObjectLegalHold{Status: LegalHoldOff},
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-legal-hold": "X",
|
||||
},
|
||||
expected: ObjectLegalHold{Status: ""},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
o := GetObjectLegalHoldMeta(tt.metadata)
|
||||
if o.Status != tt.expected.Status {
|
||||
t.Fatalf("Case %d expected %v, got %v", i, tt.expected.Status, o.Status)
|
||||
}
|
||||
}
|
||||
}
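// Illustrative sketch, not part of the original file: reading both pieces of
// lock state back from stored object metadata with the getters tested above.
// The helper name lockStateFromMeta is hypothetical.
func lockStateFromMeta(meta map[string]string) (ObjectRetention, ObjectLegalHold) {
	return GetObjectRetentionMeta(meta), GetObjectLegalHoldMeta(meta)
}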
|
||||
|
||||
func TestParseObjectLegalHold(t *testing.T) {
|
||||
tests := []struct {
|
||||
value string
|
||||
expectedErr error
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
value: `<?xml version="1.0" encoding="UTF-8"?><LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>string</Status></LegalHold>`,
|
||||
expectedErr: ErrMalformedXML,
|
||||
expectErr: true,
|
||||
},
|
||||
{
|
||||
value: `<?xml version="1.0" encoding="UTF-8"?><LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>ON</Status></LegalHold>`,
|
||||
expectedErr: nil,
|
||||
expectErr: false,
|
||||
},
|
||||
{
|
||||
value: `<?xml version="1.0" encoding="UTF-8"?><LegalHold xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Status>On</Status></LegalHold>`,
|
||||
expectedErr: ErrMalformedXML,
|
||||
expectErr: true,
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
_, err := ParseObjectLegalHold(strings.NewReader(tt.value))
|
||||
if tt.expectedErr == nil {
|
||||
if err != nil {
|
||||
t.Fatalf("Case %d error: expected = <nil>, got = %v", i, err)
|
||||
}
|
||||
} else if err == nil {
|
||||
t.Fatalf("Case %d error: expected = %v, got = <nil>", i, tt.expectedErr)
|
||||
} else if tt.expectedErr.Error() != err.Error() {
|
||||
t.Fatalf("Case %d error: expected = %v, got = %v", i, tt.expectedErr, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestFilterObjectLockMetadata(t *testing.T) {
|
||||
tests := []struct {
|
||||
metadata map[string]string
|
||||
filterRetention bool
|
||||
filterLegalHold bool
|
||||
expected map[string]string
|
||||
}{
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"Authorization": "AWS4-HMAC-SHA256 <cred_string>",
|
||||
"X-Amz-Content-Sha256": "",
|
||||
"Content-Encoding": "",
|
||||
},
|
||||
expected: map[string]string{
|
||||
"Authorization": "AWS4-HMAC-SHA256 <cred_string>",
|
||||
"X-Amz-Content-Sha256": "",
|
||||
"Content-Encoding": "",
|
||||
},
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-mode": "governance",
|
||||
},
|
||||
expected: map[string]string{
|
||||
"x-amz-object-lock-mode": "governance",
|
||||
},
|
||||
filterRetention: false,
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-mode": "governance",
|
||||
"x-amz-object-lock-retain-until-date": "2020-02-01",
|
||||
},
|
||||
expected: map[string]string{},
|
||||
filterRetention: true,
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-legal-hold": "off",
|
||||
},
|
||||
expected: map[string]string{},
|
||||
filterLegalHold: true,
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-legal-hold": "on",
|
||||
},
|
||||
expected: map[string]string{"x-amz-object-lock-legal-hold": "on"},
|
||||
filterLegalHold: false,
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-legal-hold": "on",
|
||||
"x-amz-object-lock-mode": "governance",
|
||||
"x-amz-object-lock-retain-until-date": "2020-02-01",
|
||||
},
|
||||
expected: map[string]string{},
|
||||
filterRetention: true,
|
||||
filterLegalHold: true,
|
||||
},
|
||||
{
|
||||
metadata: map[string]string{
|
||||
"x-amz-object-lock-legal-hold": "on",
|
||||
"x-amz-object-lock-mode": "governance",
|
||||
"x-amz-object-lock-retain-until-date": "2020-02-01",
|
||||
},
|
||||
expected: map[string]string{"x-amz-object-lock-legal-hold": "on",
|
||||
"x-amz-object-lock-mode": "governance",
|
||||
"x-amz-object-lock-retain-until-date": "2020-02-01"},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
o := FilterObjectLockMetadata(tt.metadata, tt.filterRetention, tt.filterLegalHold)
|
||||
if !reflect.DeepEqual(o, tt.expected) {
|
||||
t.Fatalf("Case %d expected %v, got %v", i, tt.metadata, o)
|
||||
}
|
||||
}
|
||||
}
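// Illustrative sketch, not part of the original file: the helpers exercised by
// the tests above might be combined along these lines when validating a
// request. The function name checkLockRequest is hypothetical; the parser and
// header-inspection signatures follow the calls made in the tests.
func checkLockRequest(body io.Reader, hdr http.Header) error {
	// Reject malformed or invalid object-lock configuration XML.
	if _, err := ParseObjectLockConfig(body); err != nil {
		return err
	}
	// Only parse retention headers when an object-lock header is present.
	if IsObjectLockRequested(hdr) {
		if _, _, err := ParseObjectLockRetentionHeaders(hdr); err != nil {
			return err
		}
	}
	return nil
}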
|
|
@ -1,311 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/minio/minio/pkg/bucket/policy/condition"
|
||||
)
|
||||
|
||||
// Action - policy action.
|
||||
// Refer https://docs.aws.amazon.com/IAM/latest/UserGuide/list_amazons3.html
|
||||
// for more information about available actions.
|
||||
type Action string
|
||||
|
||||
const (
|
||||
// AbortMultipartUploadAction - AbortMultipartUpload Rest API action.
|
||||
AbortMultipartUploadAction Action = "s3:AbortMultipartUpload"
|
||||
|
||||
// CreateBucketAction - CreateBucket Rest API action.
|
||||
CreateBucketAction = "s3:CreateBucket"
|
||||
|
||||
// DeleteBucketAction - DeleteBucket Rest API action.
|
||||
DeleteBucketAction = "s3:DeleteBucket"
|
||||
|
||||
// ForceDeleteBucketAction - DeleteBucket Rest API action when x-minio-force-delete flag
|
||||
// is specified.
|
||||
ForceDeleteBucketAction = "s3:ForceDeleteBucket"
|
||||
|
||||
// DeleteBucketPolicyAction - DeleteBucketPolicy Rest API action.
|
||||
DeleteBucketPolicyAction = "s3:DeleteBucketPolicy"
|
||||
|
||||
// DeleteObjectAction - DeleteObject Rest API action.
|
||||
DeleteObjectAction = "s3:DeleteObject"
|
||||
|
||||
// GetBucketLocationAction - GetBucketLocation Rest API action.
|
||||
GetBucketLocationAction = "s3:GetBucketLocation"
|
||||
|
||||
// GetBucketNotificationAction - GetBucketNotification Rest API action.
|
||||
GetBucketNotificationAction = "s3:GetBucketNotification"
|
||||
|
||||
// GetBucketPolicyAction - GetBucketPolicy Rest API action.
|
||||
GetBucketPolicyAction = "s3:GetBucketPolicy"
|
||||
|
||||
// GetObjectAction - GetObject Rest API action.
|
||||
GetObjectAction = "s3:GetObject"
|
||||
|
||||
// HeadBucketAction - HeadBucket Rest API action. This action is unused in minio.
|
||||
HeadBucketAction = "s3:HeadBucket"
|
||||
|
||||
// ListAllMyBucketsAction - ListAllMyBuckets (List buckets) Rest API action.
|
||||
ListAllMyBucketsAction = "s3:ListAllMyBuckets"
|
||||
|
||||
// ListBucketAction - ListBucket Rest API action.
|
||||
ListBucketAction = "s3:ListBucket"
|
||||
|
||||
// ListBucketMultipartUploadsAction - ListMultipartUploads Rest API action.
|
||||
ListBucketMultipartUploadsAction = "s3:ListBucketMultipartUploads"
|
||||
|
||||
// ListenBucketNotificationAction - ListenBucketNotification Rest API action.
|
||||
// This is MinIO extension.
|
||||
ListenBucketNotificationAction = "s3:ListenBucketNotification"
|
||||
|
||||
// ListMultipartUploadPartsAction - ListParts Rest API action.
|
||||
ListMultipartUploadPartsAction = "s3:ListMultipartUploadParts"
|
||||
|
||||
// PutBucketNotificationAction - PutBucketNotification Rest API action.
|
||||
PutBucketNotificationAction = "s3:PutBucketNotification"
|
||||
|
||||
// PutBucketPolicyAction - PutBucketPolicy Rest API action.
|
||||
PutBucketPolicyAction = "s3:PutBucketPolicy"
|
||||
|
||||
// PutObjectAction - PutObject Rest API action.
|
||||
PutObjectAction = "s3:PutObject"
|
||||
|
||||
// PutBucketLifecycleAction - PutBucketLifecycle Rest API action.
|
||||
PutBucketLifecycleAction = "s3:PutLifecycleConfiguration"
|
||||
|
||||
// GetBucketLifecycleAction - GetBucketLifecycle Rest API action.
|
||||
GetBucketLifecycleAction = "s3:GetLifecycleConfiguration"
|
||||
|
||||
// BypassGovernanceRetentionAction - bypass governance retention for PutObjectRetention, PutObject and DeleteObject Rest API action.
|
||||
BypassGovernanceRetentionAction = "s3:BypassGovernanceRetention"
|
||||
// PutObjectRetentionAction - PutObjectRetention Rest API action.
|
||||
PutObjectRetentionAction = "s3:PutObjectRetention"
|
||||
|
||||
// GetObjectRetentionAction - GetObjectRetention, GetObject, HeadObject Rest API action.
|
||||
GetObjectRetentionAction = "s3:GetObjectRetention"
|
||||
// GetObjectLegalHoldAction - GetObjectLegalHold, GetObject Rest API action.
|
||||
GetObjectLegalHoldAction = "s3:GetObjectLegalHold"
|
||||
// PutObjectLegalHoldAction - PutObjectLegalHold, PutObject Rest API action.
|
||||
PutObjectLegalHoldAction = "s3:PutObjectLegalHold"
|
||||
// GetBucketObjectLockConfigurationAction - GetObjectLockConfiguration Rest API action
|
||||
GetBucketObjectLockConfigurationAction = "s3:GetBucketObjectLockConfiguration"
|
||||
// PutBucketObjectLockConfigurationAction - PutObjectLockConfiguration Rest API action
|
||||
PutBucketObjectLockConfigurationAction = "s3:PutBucketObjectLockConfiguration"
|
||||
|
||||
// GetBucketTaggingAction - GetTagging Rest API action
|
||||
GetBucketTaggingAction = "s3:GetBucketTagging"
|
||||
// PutBucketTaggingAction - PutTagging Rest API action
|
||||
PutBucketTaggingAction = "s3:PutBucketTagging"
|
||||
|
||||
// GetObjectTaggingAction - Get Object Tags API action
|
||||
GetObjectTaggingAction = "s3:GetObjectTagging"
|
||||
// PutObjectTaggingAction - Put Object Tags API action
|
||||
PutObjectTaggingAction = "s3:PutObjectTagging"
|
||||
// DeleteObjectTaggingAction - Delete Object Tags API action
|
||||
DeleteObjectTaggingAction = "s3:DeleteObjectTagging"
|
||||
|
||||
// PutBucketEncryptionAction - PutBucketEncryption REST API action
|
||||
PutBucketEncryptionAction = "s3:PutEncryptionConfiguration"
|
||||
// GetBucketEncryptionAction - GetBucketEncryption REST API action
|
||||
GetBucketEncryptionAction = "s3:GetEncryptionConfiguration"
|
||||
)
|
||||
|
||||
// List of all supported object actions.
|
||||
var supportedObjectActions = map[Action]struct{}{
|
||||
AbortMultipartUploadAction: {},
|
||||
DeleteObjectAction: {},
|
||||
GetObjectAction: {},
|
||||
ListMultipartUploadPartsAction: {},
|
||||
PutObjectAction: {},
|
||||
BypassGovernanceRetentionAction: {},
|
||||
PutObjectRetentionAction: {},
|
||||
GetObjectRetentionAction: {},
|
||||
PutObjectLegalHoldAction: {},
|
||||
GetObjectLegalHoldAction: {},
|
||||
GetObjectTaggingAction: {},
|
||||
PutObjectTaggingAction: {},
|
||||
DeleteObjectTaggingAction: {},
|
||||
}
|
||||
|
||||
// isObjectAction - returns whether action is object type or not.
|
||||
func (action Action) isObjectAction() bool {
|
||||
_, ok := supportedObjectActions[action]
|
||||
return ok
|
||||
}
|
||||
|
||||
// List of all supported actions.
|
||||
var supportedActions = map[Action]struct{}{
|
||||
AbortMultipartUploadAction: {},
|
||||
CreateBucketAction: {},
|
||||
DeleteBucketAction: {},
|
||||
ForceDeleteBucketAction: {},
|
||||
DeleteBucketPolicyAction: {},
|
||||
DeleteObjectAction: {},
|
||||
GetBucketLocationAction: {},
|
||||
GetBucketNotificationAction: {},
|
||||
GetBucketPolicyAction: {},
|
||||
GetObjectAction: {},
|
||||
HeadBucketAction: {},
|
||||
ListAllMyBucketsAction: {},
|
||||
ListBucketAction: {},
|
||||
ListBucketMultipartUploadsAction: {},
|
||||
ListenBucketNotificationAction: {},
|
||||
ListMultipartUploadPartsAction: {},
|
||||
PutBucketNotificationAction: {},
|
||||
PutBucketPolicyAction: {},
|
||||
PutObjectAction: {},
|
||||
GetBucketLifecycleAction: {},
|
||||
PutBucketLifecycleAction: {},
|
||||
PutObjectRetentionAction: {},
|
||||
GetObjectRetentionAction: {},
|
||||
GetObjectLegalHoldAction: {},
|
||||
PutObjectLegalHoldAction: {},
|
||||
PutBucketObjectLockConfigurationAction: {},
|
||||
GetBucketObjectLockConfigurationAction: {},
|
||||
PutBucketTaggingAction: {},
|
||||
GetBucketTaggingAction: {},
|
||||
BypassGovernanceRetentionAction: {},
|
||||
GetObjectTaggingAction: {},
|
||||
PutObjectTaggingAction: {},
|
||||
DeleteObjectTaggingAction: {},
|
||||
PutBucketEncryptionAction: {},
|
||||
GetBucketEncryptionAction: {},
|
||||
}
|
||||
|
||||
// IsValid - checks if action is valid or not.
|
||||
func (action Action) IsValid() bool {
|
||||
_, ok := supportedActions[action]
|
||||
return ok
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes Action to JSON data.
|
||||
func (action Action) MarshalJSON() ([]byte, error) {
|
||||
if action.IsValid() {
|
||||
return json.Marshal(string(action))
|
||||
}
|
||||
|
||||
return nil, Errorf("invalid action '%v'", action)
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to Action.
|
||||
func (action *Action) UnmarshalJSON(data []byte) error {
|
||||
var s string
|
||||
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
a := Action(s)
|
||||
if !a.IsValid() {
|
||||
return Errorf("invalid action '%v'", s)
|
||||
}
|
||||
|
||||
*action = a
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseAction(s string) (Action, error) {
|
||||
action := Action(s)
|
||||
|
||||
if action.IsValid() {
|
||||
return action, nil
|
||||
}
|
||||
|
||||
return action, Errorf("unsupported action '%v'", s)
|
||||
}
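// Illustrative sketch, not part of the original file: parseAction and
// isObjectAction can be combined to classify an incoming action string.
// The helper name classifyAction is hypothetical.
func classifyAction(s string) (Action, bool, error) {
	action, err := parseAction(s)
	if err != nil {
		return action, false, err
	}
	// Report whether the supported action applies at object level.
	return action, action.isObjectAction(), nil
}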
|
||||
|
||||
// actionConditionKeyMap - holds the mapping of supported condition keys for each action.
|
||||
var actionConditionKeyMap = map[Action]condition.KeySet{
|
||||
AbortMultipartUploadAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
|
||||
CreateBucketAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
|
||||
DeleteObjectAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
|
||||
GetBucketLocationAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
|
||||
GetObjectAction: condition.NewKeySet(
|
||||
append([]condition.Key{
|
||||
condition.S3XAmzServerSideEncryption,
|
||||
condition.S3XAmzServerSideEncryptionCustomerAlgorithm,
|
||||
condition.S3XAmzStorageClass,
|
||||
}, condition.CommonKeys...)...),
|
||||
|
||||
HeadBucketAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
|
||||
ListAllMyBucketsAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
|
||||
ListBucketAction: condition.NewKeySet(
|
||||
append([]condition.Key{
|
||||
condition.S3Prefix,
|
||||
condition.S3Delimiter,
|
||||
condition.S3MaxKeys,
|
||||
}, condition.CommonKeys...)...),
|
||||
|
||||
ListBucketMultipartUploadsAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
|
||||
ListMultipartUploadPartsAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
|
||||
PutObjectAction: condition.NewKeySet(
|
||||
append([]condition.Key{
|
||||
condition.S3XAmzCopySource,
|
||||
condition.S3XAmzServerSideEncryption,
|
||||
condition.S3XAmzServerSideEncryptionCustomerAlgorithm,
|
||||
condition.S3XAmzMetadataDirective,
|
||||
condition.S3XAmzStorageClass,
|
||||
condition.S3ObjectLockRetainUntilDate,
|
||||
condition.S3ObjectLockMode,
|
||||
condition.S3ObjectLockLegalHold,
|
||||
}, condition.CommonKeys...)...),
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html
|
||||
// LockLegalHold is not supported with PutObjectRetentionAction
|
||||
PutObjectRetentionAction: condition.NewKeySet(
|
||||
append([]condition.Key{
|
||||
condition.S3ObjectLockRemainingRetentionDays,
|
||||
condition.S3ObjectLockRetainUntilDate,
|
||||
condition.S3ObjectLockMode,
|
||||
}, condition.CommonKeys...)...),
|
||||
|
||||
GetObjectRetentionAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
PutObjectLegalHoldAction: condition.NewKeySet(
|
||||
append([]condition.Key{
|
||||
condition.S3ObjectLockLegalHold,
|
||||
}, condition.CommonKeys...)...),
|
||||
GetObjectLegalHoldAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html
|
||||
BypassGovernanceRetentionAction: condition.NewKeySet(
|
||||
append([]condition.Key{
|
||||
condition.S3ObjectLockRemainingRetentionDays,
|
||||
condition.S3ObjectLockRetainUntilDate,
|
||||
condition.S3ObjectLockMode,
|
||||
condition.S3ObjectLockLegalHold,
|
||||
}, condition.CommonKeys...)...),
|
||||
|
||||
GetBucketObjectLockConfigurationAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
PutBucketObjectLockConfigurationAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
GetBucketTaggingAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
PutBucketTaggingAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
PutObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
GetObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
DeleteObjectTaggingAction: condition.NewKeySet(condition.CommonKeys...),
|
||||
}
|
|
@ -1,116 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestActionIsObjectAction(t *testing.T) {
|
||||
testCases := []struct {
|
||||
action Action
|
||||
expectedResult bool
|
||||
}{
|
||||
{AbortMultipartUploadAction, true},
|
||||
{DeleteObjectAction, true},
|
||||
{GetObjectAction, true},
|
||||
{ListMultipartUploadPartsAction, true},
|
||||
{PutObjectAction, true},
|
||||
{CreateBucketAction, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.action.isObjectAction()
|
||||
|
||||
if testCase.expectedResult != result {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestActionIsValid(t *testing.T) {
|
||||
testCases := []struct {
|
||||
action Action
|
||||
expectedResult bool
|
||||
}{
|
||||
{AbortMultipartUploadAction, true},
|
||||
{Action("foo"), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.action.IsValid()
|
||||
|
||||
if testCase.expectedResult != result {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestActionMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
action Action
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{PutObjectAction, []byte(`"s3:PutObject"`), false},
|
||||
{Action("foo"), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.action)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if testCase.expectErr != expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestActionUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult Action
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`"s3:PutObject"`), PutObjectAction, false},
|
||||
{[]byte(`"foo"`), Action(""), true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result Action
|
||||
err := json.Unmarshal(testCase.data, &result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if testCase.expectErr != expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if testCase.expectedResult != result {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,132 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
)
|
||||
|
||||
// ActionSet - set of actions.
|
||||
type ActionSet map[Action]struct{}
|
||||
|
||||
// Add - add action to the set.
|
||||
func (actionSet ActionSet) Add(action Action) {
|
||||
actionSet[action] = struct{}{}
|
||||
}
|
||||
|
||||
// Contains - checks given action exists in the action set.
|
||||
func (actionSet ActionSet) Contains(action Action) bool {
|
||||
_, found := actionSet[action]
|
||||
return found
|
||||
}
|
||||
|
||||
// Equals - checks whether given action set is equal to current action set or not.
|
||||
func (actionSet ActionSet) Equals(sactionSet ActionSet) bool {
|
||||
// If length of set is not equal to length of given set, the
|
||||
// set is not equal to given set.
|
||||
if len(actionSet) != len(sactionSet) {
|
||||
return false
|
||||
}
|
||||
|
||||
// As both sets are equal in length, check each element is present in the other set.
|
||||
for k := range actionSet {
|
||||
if _, ok := sactionSet[k]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Intersection - returns actions available in both ActionSet.
|
||||
func (actionSet ActionSet) Intersection(sset ActionSet) ActionSet {
|
||||
nset := NewActionSet()
|
||||
for k := range actionSet {
|
||||
if _, ok := sset[k]; ok {
|
||||
nset.Add(k)
|
||||
}
|
||||
}
|
||||
|
||||
return nset
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes ActionSet to JSON data.
|
||||
func (actionSet ActionSet) MarshalJSON() ([]byte, error) {
|
||||
if len(actionSet) == 0 {
|
||||
return nil, Errorf("empty actions not allowed")
|
||||
}
|
||||
|
||||
return json.Marshal(actionSet.ToSlice())
|
||||
}
|
||||
|
||||
func (actionSet ActionSet) String() string {
|
||||
actions := []string{}
|
||||
for action := range actionSet {
|
||||
actions = append(actions, string(action))
|
||||
}
|
||||
sort.Strings(actions)
|
||||
|
||||
return fmt.Sprintf("%v", actions)
|
||||
}
|
||||
|
||||
// ToSlice - returns slice of actions from the action set.
|
||||
func (actionSet ActionSet) ToSlice() []Action {
|
||||
actions := []Action{}
|
||||
for action := range actionSet {
|
||||
actions = append(actions, action)
|
||||
}
|
||||
|
||||
return actions
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to ActionSet.
|
||||
func (actionSet *ActionSet) UnmarshalJSON(data []byte) error {
|
||||
var sset set.StringSet
|
||||
if err := json.Unmarshal(data, &sset); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(sset) == 0 {
|
||||
return Errorf("empty actions not allowed")
|
||||
}
|
||||
|
||||
*actionSet = make(ActionSet)
|
||||
for _, s := range sset.ToSlice() {
|
||||
action, err := parseAction(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
actionSet.Add(action)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewActionSet - creates new action set.
|
||||
func NewActionSet(actions ...Action) ActionSet {
|
||||
actionSet := make(ActionSet)
|
||||
for _, action := range actions {
|
||||
actionSet.Add(action)
|
||||
}
|
||||
|
||||
return actionSet
|
||||
}
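// Illustrative sketch, not part of the original file: typical use of the
// ActionSet operations defined above. The function name is hypothetical, and
// the JSON element order is not fixed because the set is backed by a map.
func exampleActionSets() {
	readWrite := NewActionSet(GetObjectAction, PutObjectAction)
	readOnly := NewActionSet(GetObjectAction)

	_ = readWrite.Contains(PutObjectAction) // true
	common := readWrite.Intersection(readOnly)
	_ = common.ToSlice() // [s3:GetObject]

	data, _ := json.Marshal(readWrite) // e.g. ["s3:GetObject","s3:PutObject"]
	_ = data
}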
|
|
@ -1,158 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestActionSetAdd(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set ActionSet
|
||||
action Action
|
||||
expectedResult ActionSet
|
||||
}{
|
||||
{NewActionSet(), PutObjectAction, NewActionSet(PutObjectAction)},
|
||||
{NewActionSet(PutObjectAction), PutObjectAction, NewActionSet(PutObjectAction)},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
testCase.set.Add(testCase.action)
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, testCase.set) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestActionSetContains(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set ActionSet
|
||||
action Action
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewActionSet(PutObjectAction), PutObjectAction, true},
|
||||
{NewActionSet(PutObjectAction, GetObjectAction), PutObjectAction, true},
|
||||
{NewActionSet(PutObjectAction, GetObjectAction), AbortMultipartUploadAction, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.Contains(testCase.action)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestActionSetIntersection(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set ActionSet
|
||||
setToIntersect ActionSet
|
||||
expectedResult ActionSet
|
||||
}{
|
||||
{NewActionSet(), NewActionSet(PutObjectAction), NewActionSet()},
|
||||
{NewActionSet(PutObjectAction), NewActionSet(), NewActionSet()},
|
||||
{NewActionSet(PutObjectAction), NewActionSet(PutObjectAction, GetObjectAction), NewActionSet(PutObjectAction)},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.Intersection(testCase.setToIntersect)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestActionSetMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
actionSet ActionSet
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{NewActionSet(PutObjectAction), []byte(`["s3:PutObject"]`), false},
|
||||
{NewActionSet(), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.actionSet)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestActionSetToSlice(t *testing.T) {
|
||||
testCases := []struct {
|
||||
actionSet ActionSet
|
||||
expectedResult []Action
|
||||
}{
|
||||
{NewActionSet(PutObjectAction), []Action{PutObjectAction}},
|
||||
{NewActionSet(), []Action{}},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.actionSet.ToSlice()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestActionSetUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult ActionSet
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`"s3:PutObject"`), NewActionSet(PutObjectAction), false},
|
||||
{[]byte(`["s3:PutObject"]`), NewActionSet(PutObjectAction), false},
|
||||
{[]byte(`["s3:PutObject", "s3:GetObject"]`), NewActionSet(PutObjectAction, GetObjectAction), false},
|
||||
{[]byte(`["s3:PutObject", "s3:GetObject", "s3:PutObject"]`), NewActionSet(PutObjectAction, GetObjectAction), false},
|
||||
{[]byte(`[]`), NewActionSet(), true}, // Empty array.
|
||||
{[]byte(`"foo"`), nil, true}, // Invalid action.
|
||||
{[]byte(`["s3:PutObject", "foo"]`), nil, true}, // Invalid action.
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := make(ActionSet)
|
||||
err := json.Unmarshal(testCase.data, &result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,143 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
)
|
||||
|
||||
func toBinaryEqualsFuncString(n name, key Key, values set.StringSet) string {
|
||||
valueStrings := values.ToSlice()
|
||||
sort.Strings(valueStrings)
|
||||
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, valueStrings)
|
||||
}
|
||||
|
||||
// binaryEqualsFunc - Binary equals function. It checks whether the value for Key in the given
|
||||
// values map is in condition values.
|
||||
// For example,
|
||||
// - if values = ["mybucket/foo"], at evaluate() it returns whether string
|
||||
// in the value map for Key is one of these values.
|
||||
type binaryEqualsFunc struct {
|
||||
k Key
|
||||
values set.StringSet
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether value by Key in given values is in
|
||||
// condition values.
|
||||
func (f binaryEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
fvalues := f.values.ApplyFunc(substFuncFromValues(values))
|
||||
return !fvalues.Intersection(set.CreateStringSet(requestValue...)).IsEmpty()
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f binaryEqualsFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "BinaryEquals" condition name.
|
||||
func (f binaryEqualsFunc) name() name {
|
||||
return binaryEquals
|
||||
}
|
||||
|
||||
func (f binaryEqualsFunc) String() string {
|
||||
return toBinaryEqualsFuncString(binaryEquals, f.k, f.values)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f binaryEqualsFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
for _, value := range f.values.ToSlice() {
|
||||
values.Add(NewStringValue(base64.StdEncoding.EncodeToString([]byte(value))))
|
||||
}
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
func validateBinaryEqualsValues(n name, key Key, values set.StringSet) error {
|
||||
vslice := values.ToSlice()
|
||||
for _, s := range vslice {
|
||||
sbytes, err := base64.StdEncoding.DecodeString(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
values.Remove(s)
|
||||
s = string(sbytes)
|
||||
switch key {
|
||||
case S3XAmzCopySource:
|
||||
bucket, object := path2BucketAndObject(s)
|
||||
if object == "" {
|
||||
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzCopySource, n)
|
||||
}
|
||||
if err = s3utils.CheckValidBucketName(bucket); err != nil {
|
||||
return err
|
||||
}
|
||||
case S3XAmzServerSideEncryption, S3XAmzServerSideEncryptionCustomerAlgorithm:
|
||||
if s != "AES256" {
|
||||
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzServerSideEncryption, n)
|
||||
}
|
||||
case S3XAmzMetadataDirective:
|
||||
if s != "COPY" && s != "REPLACE" {
|
||||
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzMetadataDirective, n)
|
||||
}
|
||||
case S3XAmzContentSha256:
|
||||
if s == "" {
|
||||
return fmt.Errorf("invalid empty value for '%v' for %v condition", S3XAmzContentSha256, n)
|
||||
}
|
||||
}
|
||||
values.Add(s)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// newBinaryEqualsFunc - returns new BinaryEquals function.
|
||||
func newBinaryEqualsFunc(key Key, values ValueSet) (Function, error) {
|
||||
valueStrings, err := valuesToStringSlice(binaryEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewBinaryEqualsFunc(key, valueStrings...)
|
||||
}
|
||||
|
||||
// NewBinaryEqualsFunc - returns new BinaryEquals function.
|
||||
func NewBinaryEqualsFunc(key Key, values ...string) (Function, error) {
|
||||
sset := set.CreateStringSet(values...)
|
||||
if err := validateBinaryEqualsValues(binaryEquals, key, sset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &binaryEqualsFunc{key, sset}, nil
|
||||
}
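// Illustrative sketch, not part of the original file: building a BinaryEquals
// condition and evaluating it. As validateBinaryEqualsValues above shows, the
// condition values are supplied base64-encoded and are decoded before being
// matched against the plain request values. The function name is hypothetical.
func exampleBinaryEquals() (bool, error) {
	f, err := NewBinaryEqualsFunc(S3XAmzCopySource,
		base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))
	if err != nil {
		return false, err
	}
	// true: the request's x-amz-copy-source equals the decoded condition value.
	return f.evaluate(map[string][]string{
		"x-amz-copy-source": {"mybucket/myobject"},
	}), nil
}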
|
|
@ -1,382 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBinaryEqualsFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newBinaryEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newBinaryEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false},
|
||||
{case1Function, map[string][]string{}, false},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true},
|
||||
{case2Function, map[string][]string{}, false},
|
||||
{case2Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true},
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false},
|
||||
{case3Function, map[string][]string{}, false},
|
||||
{case3Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true},
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false},
|
||||
{case4Function, map[string][]string{}, false},
|
||||
{case4Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBinaryEqualsFuncKey(t *testing.T) {
|
||||
case1Function, err := newBinaryEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newBinaryEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, S3XAmzCopySource},
|
||||
{case2Function, S3XAmzServerSideEncryption},
|
||||
{case3Function, S3XAmzMetadataDirective},
|
||||
{case4Function, S3LocationConstraint},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBinaryEqualsFuncToMap(t *testing.T) {
|
||||
case1Function, err := newBinaryEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))),
|
||||
}
|
||||
|
||||
case2Function, err := newBinaryEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("yourbucket/myobject"))),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("yourbucket/myobject"))),
|
||||
),
|
||||
}
|
||||
|
||||
case3Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256")))),
|
||||
}
|
||||
|
||||
case4Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))),
|
||||
),
|
||||
}
|
||||
|
||||
case5Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE")))),
|
||||
}
|
||||
|
||||
case6Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("COPY"))),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("COPY"))),
|
||||
),
|
||||
}
|
||||
|
||||
case7Function, err := newBinaryEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1")))),
|
||||
}
|
||||
|
||||
case8Function, err := newBinaryEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("us-west-1"))),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("us-west-1"))),
|
||||
),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{case3Function, case3Result},
|
||||
{case4Function, case4Result},
|
||||
{case5Function, case5Result},
|
||||
{case6Function, case6Result},
|
||||
{case7Function, case7Result},
|
||||
{case8Function, case8Result},
|
||||
{&binaryEqualsFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewBinaryEqualsFunc(t *testing.T) {
|
||||
case1Function, err := newBinaryEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newBinaryEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("yourbucket/myobject"))),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newBinaryEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Function, err := newBinaryEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("COPY"))),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Function, err := newBinaryEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1")))))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Function, err := newBinaryEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("us-west-1"))),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))), case1Function, false},
|
||||
{S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("yourbucket/myobject"))),
|
||||
), case2Function, false},
|
||||
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256")))), case3Function, false},
|
||||
{S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))),
|
||||
), case4Function, false},
|
||||
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE")))), case5Function, false},
|
||||
{S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("COPY"))),
|
||||
), case6Function, false},
|
||||
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1")))), case7Function, false},
|
||||
{S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))),
|
||||
NewStringValue(base64.StdEncoding.EncodeToString([]byte("us-west-1"))),
|
||||
), case8Function, false},
|
||||
|
||||
// Unsupported value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject"))), NewIntValue(7)), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("AES256"))), NewIntValue(7)), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("REPLACE"))), NewIntValue(7)), nil, true},
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("eu-west-1"))), NewIntValue(7)), nil, true},
|
||||
|
||||
// Invalid value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket")))), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("SSE-C")))), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("DUPLICATE")))), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newBinaryEqualsFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
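// Usage sketch (illustrative; not part of the original sources). BinaryEquals
// values are base64-encoded in the policy document; the assumption here, based
// on the sibling StringEquals functions, is that evaluation compares the decoded
// string against the raw request value. exampleBinaryEqualsUsage is a
// hypothetical helper, not an API of this package.
func exampleBinaryEqualsUsage() bool {
	f, err := newBinaryEqualsFunc(S3XAmzCopySource,
		NewValueSet(NewStringValue(base64.StdEncoding.EncodeToString([]byte("mybucket/myobject")))))
	if err != nil {
		return false // unsupported key or malformed/unsupported value
	}
	// Expected to be true for a matching copy-source request value.
	return f.evaluate(map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}})
}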
|
|
@ -1,109 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// booleanFunc - Bool condition function. It checks whether Key is true or false.
|
||||
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html#Conditions_Boolean
|
||||
type booleanFunc struct {
|
||||
k Key
|
||||
value string
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values
// matches this condition's boolean value ("true" or "false").
|
||||
func (f booleanFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
return f.value == requestValue[0]
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f booleanFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "Bool" condition name.
|
||||
func (f booleanFunc) name() name {
|
||||
return boolean
|
||||
}
|
||||
|
||||
func (f booleanFunc) String() string {
|
||||
return fmt.Sprintf("%v:%v:%v", boolean, f.k, f.value)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f booleanFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: NewValueSet(NewStringValue(f.value)),
|
||||
}
|
||||
}
|
||||
|
||||
func newBooleanFunc(key Key, values ValueSet) (Function, error) {
|
||||
if key != AWSSecureTransport {
|
||||
return nil, fmt.Errorf("only %v key is allowed for %v condition", AWSSecureTransport, boolean)
|
||||
}
|
||||
|
||||
if len(values) != 1 {
|
||||
return nil, fmt.Errorf("only one value is allowed for boolean condition")
|
||||
}
|
||||
|
||||
var value Value
|
||||
for v := range values {
|
||||
value = v
|
||||
switch v.GetType() {
|
||||
case reflect.Bool:
|
||||
if _, err := v.GetBool(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case reflect.String:
|
||||
s, err := v.GetString()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err = strconv.ParseBool(s); err != nil {
|
||||
return nil, fmt.Errorf("value must be a boolean string for boolean condition")
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("value must be a boolean for boolean condition")
|
||||
}
|
||||
}
|
||||
|
||||
return &booleanFunc{key, value.String()}, nil
|
||||
}
|
||||
|
||||
// NewBoolFunc - returns new Bool function.
|
||||
func NewBoolFunc(key Key, value string) (Function, error) {
|
||||
return &booleanFunc{key, value}, nil
|
||||
}
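// Usage sketch (illustrative; not part of the original bool.go). A Bool
// condition on aws:SecureTransport stores its value as a string and compares
// it literally against the request value. exampleBoolUsage is a hypothetical
// helper, not an API of this package.
func exampleBoolUsage() {
	secureOnly, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(true)))
	if err != nil {
		// only aws:SecureTransport is accepted, with exactly one boolean value
		return
	}
	_ = secureOnly.evaluate(map[string][]string{"SecureTransport": {"true"}})  // true
	_ = secureOnly.evaluate(map[string][]string{"SecureTransport": {"false"}}) // false
}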
|
|
@ -1,152 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBooleanFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(false)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"SecureTransport": {"true"}}, true},
|
||||
{case2Function, map[string][]string{"SecureTransport": {"false"}}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Errorf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBooleanFuncKey(t *testing.T) {
|
||||
case1Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, AWSSecureTransport},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBooleanFuncToMap(t *testing.T) {
|
||||
case1Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
AWSSecureTransport: NewValueSet(NewStringValue("true")),
|
||||
}
|
||||
|
||||
case2Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(false)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
AWSSecureTransport: NewValueSet(NewStringValue("false")),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewBooleanFunc(t *testing.T) {
|
||||
case1Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newBooleanFunc(AWSSecureTransport, NewValueSet(NewBoolValue(false)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{AWSSecureTransport, NewValueSet(NewBoolValue(true)), case1Function, false},
|
||||
{AWSSecureTransport, NewValueSet(NewStringValue("false")), case2Function, false},
|
||||
// Multiple values error.
|
||||
{AWSSecureTransport, NewValueSet(NewStringValue("true"), NewStringValue("false")), nil, true},
|
||||
// Invalid boolean string error.
|
||||
{AWSSecureTransport, NewValueSet(NewStringValue("foo")), nil, true},
|
||||
// Invalid value error.
|
||||
{AWSSecureTransport, NewValueSet(NewIntValue(7)), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newBooleanFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,164 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
func toDateEqualsFuncString(n name, key Key, value time.Time) string {
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, value.Format(time.RFC3339))
|
||||
}
|
||||
|
||||
// dateEqualsFunc - Date equals function. It checks whether the value for Key in the
// given values map, parsed as an RFC3339 time, equals the condition value.
// For example,
//   - if value = "2020-01-01T00:00:00Z", evaluate() returns whether the request
//     time for Key is exactly that instant.
|
||||
type dateEqualsFunc struct {
|
||||
k Key
|
||||
value time.Time
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values,
// parsed as an RFC3339 time, equals the condition value.
|
||||
func (f dateEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339, requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return f.value.Equal(t)
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f dateEqualsFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "DateEquals" condition name.
|
||||
func (f dateEqualsFunc) name() name {
|
||||
return dateEquals
|
||||
}
|
||||
|
||||
func (f dateEqualsFunc) String() string {
|
||||
return toDateEqualsFuncString(dateEquals, f.k, f.value)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f dateEqualsFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
values.Add(NewStringValue(f.value.Format(time.RFC3339)))
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// dateNotEqualsFunc - Date not equals function. It checks whether the value for Key in
// the given values map, parsed as an RFC3339 time, is NOT equal to the condition value.
// For example,
//   - if value = "2020-01-01T00:00:00Z", evaluate() returns whether the request
//     time for Key differs from that instant.
|
||||
type dateNotEqualsFunc struct {
|
||||
dateEqualsFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether value by Key in given values is NOT in
|
||||
// condition values.
|
||||
func (f dateNotEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
return !f.dateEqualsFunc.evaluate(values)
|
||||
}
|
||||
|
||||
// name() - returns "DateNotEquals" condition name.
|
||||
func (f dateNotEqualsFunc) name() name {
|
||||
return dateNotEquals
|
||||
}
|
||||
|
||||
func (f dateNotEqualsFunc) String() string {
|
||||
return toDateEqualsFuncString(dateNotEquals, f.dateEqualsFunc.k, f.dateEqualsFunc.value)
|
||||
}
|
||||
|
||||
func valueToTime(n name, values ValueSet) (v time.Time, err error) {
|
||||
if len(values) != 1 {
|
||||
return v, fmt.Errorf("only one value is allowed for %s condition", n)
|
||||
}
|
||||
|
||||
for vs := range values {
|
||||
switch vs.GetType() {
|
||||
case reflect.String:
|
||||
s, err := vs.GetString()
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
if v, err = time.Parse(time.RFC3339, s); err != nil {
|
||||
return v, fmt.Errorf("value %s must be a time.Time string for %s condition: %w", vs, n, err)
|
||||
}
|
||||
default:
|
||||
return v, fmt.Errorf("value %s must be a time.Time for %s condition", vs, n)
|
||||
}
|
||||
}
|
||||
|
||||
return v, nil
}
|
||||
|
||||
// newDateEqualsFunc - returns new DateEquals function.
|
||||
func newDateEqualsFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToTime(dateEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewDateEqualsFunc(key, v)
|
||||
}
|
||||
|
||||
// NewDateEqualsFunc - returns new DateEquals function.
|
||||
func NewDateEqualsFunc(key Key, value time.Time) (Function, error) {
|
||||
return &dateEqualsFunc{key, value}, nil
|
||||
}
|
||||
|
||||
// newDateNotEqualsFunc - returns new DateNotEquals function.
|
||||
func newDateNotEqualsFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToTime(dateNotEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewDateNotEqualsFunc(key, v)
|
||||
}
|
||||
|
||||
// NewDateNotEqualsFunc - returns new DateNotEquals function.
|
||||
func NewDateNotEqualsFunc(key Key, value time.Time) (Function, error) {
|
||||
return &dateNotEqualsFunc{dateEqualsFunc{key, value}}, nil
|
||||
}
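// Usage sketch (illustrative; not part of the original dateequals.go).
// DateEquals parses the request value as RFC3339 and compares it to the stored
// time with time.Time.Equal. AWSCurrentTime and the "CurrentTime" map key are
// assumptions (mirroring how AWSSourceIP maps to "SourceIp" in the tests);
// the constructor itself does not restrict the key.
func exampleDateEqualsUsage() {
	cutoff := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
	f, err := NewDateEqualsFunc(AWSCurrentTime, cutoff)
	if err != nil {
		return // currently never happens; kept for symmetry with other constructors
	}
	_ = f.evaluate(map[string][]string{"CurrentTime": {"2020-01-01T00:00:00Z"}}) // true
	_ = f.evaluate(map[string][]string{"CurrentTime": {"2020-06-01T00:00:00Z"}}) // false
}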
|
|
@ -1,153 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
func toDateGreaterThanFuncString(n name, key Key, value time.Time) string {
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, value.Format(time.RFC3339))
|
||||
}
|
||||
|
||||
// dateGreaterThanFunc - Date greater than function. It checks whether the value for Key
// in the given values map, parsed as an RFC3339 time, is after the condition value.
// For example,
//   - if value = "2020-01-01T00:00:00Z", evaluate() returns whether the request
//     time for Key is after that instant.
|
||||
type dateGreaterThanFunc struct {
|
||||
k Key
|
||||
value time.Time
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values,
// parsed as an RFC3339 time, is after the condition value.
|
||||
func (f dateGreaterThanFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339, requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return t.After(f.value)
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f dateGreaterThanFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "DateGreaterThan" condition name.
|
||||
func (f dateGreaterThanFunc) name() name {
|
||||
return dateGreaterThan
|
||||
}
|
||||
|
||||
func (f dateGreaterThanFunc) String() string {
|
||||
return toDateGreaterThanFuncString(dateGreaterThan, f.k, f.value)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f dateGreaterThanFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
values.Add(NewStringValue(f.value.Format(time.RFC3339)))
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// dateGreaterThanEqualsFunc - Date greater than or equals function. It checks whether
// the value for Key in the given values map, parsed as an RFC3339 time, is after or
// equal to the condition value.
|
||||
type dateGreaterThanEqualsFunc struct {
|
||||
dateGreaterThanFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values,
// parsed as an RFC3339 time, is after or equal to the condition value.
|
||||
func (f dateGreaterThanEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339, requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return t.After(f.value) || t.Equal(f.value)
|
||||
}
|
||||
|
||||
// name() - returns "DateNotEquals" condition name.
|
||||
func (f dateGreaterThanEqualsFunc) name() name {
|
||||
return dateGreaterThanEquals
|
||||
}
|
||||
|
||||
func (f dateGreaterThanEqualsFunc) String() string {
|
||||
return toDateGreaterThanFuncString(dateGreaterThanEquals, f.dateGreaterThanFunc.k, f.dateGreaterThanFunc.value)
|
||||
}
|
||||
|
||||
// newDateGreaterThanFunc - returns new DateGreaterThan function.
|
||||
func newDateGreaterThanFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToTime(dateGreaterThan, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewDateGreaterThanFunc(key, v)
|
||||
}
|
||||
|
||||
// NewDateGreaterThanFunc - returns new DateGreaterThan function.
|
||||
func NewDateGreaterThanFunc(key Key, value time.Time) (Function, error) {
|
||||
return &dateGreaterThanFunc{key, value}, nil
|
||||
}
|
||||
|
||||
// newDateGreaterThanEqualsFunc - returns new DateGreaterThanEquals function.
func newDateGreaterThanEqualsFunc(key Key, values ValueSet) (Function, error) {
v, err := valueToTime(dateGreaterThanEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewDateGreaterThanEqualsFunc(key, v)
|
||||
}
|
||||
|
||||
// NewDateGreaterThanEqualsFunc - returns new DateGreaterThanEquals function.
|
||||
func NewDateGreaterThanEqualsFunc(key Key, value time.Time) (Function, error) {
|
||||
return &dateGreaterThanEqualsFunc{dateGreaterThanFunc{key, value}}, nil
|
||||
}
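// Usage sketch (illustrative; not part of the original dategreaterthan.go).
// DateGreaterThan is strict (t.After(value)), while DateGreaterThanEquals also
// accepts the exact boundary instant. AWSCurrentTime and the "CurrentTime" map
// key are assumptions, as in the DateEquals sketch above.
func exampleDateGreaterThanUsage() {
	cutoff := time.Date(2020, time.January, 1, 0, 0, 0, 0, time.UTC)
	gt, _ := NewDateGreaterThanFunc(AWSCurrentTime, cutoff)
	gte, _ := NewDateGreaterThanEqualsFunc(AWSCurrentTime, cutoff)
	boundary := map[string][]string{"CurrentTime": {"2020-01-01T00:00:00Z"}}
	_ = gt.evaluate(boundary)  // false: not strictly after the cutoff
	_ = gte.evaluate(boundary) // true: the boundary instant counts
}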
|
|
@ -1,153 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
func toDateLessThanFuncString(n name, key Key, value time.Time) string {
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, value.Format(time.RFC3339))
|
||||
}
|
||||
|
||||
// dateLessThanFunc - Date less than function. It checks whether the value for Key
// in the given values map, parsed as an RFC3339 time, is before the condition value.
// For example,
//   - if value = "2020-01-01T00:00:00Z", evaluate() returns whether the request
//     time for Key is before that instant.
|
||||
type dateLessThanFunc struct {
|
||||
k Key
|
||||
value time.Time
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values,
// parsed as an RFC3339 time, is before the condition value.
|
||||
func (f dateLessThanFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339, requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return t.Before(f.value)
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f dateLessThanFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "DateLessThan" condition name.
|
||||
func (f dateLessThanFunc) name() name {
|
||||
return dateLessThan
|
||||
}
|
||||
|
||||
func (f dateLessThanFunc) String() string {
|
||||
return toDateLessThanFuncString(dateLessThan, f.k, f.value)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f dateLessThanFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
values.Add(NewStringValue(f.value.Format(time.RFC3339)))
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// dateLessThanEqualsFunc - Date less than or equals function. It checks whether the
// value for Key in the given values map, parsed as an RFC3339 time, is before or
// equal to the condition value.
|
||||
type dateLessThanEqualsFunc struct {
|
||||
dateLessThanFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values,
// parsed as an RFC3339 time, is before or equal to the condition value.
|
||||
func (f dateLessThanEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339, requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return t.Before(f.value) || t.Equal(f.value)
|
||||
}
|
||||
|
||||
// name() - returns "DateNotEquals" condition name.
|
||||
func (f dateLessThanEqualsFunc) name() name {
|
||||
return dateLessThanEquals
|
||||
}
|
||||
|
||||
func (f dateLessThanEqualsFunc) String() string {
|
||||
return toDateLessThanFuncString(dateLessThanEquals, f.dateLessThanFunc.k, f.dateLessThanFunc.value)
|
||||
}
|
||||
|
||||
// newDateLessThanFunc - returns new DateLessThan function.
|
||||
func newDateLessThanFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToTime(dateLessThan, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewDateLessThanFunc(key, v)
|
||||
}
|
||||
|
||||
// NewDateLessThanFunc - returns new DateLessThan function.
|
||||
func NewDateLessThanFunc(key Key, value time.Time) (Function, error) {
|
||||
return &dateLessThanFunc{key, value}, nil
|
||||
}
|
||||
|
||||
// newDateLessThanEqualsFunc - returns new DateLessThanEquals function.
func newDateLessThanEqualsFunc(key Key, values ValueSet) (Function, error) {
v, err := valueToTime(dateLessThanEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewDateLessThanEqualsFunc(key, v)
|
||||
}
|
||||
|
||||
// NewDateLessThanEqualsFunc - returns new DateLessThanEquals function.
|
||||
func NewDateLessThanEqualsFunc(key Key, value time.Time) (Function, error) {
|
||||
return &dateLessThanEqualsFunc{dateLessThanFunc{key, value}}, nil
|
||||
}
|
|
@ -1,187 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Function - condition function interface.
|
||||
type Function interface {
|
||||
// evaluate() - evaluates this condition function with given values.
|
||||
evaluate(values map[string][]string) bool
|
||||
|
||||
// key() - returns condition key used in this function.
|
||||
key() Key
|
||||
|
||||
// name() - returns condition name of this function.
|
||||
name() name
|
||||
|
||||
// String() - returns string representation of function.
|
||||
String() string
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
toMap() map[Key]ValueSet
|
||||
}
|
||||
|
||||
// Functions - list of functions.
|
||||
type Functions []Function
|
||||
|
||||
// Evaluate - evaluates all functions with the given values map. Functions are
// evaluated sequentially, and the next function is called only if the current
// function succeeds.
|
||||
func (functions Functions) Evaluate(values map[string][]string) bool {
|
||||
for _, f := range functions {
|
||||
if !f.evaluate(values) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
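// Usage sketch (illustrative; not part of the original func.go). Evaluate applies
// AND semantics: every condition function must succeed for the set to succeed.
// exampleFunctionsEvaluate is a hypothetical helper, not an API of this package.
func exampleFunctionsEvaluate() bool {
	ipFn, _ := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
	srcFn, _ := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
	return NewFunctions(ipFn, srcFn).Evaluate(map[string][]string{
		"SourceIp":          {"192.168.1.10"},
		"x-amz-copy-source": {"mybucket/myobject"},
	}) // true only when both conditions match
}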
|
||||
|
||||
// Keys - returns list of keys used in all functions.
|
||||
func (functions Functions) Keys() KeySet {
|
||||
keySet := NewKeySet()
|
||||
|
||||
for _, f := range functions {
|
||||
keySet.Add(f.key())
|
||||
}
|
||||
|
||||
return keySet
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes Functions to JSON data.
|
||||
func (functions Functions) MarshalJSON() ([]byte, error) {
|
||||
nm := make(map[name]map[Key]ValueSet)
|
||||
|
||||
for _, f := range functions {
|
||||
if _, ok := nm[f.name()]; ok {
|
||||
for k, v := range f.toMap() {
|
||||
nm[f.name()][k] = v
|
||||
}
|
||||
} else {
|
||||
nm[f.name()] = f.toMap()
|
||||
}
|
||||
}
|
||||
|
||||
return json.Marshal(nm)
|
||||
}
|
||||
|
||||
func (functions Functions) String() string {
|
||||
funcStrings := []string{}
|
||||
for _, f := range functions {
|
||||
s := fmt.Sprintf("%v", f)
|
||||
funcStrings = append(funcStrings, s)
|
||||
}
|
||||
sort.Strings(funcStrings)
|
||||
|
||||
return fmt.Sprintf("%v", funcStrings)
|
||||
}
|
||||
|
||||
var conditionFuncMap = map[name]func(Key, ValueSet) (Function, error){
|
||||
stringEquals: newStringEqualsFunc,
|
||||
stringNotEquals: newStringNotEqualsFunc,
|
||||
stringEqualsIgnoreCase: newStringEqualsIgnoreCaseFunc,
|
||||
stringNotEqualsIgnoreCase: newStringNotEqualsIgnoreCaseFunc,
|
||||
binaryEquals: newBinaryEqualsFunc,
|
||||
stringLike: newStringLikeFunc,
|
||||
stringNotLike: newStringNotLikeFunc,
|
||||
ipAddress: newIPAddressFunc,
|
||||
notIPAddress: newNotIPAddressFunc,
|
||||
null: newNullFunc,
|
||||
boolean: newBooleanFunc,
|
||||
numericEquals: newNumericEqualsFunc,
|
||||
numericNotEquals: newNumericNotEqualsFunc,
|
||||
numericLessThan: newNumericLessThanFunc,
|
||||
numericLessThanEquals: newNumericLessThanEqualsFunc,
|
||||
numericGreaterThan: newNumericGreaterThanFunc,
|
||||
numericGreaterThanEquals: newNumericGreaterThanEqualsFunc,
|
||||
dateEquals: newDateEqualsFunc,
|
||||
dateNotEquals: newDateNotEqualsFunc,
|
||||
dateLessThan: newDateLessThanFunc,
|
||||
dateLessThanEquals: newDateLessThanEqualsFunc,
|
||||
dateGreaterThan: newDateGreaterThanFunc,
|
||||
dateGreaterThanEquals: newDateGreaterThanEqualsFunc,
|
||||
// Add new conditions here.
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to Functions.
|
||||
func (functions *Functions) UnmarshalJSON(data []byte) error {
|
||||
// The JSON decoder checks for string kind and int kind before json.Unmarshaler
// (https://github.com/golang/go/blob/master/src/encoding/json/decode.go#L618),
// so UnmarshalJSON() is not called for types whose underlying type is string
// (see https://play.golang.org/p/HrSsKksHvrS; a better approach is shown at
// https://play.golang.org/p/y9ElWpBgVAB).
//
// Due to this issue, name and Key types cannot be used as map keys below.
|
||||
nm := make(map[string]map[string]ValueSet)
|
||||
if err := json.Unmarshal(data, &nm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(nm) == 0 {
|
||||
return fmt.Errorf("condition must not be empty")
|
||||
}
|
||||
|
||||
funcs := []Function{}
|
||||
for nameString, args := range nm {
|
||||
n, err := parseName(nameString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for keyString, values := range args {
|
||||
key, err := parseKey(keyString)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
vfn, ok := conditionFuncMap[n]
|
||||
if !ok {
|
||||
return fmt.Errorf("condition %v is not handled", n)
|
||||
}
|
||||
|
||||
f, err := vfn(key, values)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
funcs = append(funcs, f)
|
||||
}
|
||||
}
|
||||
|
||||
*functions = funcs
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GobEncode - encodes Functions to gob data.
|
||||
func (functions Functions) GobEncode() ([]byte, error) {
|
||||
return functions.MarshalJSON()
|
||||
}
|
||||
|
||||
// GobDecode - decodes gob data to Functions.
|
||||
func (functions *Functions) GobDecode(data []byte) error {
|
||||
return functions.UnmarshalJSON(data)
|
||||
}
|
||||
|
||||
// NewFunctions - returns new Functions with given function list.
|
||||
func NewFunctions(functions ...Function) Functions {
|
||||
return Functions(functions)
|
||||
}
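// Usage sketch (illustrative; not part of the original func.go). A policy
// Condition block decodes into Functions via UnmarshalJSON, which dispatches
// each condition name through conditionFuncMap. exampleUnmarshalConditions is
// a hypothetical helper, not an API of this package.
func exampleUnmarshalConditions() (Functions, error) {
	data := []byte(`{
		"IpAddress": {"aws:SourceIp": ["192.168.1.0/24"]},
		"StringEquals": {"s3:x-amz-copy-source": "mybucket/myobject"}
	}`)
	var fns Functions
	if err := json.Unmarshal(data, &fns); err != nil {
		return nil, err // unknown condition name, bad key, or bad value
	}
	return fns, nil
}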
|
|
@ -1,353 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestFunctionsEvaluate(t *testing.T) {
|
||||
func1, err := newNullFunc(S3XAmzCopySource, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func2, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func3, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func4, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Function := NewFunctions(func1, func2, func3, func4)
|
||||
|
||||
testCases := []struct {
|
||||
functions Functions
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{
|
||||
"x-amz-copy-source": {"mybucket/myobject"},
|
||||
"SourceIp": {"192.168.1.10"},
|
||||
}, false},
|
||||
{case1Function, map[string][]string{
|
||||
"x-amz-copy-source": {"mybucket/myobject"},
|
||||
"SourceIp": {"192.168.1.10"},
|
||||
"Refer": {"http://example.org/"},
|
||||
}, false},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false},
|
||||
{case1Function, map[string][]string{"SourceIp": {"192.168.1.10"}}, false},
|
||||
{case1Function, map[string][]string{
|
||||
"x-amz-copy-source": {"mybucket/yourobject"},
|
||||
"SourceIp": {"192.168.1.10"},
|
||||
}, false},
|
||||
{case1Function, map[string][]string{
|
||||
"x-amz-copy-source": {"mybucket/myobject"},
|
||||
"SourceIp": {"192.168.2.10"},
|
||||
}, false},
|
||||
{case1Function, map[string][]string{
|
||||
"x-amz-copy-source": {"mybucket/myobject"},
|
||||
"Refer": {"http://example.org/"},
|
||||
}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.functions.Evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Errorf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFunctionsKeys(t *testing.T) {
|
||||
func1, err := newNullFunc(S3XAmzCopySource, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func2, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func3, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func4, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
functions Functions
|
||||
expectedResult KeySet
|
||||
}{
|
||||
{NewFunctions(func1, func2, func3, func4), NewKeySet(S3XAmzCopySource, AWSSourceIP)},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.functions.Keys()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFunctionsMarshalJSON(t *testing.T) {
|
||||
func1, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func2, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func3, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func4, err := newNotIPAddressFunc(AWSSourceIP,
|
||||
NewValueSet(NewStringValue("10.1.10.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func5, err := newStringNotLikeFunc(S3XAmzStorageClass, NewValueSet(NewStringValue("STANDARD")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func6, err := newNullFunc(S3XAmzServerSideEncryptionCustomerAlgorithm, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func7, err := newIPAddressFunc(AWSSourceIP,
|
||||
NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := []byte(`{"IpAddress":{"aws:SourceIp":["192.168.1.0/24"]},"NotIpAddress":{"aws:SourceIp":["10.1.10.0/24"]},"Null":{"s3:x-amz-server-side-encryption-customer-algorithm":[true]},"StringEquals":{"s3:x-amz-copy-source":["mybucket/myobject"]},"StringLike":{"s3:x-amz-metadata-directive":["REPL*"]},"StringNotEquals":{"s3:x-amz-server-side-encryption":["AES256"]},"StringNotLike":{"s3:x-amz-storage-class":["STANDARD"]}}`)
|
||||
|
||||
case2Result := []byte(`{"Null":{"s3:x-amz-server-side-encryption-customer-algorithm":[true]}}`)
|
||||
|
||||
testCases := []struct {
|
||||
functions Functions
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{NewFunctions(func1, func2, func3, func4, func5, func6, func7), case1Result, false},
|
||||
{NewFunctions(func6), case2Result, false},
|
||||
{NewFunctions(), []byte(`{}`), false},
|
||||
{nil, []byte(`{}`), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.functions)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if testCase.expectErr != expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFunctionsUnmarshalJSON(t *testing.T) {
|
||||
case1Data := []byte(`{
|
||||
"StringLike": {
|
||||
"s3:x-amz-metadata-directive": "REPL*"
|
||||
},
|
||||
"StringEquals": {
|
||||
"s3:x-amz-copy-source": "mybucket/myobject"
|
||||
},
|
||||
"StringNotEquals": {
|
||||
"s3:x-amz-server-side-encryption": "AES256"
|
||||
},
|
||||
"NotIpAddress": {
|
||||
"aws:SourceIp": [
|
||||
"10.1.10.0/24",
|
||||
"10.10.1.0/24"
|
||||
]
|
||||
},
|
||||
"StringNotLike": {
|
||||
"s3:x-amz-storage-class": "STANDARD"
|
||||
},
|
||||
"Null": {
|
||||
"s3:x-amz-server-side-encryption-customer-algorithm": true
|
||||
},
|
||||
"IpAddress": {
|
||||
"aws:SourceIp": [
|
||||
"192.168.1.0/24",
|
||||
"192.168.2.0/24"
|
||||
]
|
||||
}
|
||||
}`)
|
||||
func1, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func2, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func3, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func4, err := newNotIPAddressFunc(AWSSourceIP,
|
||||
NewValueSet(NewStringValue("10.1.10.0/24"), NewStringValue("10.10.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func5, err := newStringNotLikeFunc(S3XAmzStorageClass, NewValueSet(NewStringValue("STANDARD")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func6, err := newNullFunc(S3XAmzServerSideEncryptionCustomerAlgorithm, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func7, err := newIPAddressFunc(AWSSourceIP,
|
||||
NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("192.168.2.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Data := []byte(`{
|
||||
"Null": {
|
||||
"s3:x-amz-server-side-encryption-customer-algorithm": true
|
||||
},
|
||||
"Null": {
|
||||
"s3:x-amz-server-side-encryption-customer-algorithm": "true"
|
||||
}
|
||||
}`)
|
||||
|
||||
case3Data := []byte(`{}`)
|
||||
|
||||
case4Data := []byte(`{
|
||||
"StringLike": {
|
||||
"s3:x-amz-metadata-directive": "REPL*"
|
||||
},
|
||||
"StringEquals": {
|
||||
"s3:x-amz-copy-source": "mybucket/myobject",
|
||||
"s3:prefix": [
|
||||
"",
|
||||
"home/"
|
||||
],
|
||||
"s3:delimiter": [
|
||||
"/"
|
||||
]
|
||||
},
|
||||
"StringNotEquals": {
|
||||
"s3:x-amz-server-side-encryption": "AES256"
|
||||
},
|
||||
"NotIpAddress": {
|
||||
"aws:SourceIp": [
|
||||
"10.1.10.0/24",
|
||||
"10.10.1.0/24"
|
||||
]
|
||||
},
|
||||
"StringNotLike": {
|
||||
"s3:x-amz-storage-class": "STANDARD"
|
||||
},
|
||||
"Null": {
|
||||
"s3:x-amz-server-side-encryption-customer-algorithm": true
|
||||
},
|
||||
"IpAddress": {
|
||||
"aws:SourceIp": [
|
||||
"192.168.1.0/24",
|
||||
"192.168.2.0/24"
|
||||
]
|
||||
}
|
||||
}`)
|
||||
|
||||
func2_1, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func2_2, err := newStringEqualsFunc(S3Prefix, NewValueSet(NewStringValue(""), NewStringValue("home/")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func2_3, err := newStringEqualsFunc(S3Delimiter, NewValueSet(NewStringValue("/")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult Functions
|
||||
expectErr bool
|
||||
}{
|
||||
// Success case, basic conditions.
|
||||
{case1Data, NewFunctions(func1, func2, func3, func4, func5, func6, func7), false},
|
||||
// Duplicate conditions, success case only one value is preserved.
|
||||
{case2Data, NewFunctions(func6), false},
|
||||
// empty condition error.
|
||||
{case3Data, nil, true},
|
||||
// Success case multiple keys, same condition.
|
||||
{case4Data, NewFunctions(func1, func2_1, func2_2, func2_3, func3, func4, func5, func6, func7), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := new(Functions)
|
||||
err := json.Unmarshal(testCase.data, result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if testCase.expectErr != expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if (*result).String() != testCase.expectedResult.String() {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, *result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,186 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"sort"
|
||||
)
|
||||
|
||||
func toIPAddressFuncString(n name, key Key, values []*net.IPNet) string {
|
||||
valueStrings := []string{}
|
||||
for _, value := range values {
|
||||
valueStrings = append(valueStrings, value.String())
|
||||
}
|
||||
sort.Strings(valueStrings)
|
||||
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, valueStrings)
|
||||
}
|
||||
|
||||
// ipAddressFunc - IP address function. It checks whether value by Key in given
|
||||
// values is in IP network. Here Key must be AWSSourceIP.
|
||||
// For example,
|
||||
// - if values = [192.168.1.0/24], at evaluate() it returns whether the IP address
// in the value map for AWSSourceIP falls in the network 192.168.1.0/24.
|
||||
type ipAddressFunc struct {
|
||||
k Key
|
||||
values []*net.IPNet
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether IP address in values map for AWSSourceIP
|
||||
// falls in one of network or not.
|
||||
func (f ipAddressFunc) evaluate(values map[string][]string) bool {
|
||||
IPs := []net.IP{}
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
for _, s := range requestValue {
|
||||
IP := net.ParseIP(s)
|
||||
if IP == nil {
|
||||
panic(fmt.Errorf("invalid IP address '%v'", s))
|
||||
}
|
||||
|
||||
IPs = append(IPs, IP)
|
||||
}
|
||||
|
||||
for _, IP := range IPs {
|
||||
for _, IPNet := range f.values {
|
||||
if IPNet.Contains(IP) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
// Key is always AWSSourceIP.
|
||||
func (f ipAddressFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "IpAddress" condition name.
|
||||
func (f ipAddressFunc) name() name {
|
||||
return ipAddress
|
||||
}
|
||||
|
||||
func (f ipAddressFunc) String() string {
|
||||
return toIPAddressFuncString(ipAddress, f.k, f.values)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f ipAddressFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
for _, value := range f.values {
|
||||
values.Add(NewStringValue(value.String()))
|
||||
}
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// notIPAddressFunc - Not IP address function. It checks whether value by Key in given
|
||||
// values is NOT in IP network. Here Key must be AWSSourceIP.
|
||||
// For example,
|
||||
// - if values = [192.168.1.0/24], at evaluate() it returns whether the IP address
// in the value map for AWSSourceIP does not fall in the network 192.168.1.0/24.
|
||||
type notIPAddressFunc struct {
|
||||
ipAddressFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether IP address in values map for AWSSourceIP
|
||||
// does not fall in one of network.
|
||||
func (f notIPAddressFunc) evaluate(values map[string][]string) bool {
|
||||
return !f.ipAddressFunc.evaluate(values)
|
||||
}
|
||||
|
||||
// name() - returns "NotIpAddress" condition name.
|
||||
func (f notIPAddressFunc) name() name {
|
||||
return notIPAddress
|
||||
}
|
||||
|
||||
func (f notIPAddressFunc) String() string {
|
||||
return toIPAddressFuncString(notIPAddress, f.ipAddressFunc.k, f.ipAddressFunc.values)
|
||||
}
|
||||
|
||||
func valuesToIPNets(n name, values ValueSet) ([]*net.IPNet, error) {
|
||||
IPNets := []*net.IPNet{}
|
||||
for v := range values {
|
||||
s, err := v.GetString()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("value %v must be string representation of CIDR for %v condition", v, n)
|
||||
}
|
||||
|
||||
var IPNet *net.IPNet
|
||||
_, IPNet, err = net.ParseCIDR(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("value %v must be CIDR string for %v condition", s, n)
|
||||
}
|
||||
|
||||
IPNets = append(IPNets, IPNet)
|
||||
}
|
||||
|
||||
return IPNets, nil
|
||||
}
|
||||
|
||||
// newIPAddressFunc - returns new IP address function.
|
||||
func newIPAddressFunc(key Key, values ValueSet) (Function, error) {
|
||||
IPNets, err := valuesToIPNets(ipAddress, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewIPAddressFunc(key, IPNets...)
|
||||
}
|
||||
|
||||
// NewIPAddressFunc - returns new IP address function.
|
||||
func NewIPAddressFunc(key Key, IPNets ...*net.IPNet) (Function, error) {
|
||||
if key != AWSSourceIP {
|
||||
return nil, fmt.Errorf("only %v key is allowed for %v condition", AWSSourceIP, ipAddress)
|
||||
}
|
||||
|
||||
return &ipAddressFunc{key, IPNets}, nil
|
||||
}
|
||||
|
||||
// newNotIPAddressFunc - returns new Not IP address function.
|
||||
func newNotIPAddressFunc(key Key, values ValueSet) (Function, error) {
|
||||
IPNets, err := valuesToIPNets(notIPAddress, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewNotIPAddressFunc(key, IPNets...)
|
||||
}
|
||||
|
||||
// NewNotIPAddressFunc - returns new Not IP address function.
|
||||
func NewNotIPAddressFunc(key Key, IPNets ...*net.IPNet) (Function, error) {
|
||||
if key != AWSSourceIP {
|
||||
return nil, fmt.Errorf("only %v key is allowed for %v condition", AWSSourceIP, notIPAddress)
|
||||
}
|
||||
|
||||
return ¬IPAddressFunc{ipAddressFunc{key, IPNets}}, nil
|
||||
}
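// Usage sketch (illustrative; not part of the original ipaddress.go). IpAddress
// conditions only accept the aws:SourceIp key and CIDR values; evaluation checks
// whether any request IP falls inside any configured network.
// exampleIPAddressUsage is a hypothetical helper, not an API of this package.
func exampleIPAddressUsage() bool {
	_, network, err := net.ParseCIDR("192.168.1.0/24")
	if err != nil {
		return false // invalid CIDR string
	}
	f, err := NewIPAddressFunc(AWSSourceIP, network)
	if err != nil {
		return false // a key other than aws:SourceIp was supplied
	}
	return f.evaluate(map[string][]string{"SourceIp": {"192.168.1.10"}}) // true
}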
|
|
@ -1,278 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIPAddressFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"SourceIp": {"192.168.1.10"}}, true},
|
||||
{case1Function, map[string][]string{"SourceIp": {"192.168.2.10"}}, false},
|
||||
{case1Function, map[string][]string{}, false},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIPAddressFuncKey(t *testing.T) {
|
||||
case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, AWSSourceIP},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIPAddressFuncToMap(t *testing.T) {
|
||||
case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24")),
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{&ipAddressFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotIPAddressFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"SourceIp": {"192.168.2.10"}}, true},
|
||||
{case1Function, map[string][]string{}, true},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
{case1Function, map[string][]string{"SourceIp": {"192.168.1.10"}}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotIPAddressFuncKey(t *testing.T) {
|
||||
case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, AWSSourceIP},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotIPAddressFuncToMap(t *testing.T) {
|
||||
case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24")),
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
AWSSourceIP: NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{¬IPAddressFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewIPAddressFunc(t *testing.T) {
|
||||
case1Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")), case1Function, false},
|
||||
{AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), case2Function, false},
|
||||
// Unsupported key error.
|
||||
{S3Prefix, NewValueSet(NewStringValue("192.168.1.0/24")), nil, true},
|
||||
// Invalid value error.
|
||||
{AWSSourceIP, NewValueSet(NewStringValue("node1.example.org")), nil, true},
|
||||
// Invalid CIDR format error.
|
||||
{AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0.0/24")), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newIPAddressFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if result.String() != testCase.expectedResult.String() {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewNotIPAddressFunc(t *testing.T) {
|
||||
case1Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")), case1Function, false},
|
||||
{AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24"), NewStringValue("10.1.10.1/32")), case2Function, false},
|
||||
// Unsupported key error.
|
||||
{S3Prefix, NewValueSet(NewStringValue("192.168.1.0/24")), nil, true},
|
||||
// Invalid value error.
|
||||
{AWSSourceIP, NewValueSet(NewStringValue("node1.example.org")), nil, true},
|
||||
// Invalid CIDR format error.
|
||||
{AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0.0/24")), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newNotIPAddressFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if result.String() != testCase.expectedResult.String() {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
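
// The following is a minimal, hypothetical in-package sketch (not part of the original
// change) illustrating how the IpAddress and NotIpAddress conditions evaluate a request
// IP against a CIDR. It only uses identifiers already defined in this package
// (newIPAddressFunc, newNotIPAddressFunc, AWSSourceIP, NewValueSet, NewStringValue).
package condition

import "fmt"

func exampleIPAddressConditions() {
	inRange, _ := newIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))
	outOfRange, _ := newNotIPAddressFunc(AWSSourceIP, NewValueSet(NewStringValue("192.168.1.0/24")))

	// Request values are keyed by the stripped key name, here "SourceIp".
	insideCIDR := map[string][]string{"SourceIp": {"192.168.1.10"}}
	outsideCIDR := map[string][]string{"SourceIp": {"10.0.0.1"}}

	fmt.Println(inRange.evaluate(insideCIDR), inRange.evaluate(outsideCIDR))       // true false
	fmt.Println(outOfRange.evaluate(insideCIDR), outOfRange.evaluate(outsideCIDR)) // false true
}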
|
|
@ -1,75 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
// Supported JWT claim substitutions.
|
||||
// https://www.iana.org/assignments/jwt/jwt.xhtml#claims
|
||||
const (
|
||||
// JWTSub - JWT subject claim substitution.
|
||||
JWTSub Key = "jwt:sub"
|
||||
|
||||
// JWTIss issuer claim substitution.
|
||||
JWTIss Key = "jwt:iss"
|
||||
|
||||
// JWTAud audience claim substitution.
|
||||
JWTAud Key = "jwt:aud"
|
||||
|
||||
// JWTJti JWT unique identifier claim substitution.
|
||||
JWTJti Key = "jwt:jti"
|
||||
|
||||
JWTName Key = "jwt:name"
|
||||
JWTGivenName Key = "jwt:given_name"
|
||||
JWTFamilyName Key = "jwt:family_name"
|
||||
JWTMiddleName Key = "jwt:middle_name"
|
||||
JWTNickName Key = "jwt:nickname"
|
||||
JWTPrefUsername Key = "jwt:preferred_username"
|
||||
JWTProfile Key = "jwt:profile"
|
||||
JWTPicture Key = "jwt:picture"
|
||||
JWTWebsite Key = "jwt:website"
|
||||
JWTEmail Key = "jwt:email"
|
||||
JWTGender Key = "jwt:gender"
|
||||
JWTBirthdate Key = "jwt:birthdate"
|
||||
JWTPhoneNumber Key = "jwt:phone_number"
|
||||
JWTAddress Key = "jwt:address"
|
||||
JWTScope Key = "jwt:scope"
|
||||
JWTClientID Key = "jwt:client_id"
|
||||
)
|
||||
|
||||
// JWTKeys - Supported JWT keys; this is a non-exhaustive list, please
|
||||
// expand as new claims are standardized.
|
||||
var JWTKeys = []Key{
|
||||
JWTSub,
|
||||
JWTIss,
|
||||
JWTAud,
|
||||
JWTJti,
|
||||
JWTName,
|
||||
JWTGivenName,
|
||||
JWTFamilyName,
|
||||
JWTMiddleName,
|
||||
JWTNickName,
|
||||
JWTPrefUsername,
|
||||
JWTProfile,
|
||||
JWTPicture,
|
||||
JWTWebsite,
|
||||
JWTEmail,
|
||||
JWTGender,
|
||||
JWTBirthdate,
|
||||
JWTPhoneNumber,
|
||||
JWTAddress,
|
||||
JWTScope,
|
||||
JWTClientID,
|
||||
}
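
// A brief, hypothetical sketch (added for illustration, not in the original file) showing
// that the JWT claim keys above are recognised as valid condition keys, since JWTKeys is
// appended to AllSupportedKeys in this package.
package condition

import "fmt"

func exampleJWTClaimKeys() {
	fmt.Println(JWTSub.IsValid())             // true
	fmt.Println(JWTPrefUsername.IsValid())    // true
	fmt.Println(Key("jwt:unknown").IsValid()) // false
}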
|
|
@ -1,292 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Key - conditional key which is used to fetch values for any condition.
|
||||
// Refer https://docs.aws.amazon.com/IAM/latest/UserGuide/list_s3.html
|
||||
// for more information about available condition keys.
|
||||
type Key string
|
||||
|
||||
const (
|
||||
// S3XAmzCopySource - key representing x-amz-copy-source HTTP header applicable to PutObject API only.
|
||||
S3XAmzCopySource Key = "s3:x-amz-copy-source"
|
||||
|
||||
// S3XAmzServerSideEncryption - key representing x-amz-server-side-encryption HTTP header applicable
|
||||
// to PutObject API only.
|
||||
S3XAmzServerSideEncryption Key = "s3:x-amz-server-side-encryption"
|
||||
|
||||
// S3XAmzServerSideEncryptionCustomerAlgorithm - key representing
|
||||
// x-amz-server-side-encryption-customer-algorithm HTTP header applicable to PutObject API only.
|
||||
S3XAmzServerSideEncryptionCustomerAlgorithm Key = "s3:x-amz-server-side-encryption-customer-algorithm"
|
||||
|
||||
// S3XAmzMetadataDirective - key representing x-amz-metadata-directive HTTP header applicable to
|
||||
// PutObject API only.
|
||||
S3XAmzMetadataDirective Key = "s3:x-amz-metadata-directive"
|
||||
|
||||
// S3XAmzContentSha256 - set a static content-sha256 for all calls for a given action.
|
||||
S3XAmzContentSha256 Key = "s3:x-amz-content-sha256"
|
||||
|
||||
// S3XAmzStorageClass - key representing x-amz-storage-class HTTP header applicable to PutObject API
|
||||
// only.
|
||||
S3XAmzStorageClass Key = "s3:x-amz-storage-class"
|
||||
|
||||
// S3LocationConstraint - key representing LocationConstraint XML tag of CreateBucket API only.
|
||||
S3LocationConstraint Key = "s3:LocationConstraint"
|
||||
|
||||
// S3Prefix - key representing prefix query parameter of ListBucket API only.
|
||||
S3Prefix Key = "s3:prefix"
|
||||
|
||||
// S3Delimiter - key representing delimiter query parameter of ListBucket API only.
|
||||
S3Delimiter Key = "s3:delimiter"
|
||||
|
||||
// S3MaxKeys - key representing max-keys query parameter of ListBucket API only.
|
||||
S3MaxKeys Key = "s3:max-keys"
|
||||
|
||||
// S3ObjectLockRemainingRetentionDays - key representing object-lock-remaining-retention-days
|
||||
// Enables enforcement based on an object's remaining retention days; you can set
|
||||
// minimum and maximum allowable retention periods for a bucket using a bucket policy.
|
||||
// This key is specific to the s3:PutObjectRetention API.
|
||||
S3ObjectLockRemainingRetentionDays Key = "s3:object-lock-remaining-retention-days"
|
||||
|
||||
// S3ObjectLockMode - key representing object-lock-mode
|
||||
// Enables enforcement of the specified object retention mode
|
||||
S3ObjectLockMode Key = "s3:object-lock-mode"
|
||||
|
||||
// S3ObjectLockRetainUntilDate - key representing object-lock-retain-until-date
|
||||
// Enables enforcement of a specific retain-until-date
|
||||
S3ObjectLockRetainUntilDate Key = "s3:object-lock-retain-until-date"
|
||||
|
||||
// S3ObjectLockLegalHold - key representing object-lock-legal-hold
|
||||
// Enables enforcement of the specified object legal hold status
|
||||
S3ObjectLockLegalHold Key = "s3:object-lock-legal-hold"
|
||||
|
||||
// AWSReferer - key representing Referer header of any API.
|
||||
AWSReferer Key = "aws:Referer"
|
||||
|
||||
// AWSSourceIP - key representing client's IP address (not intermediary proxies) of any API.
|
||||
AWSSourceIP Key = "aws:SourceIp"
|
||||
|
||||
// AWSUserAgent - key representing UserAgent header for any API.
|
||||
AWSUserAgent Key = "aws:UserAgent"
|
||||
|
||||
// AWSSecureTransport - key representing whether the client's request was sent over a secure (TLS) transport.
|
||||
AWSSecureTransport Key = "aws:SecureTransport"
|
||||
|
||||
// AWSCurrentTime - key representing the current time.
|
||||
AWSCurrentTime Key = "aws:CurrentTime"
|
||||
|
||||
// AWSEpochTime - key representing the current epoch time.
|
||||
AWSEpochTime Key = "aws:EpochTime"
|
||||
|
||||
// AWSPrincipalType - user principal type; currently supported values are "User" and "Anonymous".
|
||||
AWSPrincipalType Key = "aws:principaltype"
|
||||
|
||||
// AWSUserID - user unique ID, in MinIO this value is the same as your user Access Key.
|
||||
AWSUserID Key = "aws:userid"
|
||||
|
||||
// AWSUsername - user friendly name, in MinIO this value is the same as your user Access Key.
|
||||
AWSUsername Key = "aws:username"
|
||||
)
|
||||
|
||||
// AllSupportedKeys - is the list of all supported keys.
|
||||
var AllSupportedKeys = append([]Key{
|
||||
S3XAmzCopySource,
|
||||
S3XAmzServerSideEncryption,
|
||||
S3XAmzServerSideEncryptionCustomerAlgorithm,
|
||||
S3XAmzMetadataDirective,
|
||||
S3XAmzStorageClass,
|
||||
S3XAmzContentSha256,
|
||||
S3LocationConstraint,
|
||||
S3Prefix,
|
||||
S3Delimiter,
|
||||
S3MaxKeys,
|
||||
S3ObjectLockRemainingRetentionDays,
|
||||
S3ObjectLockMode,
|
||||
S3ObjectLockLegalHold,
|
||||
S3ObjectLockRetainUntilDate,
|
||||
AWSReferer,
|
||||
AWSSourceIP,
|
||||
AWSUserAgent,
|
||||
AWSSecureTransport,
|
||||
AWSCurrentTime,
|
||||
AWSEpochTime,
|
||||
AWSPrincipalType,
|
||||
AWSUserID,
|
||||
AWSUsername,
|
||||
// Add new supported condition keys.
|
||||
}, JWTKeys...)
|
||||
|
||||
// CommonKeys - is the list of all common condition keys.
|
||||
var CommonKeys = append([]Key{
|
||||
AWSReferer,
|
||||
AWSSourceIP,
|
||||
AWSUserAgent,
|
||||
AWSSecureTransport,
|
||||
AWSCurrentTime,
|
||||
AWSEpochTime,
|
||||
AWSPrincipalType,
|
||||
AWSUserID,
|
||||
AWSUsername,
|
||||
S3XAmzContentSha256,
|
||||
}, JWTKeys...)
|
||||
|
||||
func substFuncFromValues(values map[string][]string) func(string) string {
|
||||
return func(v string) string {
|
||||
for _, key := range CommonKeys {
|
||||
// Empty values are not supported for policy variables.
|
||||
if rvalues, ok := values[key.Name()]; ok && rvalues[0] != "" {
|
||||
v = strings.Replace(v, key.VarName(), rvalues[0], -1)
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
}
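
// A minimal, hypothetical in-package sketch (not part of the original file) of how
// substFuncFromValues expands policy variables such as ${aws:username}: the values map is
// keyed by the stripped names returned by Key.Name(), and unknown variables are left as-is.
package condition

import "fmt"

func examplePolicyVariableSubstitution() {
	subst := substFuncFromValues(map[string][]string{
		"username": {"johndoe"}, // aws:username with its "aws:" prefix stripped
	})

	fmt.Println(subst("mybucket/${aws:username}/*")) // mybucket/johndoe/*
	fmt.Println(subst("mybucket/${aws:userid}/*"))   // unchanged, no "userid" value given
}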
|
||||
|
||||
// IsValid - checks if key is valid or not.
|
||||
func (key Key) IsValid() bool {
|
||||
for _, supKey := range AllSupportedKeys {
|
||||
if supKey == key {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes Key to JSON data.
|
||||
func (key Key) MarshalJSON() ([]byte, error) {
|
||||
if !key.IsValid() {
|
||||
return nil, fmt.Errorf("unknown key %v", key)
|
||||
}
|
||||
|
||||
return json.Marshal(string(key))
|
||||
}
|
||||
|
||||
// VarName - returns variable key name, such as "${aws:username}"
|
||||
func (key Key) VarName() string {
|
||||
return fmt.Sprintf("${%s}", key)
|
||||
}
|
||||
|
||||
// Name - returns the key name with the "aws:", "jwt:" or "s3:" prefix stripped.
|
||||
func (key Key) Name() string {
|
||||
keyString := string(key)
|
||||
|
||||
if strings.HasPrefix(keyString, "aws:") {
|
||||
return strings.TrimPrefix(keyString, "aws:")
|
||||
} else if strings.HasPrefix(keyString, "jwt:") {
|
||||
return strings.TrimPrefix(keyString, "jwt:")
|
||||
}
|
||||
return strings.TrimPrefix(keyString, "s3:")
|
||||
}
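
// A small, hypothetical sketch (added for illustration) of how Name() and VarName() relate:
// Name() strips the "aws:", "jwt:" or "s3:" prefix, while VarName() wraps the full key as a
// policy variable.
package condition

import "fmt"

func exampleKeyNames() {
	fmt.Println(AWSUsername.Name())    // username
	fmt.Println(JWTSub.Name())         // sub
	fmt.Println(S3Prefix.Name())       // prefix
	fmt.Println(AWSUsername.VarName()) // ${aws:username}
}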
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to Key.
|
||||
func (key *Key) UnmarshalJSON(data []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parsedKey, err := parseKey(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*key = parsedKey
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseKey(s string) (Key, error) {
|
||||
key := Key(s)
|
||||
|
||||
if key.IsValid() {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return key, fmt.Errorf("invalid condition key '%v'", s)
|
||||
}
|
||||
|
||||
// KeySet - set representation of slice of keys.
|
||||
type KeySet map[Key]struct{}
|
||||
|
||||
// Add - add a key to key set.
|
||||
func (set KeySet) Add(key Key) {
|
||||
set[key] = struct{}{}
|
||||
}
|
||||
|
||||
// Difference - returns a key set containing the keys of this set that are not in the given set.
|
||||
// Example:
|
||||
// keySet1 := ["one", "two", "three"]
|
||||
// keySet2 := ["two", "four", "three"]
|
||||
// keySet1.Difference(keySet2) == ["one"]
|
||||
func (set KeySet) Difference(sset KeySet) KeySet {
|
||||
nset := make(KeySet)
|
||||
|
||||
for k := range set {
|
||||
if _, ok := sset[k]; !ok {
|
||||
nset.Add(k)
|
||||
}
|
||||
}
|
||||
|
||||
return nset
|
||||
}
|
||||
|
||||
// IsEmpty - returns whether key set is empty or not.
|
||||
func (set KeySet) IsEmpty() bool {
|
||||
return len(set) == 0
|
||||
}
|
||||
|
||||
func (set KeySet) String() string {
|
||||
return fmt.Sprintf("%v", set.ToSlice())
|
||||
}
|
||||
|
||||
// ToSlice - returns slice of keys.
|
||||
func (set KeySet) ToSlice() []Key {
|
||||
keys := []Key{}
|
||||
|
||||
for key := range set {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
// NewKeySet - returns new KeySet contains given keys.
|
||||
func NewKeySet(keys ...Key) KeySet {
|
||||
set := make(KeySet)
|
||||
for _, key := range keys {
|
||||
set.Add(key)
|
||||
}
|
||||
|
||||
return set
|
||||
}
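
// A hedged usage sketch (not part of the original file) for KeySet, mirroring the example
// in the Difference comment above: construction with NewKeySet, Add, and Difference.
package condition

import "fmt"

func exampleKeySet() {
	set1 := NewKeySet(S3Prefix, S3Delimiter, S3MaxKeys)
	set2 := NewKeySet(S3Delimiter, S3MaxKeys)
	set2.Add(S3XAmzCopySource)

	fmt.Println(set1.Difference(set2)) // [s3:prefix]
}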
|
||||
|
||||
// AllSupportedAdminKeys - is the list of all supported admin keys.
|
||||
var AllSupportedAdminKeys = []Key{
|
||||
AWSReferer,
|
||||
AWSSourceIP,
|
||||
AWSUserAgent,
|
||||
AWSSecureTransport,
|
||||
AWSCurrentTime,
|
||||
AWSEpochTime,
|
||||
// Add new supported condition keys.
|
||||
}
|
|
@ -1,214 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestKeyIsValid(t *testing.T) {
|
||||
testCases := []struct {
|
||||
key Key
|
||||
expectedResult bool
|
||||
}{
|
||||
{S3XAmzCopySource, true},
|
||||
{S3XAmzServerSideEncryption, true},
|
||||
{S3XAmzServerSideEncryptionCustomerAlgorithm, true},
|
||||
{S3XAmzMetadataDirective, true},
|
||||
{S3XAmzStorageClass, true},
|
||||
{S3LocationConstraint, true},
|
||||
{S3Prefix, true},
|
||||
{S3Delimiter, true},
|
||||
{S3MaxKeys, true},
|
||||
{AWSReferer, true},
|
||||
{AWSSourceIP, true},
|
||||
{Key("foo"), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.key.IsValid()
|
||||
|
||||
if testCase.expectedResult != result {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeyMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
key Key
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{S3XAmzCopySource, []byte(`"s3:x-amz-copy-source"`), false},
|
||||
{Key("foo"), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.key)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if testCase.expectErr != expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: key: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeyName(t *testing.T) {
|
||||
testCases := []struct {
|
||||
key Key
|
||||
expectedResult string
|
||||
}{
|
||||
{S3XAmzCopySource, "x-amz-copy-source"},
|
||||
{AWSReferer, "Referer"},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.key.Name()
|
||||
|
||||
if testCase.expectedResult != result {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeyUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedKey Key
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`"s3:x-amz-copy-source"`), S3XAmzCopySource, false},
|
||||
{[]byte(`"foo"`), Key(""), true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var key Key
|
||||
err := json.Unmarshal(testCase.data, &key)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if testCase.expectErr != expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if testCase.expectedKey != key {
|
||||
t.Fatalf("case %v: key: expected: %v, got: %v\n", i+1, testCase.expectedKey, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeySetAdd(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set KeySet
|
||||
key Key
|
||||
expectedResult KeySet
|
||||
}{
|
||||
{NewKeySet(), S3XAmzCopySource, NewKeySet(S3XAmzCopySource)},
|
||||
{NewKeySet(S3XAmzCopySource), S3XAmzCopySource, NewKeySet(S3XAmzCopySource)},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
testCase.set.Add(testCase.key)
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, testCase.set) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeySetDifference(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set KeySet
|
||||
setToDiff KeySet
|
||||
expectedResult KeySet
|
||||
}{
|
||||
{NewKeySet(), NewKeySet(S3XAmzCopySource), NewKeySet()},
|
||||
{NewKeySet(S3Prefix, S3Delimiter, S3MaxKeys), NewKeySet(S3Delimiter, S3MaxKeys), NewKeySet(S3Prefix)},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.Difference(testCase.setToDiff)
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, result) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeySetIsEmpty(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set KeySet
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewKeySet(), true},
|
||||
{NewKeySet(S3Delimiter), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.IsEmpty()
|
||||
|
||||
if testCase.expectedResult != result {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeySetString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set KeySet
|
||||
expectedResult string
|
||||
}{
|
||||
{NewKeySet(), `[]`},
|
||||
{NewKeySet(S3Delimiter), `[s3:delimiter]`},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.String()
|
||||
|
||||
if testCase.expectedResult != result {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestKeySetToSlice(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set KeySet
|
||||
expectedResult []Key
|
||||
}{
|
||||
{NewKeySet(), []Key{}},
|
||||
{NewKeySet(S3Delimiter), []Key{S3Delimiter}},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.ToSlice()
|
||||
|
||||
if !reflect.DeepEqual(testCase.expectedResult, result) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,123 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type name string
|
||||
|
||||
const (
|
||||
stringEquals name = "StringEquals"
|
||||
stringNotEquals = "StringNotEquals"
|
||||
stringEqualsIgnoreCase = "StringEqualsIgnoreCase"
|
||||
stringNotEqualsIgnoreCase = "StringNotEqualsIgnoreCase"
|
||||
stringLike = "StringLike"
|
||||
stringNotLike = "StringNotLike"
|
||||
binaryEquals = "BinaryEquals"
|
||||
ipAddress = "IpAddress"
|
||||
notIPAddress = "NotIpAddress"
|
||||
null = "Null"
|
||||
boolean = "Bool"
|
||||
numericEquals = "NumericEquals"
|
||||
numericNotEquals = "NumericNotEquals"
|
||||
numericLessThan = "NumericLessThan"
|
||||
numericLessThanEquals = "NumericLessThanEquals"
|
||||
numericGreaterThan = "NumericGreaterThan"
|
||||
numericGreaterThanEquals = "NumericGreaterThanEquals"
|
||||
dateEquals = "DateEquals"
|
||||
dateNotEquals = "DateNotEquals"
|
||||
dateLessThan = "DateLessThan"
|
||||
dateLessThanEquals = "DateLessThanEquals"
|
||||
dateGreaterThan = "DateGreaterThan"
|
||||
dateGreaterThanEquals = "DateGreaterThanEquals"
|
||||
)
|
||||
|
||||
var supportedConditions = []name{
|
||||
stringEquals,
|
||||
stringNotEquals,
|
||||
stringEqualsIgnoreCase,
|
||||
stringNotEqualsIgnoreCase,
|
||||
binaryEquals,
|
||||
stringLike,
|
||||
stringNotLike,
|
||||
ipAddress,
|
||||
notIPAddress,
|
||||
null,
|
||||
boolean,
|
||||
numericEquals,
|
||||
numericNotEquals,
|
||||
numericLessThan,
|
||||
numericLessThanEquals,
|
||||
numericGreaterThan,
|
||||
numericGreaterThanEquals,
|
||||
dateEquals,
|
||||
dateNotEquals,
|
||||
dateLessThan,
|
||||
dateLessThanEquals,
|
||||
dateGreaterThan,
|
||||
dateGreaterThanEquals,
|
||||
// Add new conditions here.
|
||||
}
|
||||
|
||||
// IsValid - checks if name is valid or not.
|
||||
func (n name) IsValid() bool {
|
||||
for _, supn := range supportedConditions {
|
||||
if n == supn {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes name to JSON data.
|
||||
func (n name) MarshalJSON() ([]byte, error) {
|
||||
if !n.IsValid() {
|
||||
return nil, fmt.Errorf("invalid name %v", n)
|
||||
}
|
||||
|
||||
return json.Marshal(string(n))
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to condition name.
|
||||
func (n *name) UnmarshalJSON(data []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parsedName, err := parseName(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*n = parsedName
|
||||
return nil
|
||||
}
|
||||
|
||||
func parseName(s string) (name, error) {
|
||||
n := name(s)
|
||||
|
||||
if n.IsValid() {
|
||||
return n, nil
|
||||
}
|
||||
|
||||
return n, fmt.Errorf("invalid condition name '%v'", s)
|
||||
}
|
|
@ -1,106 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNameIsValid(t *testing.T) {
|
||||
testCases := []struct {
|
||||
n name
|
||||
expectedResult bool
|
||||
}{
|
||||
{stringEquals, true},
|
||||
{stringNotEquals, true},
|
||||
{stringLike, true},
|
||||
{stringNotLike, true},
|
||||
{ipAddress, true},
|
||||
{notIPAddress, true},
|
||||
{null, true},
|
||||
{name("foo"), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.n.IsValid()
|
||||
|
||||
if testCase.expectedResult != result {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNameMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
n name
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{stringEquals, []byte(`"StringEquals"`), false},
|
||||
{stringNotEquals, []byte(`"StringNotEquals"`), false},
|
||||
{stringLike, []byte(`"StringLike"`), false},
|
||||
{stringNotLike, []byte(`"StringNotLike"`), false},
|
||||
{ipAddress, []byte(`"IpAddress"`), false},
|
||||
{notIPAddress, []byte(`"NotIpAddress"`), false},
|
||||
{null, []byte(`"Null"`), false},
|
||||
{name("foo"), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.n)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if testCase.expectErr != expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNameUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult name
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`"StringEquals"`), stringEquals, false},
|
||||
{[]byte(`"foo"`), name(""), true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result name
|
||||
err := json.Unmarshal(testCase.data, &result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if testCase.expectErr != expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if testCase.expectedResult != result {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,106 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// nullFunc - Null condition function. It checks whether Key is present in the given
|
||||
// values map or not, depending on the condition value.
|
||||
// For example,
|
||||
// 1. if Key = S3XAmzCopySource and Value = true, at evaluate() it returns whether
|
||||
// S3XAmzCopySource is NOT in the given value map.
|
||||
// 2. if Key = S3XAmzCopySource and Value = false, at evaluate() it returns whether
|
||||
// S3XAmzCopySource is in the given value map.
|
||||
// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_condition_operators.html#Conditions_Null
|
||||
type nullFunc struct {
|
||||
k Key
|
||||
value bool
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether Key is present in given values or not.
|
||||
// Depending on condition boolean value, this function returns true or false.
|
||||
func (f nullFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if f.value {
|
||||
return len(requestValue) == 0
|
||||
}
|
||||
|
||||
return len(requestValue) != 0
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f nullFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "Null" condition name.
|
||||
func (f nullFunc) name() name {
|
||||
return null
|
||||
}
|
||||
|
||||
func (f nullFunc) String() string {
|
||||
return fmt.Sprintf("%v:%v:%v", null, f.k, f.value)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f nullFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: NewValueSet(NewBoolValue(f.value)),
|
||||
}
|
||||
}
|
||||
|
||||
func newNullFunc(key Key, values ValueSet) (Function, error) {
|
||||
if len(values) != 1 {
|
||||
return nil, fmt.Errorf("only one value is allowed for Null condition")
|
||||
}
|
||||
|
||||
var value bool
|
||||
for v := range values {
|
||||
switch v.GetType() {
|
||||
case reflect.Bool:
|
||||
value, _ = v.GetBool()
|
||||
case reflect.String:
|
||||
var err error
|
||||
s, _ := v.GetString()
|
||||
if value, err = strconv.ParseBool(s); err != nil {
|
||||
return nil, fmt.Errorf("value must be a boolean string for Null condition")
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("value must be a boolean for Null condition")
|
||||
}
|
||||
}
|
||||
|
||||
return &nullFunc{key, value}, nil
|
||||
}
|
||||
|
||||
// NewNullFunc - returns new Null function.
|
||||
func NewNullFunc(key Key, value bool) (Function, error) {
|
||||
return &nullFunc{key, value}, nil
|
||||
}
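
// A minimal, hypothetical sketch (not in the original file) of the Null condition's
// semantics: with value true it matches when the key is absent from the request values,
// with value false it matches when the key is present.
package condition

import "fmt"

func exampleNullCondition() {
	absentCheck, _ := NewNullFunc(S3Prefix, true)
	presentCheck, _ := NewNullFunc(S3Prefix, false)

	withPrefix := map[string][]string{"prefix": {"photos/"}}
	withoutPrefix := map[string][]string{}

	fmt.Println(absentCheck.evaluate(withoutPrefix), absentCheck.evaluate(withPrefix))   // true false
	fmt.Println(presentCheck.evaluate(withPrefix), presentCheck.evaluate(withoutPrefix)) // true false
}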
|
|
@ -1,161 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNullFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(false)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"prefix": {"true"}}, false},
|
||||
{case1Function, map[string][]string{"prefix": {"false"}}, false},
|
||||
{case1Function, map[string][]string{"prefix": {"mybucket/foo"}}, false},
|
||||
{case1Function, map[string][]string{}, true},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
{case2Function, map[string][]string{"prefix": {"true"}}, true},
|
||||
{case2Function, map[string][]string{"prefix": {"false"}}, true},
|
||||
{case2Function, map[string][]string{"prefix": {"mybucket/foo"}}, true},
|
||||
{case2Function, map[string][]string{}, false},
|
||||
{case2Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Errorf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNullFuncKey(t *testing.T) {
|
||||
case1Function, err := newNullFunc(S3XAmzCopySource, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, S3XAmzCopySource},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNullFuncToMap(t *testing.T) {
|
||||
case1Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
S3Prefix: NewValueSet(NewBoolValue(true)),
|
||||
}
|
||||
|
||||
case2Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(false)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
S3Prefix: NewValueSet(NewBoolValue(false)),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{&nullFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewNullFunc(t *testing.T) {
|
||||
case1Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(true)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newNullFunc(S3Prefix, NewValueSet(NewBoolValue(false)))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{S3Prefix, NewValueSet(NewBoolValue(true)), case1Function, false},
|
||||
{S3Prefix, NewValueSet(NewStringValue("false")), case2Function, false},
|
||||
// Multiple values error.
|
||||
{S3Prefix, NewValueSet(NewBoolValue(true), NewBoolValue(false)), nil, true},
|
||||
// Invalid boolean string error.
|
||||
{S3Prefix, NewValueSet(NewStringValue("foo")), nil, true},
|
||||
// Invalid value error.
|
||||
{S3Prefix, NewValueSet(NewIntValue(7)), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newNullFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,168 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func toNumericEqualsFuncString(n name, key Key, value int) string {
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, value)
|
||||
}
|
||||
|
||||
// numericEqualsFunc - Numeric equals function. It checks whether the integer value
|
||||
// for Key in the given values map equals the condition value.
|
||||
// For example,
|
||||
// - if the condition value is 100, at evaluate() it returns whether the request
|
||||
// value for Key parses to the integer 100.
|
||||
type numericEqualsFunc struct {
|
||||
k Key
|
||||
value int
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values
|
||||
// equals the condition value.
|
||||
func (f numericEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
rvInt, err := strconv.Atoi(requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return f.value == rvInt
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f numericEqualsFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "NumericEquals" condition name.
|
||||
func (f numericEqualsFunc) name() name {
|
||||
return numericEquals
|
||||
}
|
||||
|
||||
func (f numericEqualsFunc) String() string {
|
||||
return toNumericEqualsFuncString(numericEquals, f.k, f.value)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f numericEqualsFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
values.Add(NewIntValue(f.value))
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// numericNotEqualsFunc - Numeric not equals function. It checks whether the integer
|
||||
// value for Key in the given values does NOT equal the condition value.
|
||||
// For example,
|
||||
// - if the condition value is 100, at evaluate() it returns whether the request
|
||||
// value for Key does NOT parse to the integer 100.
|
||||
type numericNotEqualsFunc struct {
|
||||
numericEqualsFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values
|
||||
// does NOT equal the condition value.
|
||||
func (f numericNotEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
return !f.numericEqualsFunc.evaluate(values)
|
||||
}
|
||||
|
||||
// name() - returns "NumericNotEquals" condition name.
|
||||
func (f numericNotEqualsFunc) name() name {
|
||||
return numericNotEquals
|
||||
}
|
||||
|
||||
func (f numericNotEqualsFunc) String() string {
|
||||
return toNumericEqualsFuncString(numericNotEquals, f.numericEqualsFunc.k, f.numericEqualsFunc.value)
|
||||
}
|
||||
|
||||
func valueToInt(n name, values ValueSet) (v int, err error) {
|
||||
if len(values) != 1 {
|
||||
return -1, fmt.Errorf("only one value is allowed for %s condition", n)
|
||||
}
|
||||
|
||||
for vs := range values {
|
||||
switch vs.GetType() {
|
||||
case reflect.Int:
|
||||
if v, err = vs.GetInt(); err != nil {
|
||||
return -1, err
|
||||
}
|
||||
case reflect.String:
|
||||
s, err := vs.GetString()
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
if v, err = strconv.Atoi(s); err != nil {
|
||||
return -1, fmt.Errorf("value %s must be a int for %s condition: %w", vs, n, err)
|
||||
}
|
||||
default:
|
||||
return -1, fmt.Errorf("value %s must be a int for %s condition", vs, n)
|
||||
}
|
||||
}
|
||||
|
||||
return v, nil
|
||||
|
||||
}
|
||||
|
||||
// newNumericEqualsFunc - returns new NumericEquals function.
|
||||
func newNumericEqualsFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToInt(numericEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewNumericEqualsFunc(key, v)
|
||||
}
|
||||
|
||||
// NewNumericEqualsFunc - returns new NumericEquals function.
|
||||
func NewNumericEqualsFunc(key Key, value int) (Function, error) {
|
||||
return &numericEqualsFunc{key, value}, nil
|
||||
}
|
||||
|
||||
// newNumericNotEqualsFunc - returns new NumericNotEquals function.
|
||||
func newNumericNotEqualsFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToInt(numericNotEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewNumericNotEqualsFunc(key, v)
|
||||
}
|
||||
|
||||
// NewNumericNotEqualsFunc - returns new NumericNotEquals function.
|
||||
func NewNumericNotEqualsFunc(key Key, value int) (Function, error) {
|
||||
return &numericNotEqualsFunc{numericEqualsFunc{key, value}}, nil
|
||||
}
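
// A hedged, hypothetical sketch (not in the original file) of NumericEquals and
// NumericNotEquals: the first request value is parsed with strconv.Atoi and compared with
// the condition value; a missing or non-numeric value makes NumericEquals evaluate to false.
package condition

import "fmt"

func exampleNumericEquals() {
	eq, _ := NewNumericEqualsFunc(S3MaxKeys, 100)
	ne, _ := NewNumericNotEqualsFunc(S3MaxKeys, 100)

	fmt.Println(eq.evaluate(map[string][]string{"max-keys": {"100"}})) // true
	fmt.Println(eq.evaluate(map[string][]string{"max-keys": {"99"}}))  // false
	fmt.Println(eq.evaluate(map[string][]string{}))                    // false
	fmt.Println(ne.evaluate(map[string][]string{"max-keys": {"99"}}))  // true
}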
|
|
@ -1,153 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func toNumericGreaterThanFuncString(n name, key Key, value int) string {
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, value)
|
||||
}
|
||||
|
||||
// numericGreaterThanFunc - Numeric greater than function. It checks whether the
|
||||
// integer value for Key in the given values map is greater than the condition value.
|
||||
// For example,
|
||||
// - if the condition value is 100, at evaluate() it returns whether the request
|
||||
// value for Key parses to an integer greater than 100.
|
||||
type numericGreaterThanFunc struct {
|
||||
k Key
|
||||
value int
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values
|
||||
// is greater than the condition value.
|
||||
func (f numericGreaterThanFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
rvInt, err := strconv.Atoi(requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return rvInt > f.value
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f numericGreaterThanFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "NumericGreaterThan" condition name.
|
||||
func (f numericGreaterThanFunc) name() name {
|
||||
return numericGreaterThan
|
||||
}
|
||||
|
||||
func (f numericGreaterThanFunc) String() string {
|
||||
return toNumericGreaterThanFuncString(numericGreaterThan, f.k, f.value)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f numericGreaterThanFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
values.Add(NewIntValue(f.value))
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// numericGreaterThanEqualsFunc - Numeric greater than or equals function. It checks
|
||||
// whether the integer value for Key in the given values map is at least the condition value.
|
||||
// For example,
|
||||
// - if the condition value is 100, at evaluate() it returns whether the request
|
||||
// value for Key parses to an integer greater than or equal to 100.
|
||||
type numericGreaterThanEqualsFunc struct {
|
||||
numericGreaterThanFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values
|
||||
// is greater than or equal to the condition value.
|
||||
func (f numericGreaterThanEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
rvInt, err := strconv.Atoi(requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return rvInt >= f.value
|
||||
}
|
||||
|
||||
// name() - returns "NumericGreaterThanEquals" condition name.
|
||||
func (f numericGreaterThanEqualsFunc) name() name {
|
||||
return numericGreaterThanEquals
|
||||
}
|
||||
|
||||
func (f numericGreaterThanEqualsFunc) String() string {
|
||||
return toNumericGreaterThanFuncString(numericGreaterThanEquals, f.numericGreaterThanFunc.k, f.numericGreaterThanFunc.value)
|
||||
}
|
||||
|
||||
// newNumericGreaterThanFunc - returns new NumericGreaterThan function.
|
||||
func newNumericGreaterThanFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToInt(numericGreaterThan, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewNumericGreaterThanFunc(key, v)
|
||||
}
|
||||
|
||||
// NewNumericGreaterThanFunc - returns new NumericGreaterThan function.
|
||||
func NewNumericGreaterThanFunc(key Key, value int) (Function, error) {
|
||||
return &numericGreaterThanFunc{key, value}, nil
|
||||
}
|
||||
|
||||
// newNumericGreaterThanEqualsFunc - returns new NumericGreaterThanEquals function.
|
||||
func newNumericGreaterThanEqualsFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToInt(numericGreaterThanEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewNumericGreaterThanEqualsFunc(key, v)
|
||||
}
|
||||
|
||||
// NewNumericGreaterThanEqualsFunc - returns new NumericGreaterThanEquals function.
|
||||
func NewNumericGreaterThanEqualsFunc(key Key, value int) (Function, error) {
|
||||
return &numericGreaterThanEqualsFunc{numericGreaterThanFunc{key, value}}, nil
|
||||
}
|
|
@ -1,153 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func toNumericLessThanFuncString(n name, key Key, value int) string {
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, value)
|
||||
}
|
||||
|
||||
// numericLessThanFunc - Numeric less than function. It checks whether the integer
|
||||
// value for Key in the given values map is less than the condition value.
|
||||
// For example,
|
||||
// - if the condition value is 100, at evaluate() it returns whether the request
|
||||
// value for Key parses to an integer less than 100.
|
||||
type numericLessThanFunc struct {
|
||||
k Key
|
||||
value int
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values
|
||||
// is less than the condition value.
|
||||
func (f numericLessThanFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
rvInt, err := strconv.Atoi(requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return rvInt < f.value
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f numericLessThanFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "NumericLessThan" condition name.
|
||||
func (f numericLessThanFunc) name() name {
|
||||
return numericLessThan
|
||||
}
|
||||
|
||||
func (f numericLessThanFunc) String() string {
|
||||
return toNumericLessThanFuncString(numericLessThan, f.k, f.value)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f numericLessThanFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
values.Add(NewIntValue(f.value))
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// numericLessThanEqualsFunc - Numeric less than or equals function. It checks whether
|
||||
// the integer value for Key in the given values map is at most the condition value.
|
||||
// For example,
|
||||
// - if the condition value is 100, at evaluate() it returns whether the request
|
||||
// value for Key parses to an integer less than or equal to 100.
|
||||
type numericLessThanEqualsFunc struct {
|
||||
numericLessThanFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether the value for Key in the given values
|
||||
// is less than or equal to the condition value.
|
||||
func (f numericLessThanEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
if len(requestValue) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
rvInt, err := strconv.Atoi(requestValue[0])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return rvInt <= f.value
|
||||
}
|
||||
|
||||
// name() - returns "NumericLessThanEquals" condition name.
|
||||
func (f numericLessThanEqualsFunc) name() name {
|
||||
return numericLessThanEquals
|
||||
}
|
||||
|
||||
func (f numericLessThanEqualsFunc) String() string {
|
||||
return toNumericLessThanFuncString(numericLessThanEquals, f.numericLessThanFunc.k, f.numericLessThanFunc.value)
|
||||
}
|
||||
|
||||
// newNumericLessThanFunc - returns new NumericLessThan function.
|
||||
func newNumericLessThanFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToInt(numericLessThan, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewNumericLessThanFunc(key, v)
|
||||
}
|
||||
|
||||
// NewNumericLessThanFunc - returns new NumericLessThan function.
|
||||
func NewNumericLessThanFunc(key Key, value int) (Function, error) {
|
||||
return &numericLessThanFunc{key, value}, nil
|
||||
}
|
||||
|
||||
// newNumericLessThanEqualsFunc - returns new NumericLessThanEquals function.
|
||||
func newNumericLessThanEqualsFunc(key Key, values ValueSet) (Function, error) {
|
||||
v, err := valueToInt(numericLessThanEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewNumericLessThanEqualsFunc(key, v)
|
||||
}
|
||||
|
||||
// NewNumericLessThanEqualsFunc - returns new NumericLessThanEquals function.
|
||||
func NewNumericLessThanEqualsFunc(key Key, value int) (Function, error) {
|
||||
return &numericLessThanEqualsFunc{numericLessThanFunc{key, value}}, nil
|
||||
}
|
|
@ -1,193 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
)
|
||||
|
||||
func toStringEqualsFuncString(n name, key Key, values set.StringSet) string {
|
||||
valueStrings := values.ToSlice()
|
||||
sort.Strings(valueStrings)
|
||||
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, valueStrings)
|
||||
}
|
||||
|
||||
// stringEqualsFunc - String equals function. It checks whether value by Key in given
|
||||
// values map is in condition values.
|
||||
// For example,
|
||||
// - if values = ["mybucket/foo"], at evaluate() it returns whether string
|
||||
// in value map for Key is in values.
|
||||
type stringEqualsFunc struct {
|
||||
k Key
|
||||
values set.StringSet
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether value by Key in given values is in
|
||||
// condition values.
|
||||
func (f stringEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
fvalues := f.values.ApplyFunc(substFuncFromValues(values))
|
||||
return !fvalues.Intersection(set.CreateStringSet(requestValue...)).IsEmpty()
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f stringEqualsFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "StringEquals" condition name.
|
||||
func (f stringEqualsFunc) name() name {
|
||||
return stringEquals
|
||||
}
|
||||
|
||||
func (f stringEqualsFunc) String() string {
|
||||
return toStringEqualsFuncString(stringEquals, f.k, f.values)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f stringEqualsFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
for _, value := range f.values.ToSlice() {
|
||||
values.Add(NewStringValue(value))
|
||||
}
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// stringNotEqualsFunc - String not equals function. It checks whether value by Key in
|
||||
// given values is NOT in condition values.
|
||||
// For example,
|
||||
// - if values = ["mybucket/foo"], at evaluate() it returns whether string
|
||||
// in value map for Key is NOT in values.
|
||||
type stringNotEqualsFunc struct {
|
||||
stringEqualsFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether value by Key in given values is NOT in
|
||||
// condition values.
|
||||
func (f stringNotEqualsFunc) evaluate(values map[string][]string) bool {
|
||||
return !f.stringEqualsFunc.evaluate(values)
|
||||
}
|
||||
|
||||
// name() - returns "StringNotEquals" condition name.
|
||||
func (f stringNotEqualsFunc) name() name {
|
||||
return stringNotEquals
|
||||
}
|
||||
|
||||
func (f stringNotEqualsFunc) String() string {
|
||||
return toStringEqualsFuncString(stringNotEquals, f.stringEqualsFunc.k, f.stringEqualsFunc.values)
|
||||
}
|
||||
|
||||
func valuesToStringSlice(n name, values ValueSet) ([]string, error) {
|
||||
valueStrings := []string{}
|
||||
|
||||
for value := range values {
|
||||
s, err := value.GetString()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("value must be a string for %v condition", n)
|
||||
}
|
||||
|
||||
valueStrings = append(valueStrings, s)
|
||||
}
|
||||
|
||||
return valueStrings, nil
|
||||
}
|
||||
|
||||
func validateStringEqualsValues(n name, key Key, values set.StringSet) error {
|
||||
for _, s := range values.ToSlice() {
|
||||
switch key {
|
||||
case S3XAmzCopySource:
|
||||
bucket, object := path2BucketAndObject(s)
|
||||
if object == "" {
|
||||
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzCopySource, n)
|
||||
}
|
||||
if err := s3utils.CheckValidBucketName(bucket); err != nil {
|
||||
return err
|
||||
}
|
||||
case S3XAmzServerSideEncryption, S3XAmzServerSideEncryptionCustomerAlgorithm:
|
||||
if s != "AES256" {
|
||||
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzServerSideEncryption, n)
|
||||
}
|
||||
case S3XAmzMetadataDirective:
|
||||
if s != "COPY" && s != "REPLACE" {
|
||||
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzMetadataDirective, n)
|
||||
}
|
||||
case S3XAmzContentSha256:
|
||||
if s == "" {
|
||||
return fmt.Errorf("invalid empty value for '%v' for %v condition", S3XAmzContentSha256, n)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
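For reference, a small in-package sketch (not part of the deleted file) of what validateStringEqualsValues accepts and rejects for the copy-source key; the rejected case mirrors the "Invalid value error" entries in the tests later in this diff.

// Illustrative sketch only; uses the same set package already imported above.
package condition

import (
	"fmt"

	"github.com/minio/minio-go/v6/pkg/set"
)

func exampleValidateStringEquals() {
	// "mybucket/myobject" names both a bucket and an object, so it passes validation.
	fmt.Println(validateStringEqualsValues(stringEquals, S3XAmzCopySource,
		set.CreateStringSet("mybucket/myobject"))) // <nil>

	// "mybucket" has no object part, so it is rejected with an "invalid value" error.
	fmt.Println(validateStringEqualsValues(stringEquals, S3XAmzCopySource,
		set.CreateStringSet("mybucket")))
}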
|
||||
|
||||
// newStringEqualsFunc - returns new StringEquals function.
|
||||
func newStringEqualsFunc(key Key, values ValueSet) (Function, error) {
|
||||
valueStrings, err := valuesToStringSlice(stringEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewStringEqualsFunc(key, valueStrings...)
|
||||
}
|
||||
|
||||
// NewStringEqualsFunc - returns new StringEquals function.
|
||||
func NewStringEqualsFunc(key Key, values ...string) (Function, error) {
|
||||
sset := set.CreateStringSet(values...)
|
||||
if err := validateStringEqualsValues(stringEquals, key, sset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &stringEqualsFunc{key, sset}, nil
|
||||
}
|
||||
|
||||
// newStringNotEqualsFunc - returns new StringNotEquals function.
|
||||
func newStringNotEqualsFunc(key Key, values ValueSet) (Function, error) {
|
||||
valueStrings, err := valuesToStringSlice(stringNotEquals, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewStringNotEqualsFunc(key, valueStrings...)
|
||||
}
|
||||
|
||||
// NewStringNotEqualsFunc - returns new StringNotEquals function.
|
||||
func NewStringNotEqualsFunc(key Key, values ...string) (Function, error) {
|
||||
sset := set.CreateStringSet(values...)
|
||||
if err := validateStringEqualsValues(stringNotEquals, key, sset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &stringNotEqualsFunc{stringEqualsFunc{key, sset}}, nil
|
||||
}
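Taken together, the pieces above produce a StringEquals condition whose unexported evaluate() matches request parameters against the configured value set. A minimal in-package sketch (not part of the deleted file), consistent with the test expectations in the file that follows.

// Illustrative sketch only; written as if inside package condition because
// evaluate() is unexported.
package condition

import "fmt"

func exampleStringEquals() {
	// validateStringEqualsValues only accepts "AES256" for this key.
	fn, err := NewStringEqualsFunc(S3XAmzServerSideEncryption, "AES256")
	if err != nil {
		panic(err)
	}

	fmt.Println(fn.evaluate(map[string][]string{"x-amz-server-side-encryption": {"AES256"}})) // true
	fmt.Println(fn.evaluate(map[string][]string{"delimiter": {"/"}}))                         // false
	fmt.Println(fn.evaluate(map[string][]string{}))                                           // false
}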
|
|
@ -1,708 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStringEqualsFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false},
|
||||
{case1Function, map[string][]string{}, false},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true},
|
||||
{case2Function, map[string][]string{}, false},
|
||||
{case2Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true},
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false},
|
||||
{case3Function, map[string][]string{}, false},
|
||||
{case3Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true},
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false},
|
||||
{case4Function, map[string][]string{}, false},
|
||||
{case4Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringEqualsFuncKey(t *testing.T) {
|
||||
case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, S3XAmzCopySource},
|
||||
{case2Function, S3XAmzServerSideEncryption},
|
||||
{case3Function, S3XAmzMetadataDirective},
|
||||
{case4Function, S3LocationConstraint},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringEqualsFuncToMap(t *testing.T) {
|
||||
case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")),
|
||||
}
|
||||
|
||||
case2Function, err := newStringEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
}
|
||||
|
||||
case3Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")),
|
||||
}
|
||||
|
||||
case4Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
}
|
||||
|
||||
case5Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")),
|
||||
}
|
||||
|
||||
case6Function, err := newStringEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
}
|
||||
|
||||
case7Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")),
|
||||
}
|
||||
|
||||
case8Function, err := newStringEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{case3Function, case3Result},
|
||||
{case4Function, case4Result},
|
||||
{case5Function, case5Result},
|
||||
{case6Function, case6Result},
|
||||
{case7Function, case7Result},
|
||||
{case8Function, case8Result},
|
||||
{&stringEqualsFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringNotEqualsFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true},
|
||||
{case1Function, map[string][]string{}, true},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false},
|
||||
{case2Function, map[string][]string{}, true},
|
||||
{case2Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false},
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true},
|
||||
{case3Function, map[string][]string{}, true},
|
||||
{case3Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false},
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true},
|
||||
{case4Function, map[string][]string{}, true},
|
||||
{case4Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringNotEqualsFuncKey(t *testing.T) {
|
||||
case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, S3XAmzCopySource},
|
||||
{case2Function, S3XAmzServerSideEncryption},
|
||||
{case3Function, S3XAmzMetadataDirective},
|
||||
{case4Function, S3LocationConstraint},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringNotEqualsFuncToMap(t *testing.T) {
|
||||
case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")),
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")),
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
}
|
||||
|
||||
case5Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")),
|
||||
}
|
||||
|
||||
case6Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
}
|
||||
|
||||
case7Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")),
|
||||
}
|
||||
|
||||
case8Function, err := newStringNotEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{case3Function, case3Result},
|
||||
{case4Function, case4Result},
|
||||
{case5Function, case5Result},
|
||||
{case6Function, case6Result},
|
||||
{case7Function, case7Result},
|
||||
{case8Function, case8Result},
|
||||
{&stringNotEqualsFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStringEqualsFunc(t *testing.T) {
|
||||
case1Function, err := newStringEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Function, err := newStringEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Function, err := newStringEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Function, err := newStringEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Function, err := newStringEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false},
|
||||
{S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
), case2Function, false},
|
||||
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false},
|
||||
{S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
), case4Function, false},
|
||||
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false},
|
||||
{S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
), case6Function, false},
|
||||
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false},
|
||||
{S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
), case8Function, false},
|
||||
|
||||
// Unsupported value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true},
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true},
|
||||
|
||||
// Invalid value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newStringEqualsFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStringNotEqualsFunc(t *testing.T) {
|
||||
case1Function, err := newStringNotEqualsFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotEqualsFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotEqualsFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Function, err := newStringNotEqualsFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Function, err := newStringNotEqualsFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Function, err := newStringNotEqualsFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false},
|
||||
{S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
), case2Function, false},
|
||||
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false},
|
||||
{S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
), case4Function, false},
|
||||
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false},
|
||||
{S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
), case6Function, false},
|
||||
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false},
|
||||
{S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
), case8Function, false},
|
||||
|
||||
// Unsupported value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true},
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true},
|
||||
|
||||
// Invalid value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newStringNotEqualsFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,160 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
)
|
||||
|
||||
func toStringEqualsIgnoreCaseFuncString(n name, key Key, values set.StringSet) string {
|
||||
valueStrings := values.ToSlice()
|
||||
sort.Strings(valueStrings)
|
||||
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, valueStrings)
|
||||
}
|
||||
|
||||
// stringEqualsIgnoreCaseFunc - String equals ignore-case function. It checks whether value by Key in given
// values map is in condition values, comparing strings case-insensitively.
// For example,
// - if values = ["mybucket/foo"], at evaluate() it returns whether string
// in value map for Key is in values, ignoring case.
type stringEqualsIgnoreCaseFunc struct {
|
||||
k Key
|
||||
values set.StringSet
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether value by Key in given values is in
|
||||
// condition values, ignores case.
|
||||
func (f stringEqualsIgnoreCaseFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
fvalues := f.values.ApplyFunc(substFuncFromValues(values))
|
||||
|
||||
for _, v := range requestValue {
|
||||
if !fvalues.FuncMatch(strings.EqualFold, v).IsEmpty() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f stringEqualsIgnoreCaseFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "StringEqualsIgnoreCase" condition name.
|
||||
func (f stringEqualsIgnoreCaseFunc) name() name {
|
||||
return stringEqualsIgnoreCase
|
||||
}
|
||||
|
||||
func (f stringEqualsIgnoreCaseFunc) String() string {
|
||||
return toStringEqualsIgnoreCaseFuncString(stringEqualsIgnoreCase, f.k, f.values)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f stringEqualsIgnoreCaseFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
for _, value := range f.values.ToSlice() {
|
||||
values.Add(NewStringValue(value))
|
||||
}
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// stringNotEqualsIgnoreCaseFunc - String not equals ignore-case function. It checks whether value by Key in
// given values is NOT in condition values, comparing strings case-insensitively.
// For example,
// - if values = ["mybucket/foo"], at evaluate() it returns whether string
// in value map for Key is NOT in values, ignoring case.
type stringNotEqualsIgnoreCaseFunc struct {
|
||||
stringEqualsIgnoreCaseFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether value by Key in given values is NOT in
// condition values, ignoring case.
func (f stringNotEqualsIgnoreCaseFunc) evaluate(values map[string][]string) bool {
|
||||
return !f.stringEqualsIgnoreCaseFunc.evaluate(values)
|
||||
}
|
||||
|
||||
// name() - returns "StringNotEqualsIgnoreCase" condition name.
|
||||
func (f stringNotEqualsIgnoreCaseFunc) name() name {
|
||||
return stringNotEqualsIgnoreCase
|
||||
}
|
||||
|
||||
func (f stringNotEqualsIgnoreCaseFunc) String() string {
|
||||
return toStringEqualsIgnoreCaseFuncString(stringNotEqualsIgnoreCase, f.stringEqualsIgnoreCaseFunc.k, f.stringEqualsIgnoreCaseFunc.values)
|
||||
}
|
||||
|
||||
func validateStringEqualsIgnoreCaseValues(n name, key Key, values set.StringSet) error {
|
||||
return validateStringEqualsValues(n, key, values)
|
||||
}
|
||||
|
||||
// newStringEqualsIgnoreCaseFunc - returns new StringEqualsIgnoreCase function.
|
||||
func newStringEqualsIgnoreCaseFunc(key Key, values ValueSet) (Function, error) {
|
||||
valueStrings, err := valuesToStringSlice(stringEqualsIgnoreCase, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewStringEqualsIgnoreCaseFunc(key, valueStrings...)
|
||||
}
|
||||
|
||||
// NewStringEqualsIgnoreCaseFunc - returns new StringEqualsIgnoreCase function.
|
||||
func NewStringEqualsIgnoreCaseFunc(key Key, values ...string) (Function, error) {
|
||||
sset := set.CreateStringSet(values...)
|
||||
if err := validateStringEqualsIgnoreCaseValues(stringEqualsIgnoreCase, key, sset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &stringEqualsIgnoreCaseFunc{key, sset}, nil
|
||||
}
|
||||
|
||||
// newStringNotEqualsIgnoreCaseFunc - returns new StringNotEqualsIgnoreCase function.
|
||||
func newStringNotEqualsIgnoreCaseFunc(key Key, values ValueSet) (Function, error) {
|
||||
valueStrings, err := valuesToStringSlice(stringNotEqualsIgnoreCase, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewStringNotEqualsIgnoreCaseFunc(key, valueStrings...)
|
||||
}
|
||||
|
||||
// NewStringNotEqualsIgnoreCaseFunc - returns new StringNotEqualsIgnoreCase function.
|
||||
func NewStringNotEqualsIgnoreCaseFunc(key Key, values ...string) (Function, error) {
|
||||
sset := set.CreateStringSet(values...)
|
||||
if err := validateStringEqualsIgnoreCaseValues(stringNotEqualsIgnoreCase, key, sset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &stringNotEqualsIgnoreCaseFunc{stringEqualsIgnoreCaseFunc{key, sset}}, nil
|
||||
}
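The ignore-case variants differ from the plain string functions above only in matching with strings.EqualFold. A minimal in-package sketch (not part of the deleted file), consistent with the lowercase "replace" test case in the file that follows.

// Illustrative sketch only; written as if inside package condition because
// evaluate() is unexported.
package condition

import "fmt"

func exampleStringEqualsIgnoreCase() {
	fn, err := NewStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, "REPLACE")
	if err != nil {
		panic(err)
	}

	// Case differences are ignored, but a different value still fails.
	fmt.Println(fn.evaluate(map[string][]string{"x-amz-metadata-directive": {"replace"}})) // true
	fmt.Println(fn.evaluate(map[string][]string{"x-amz-metadata-directive": {"COPY"}}))    // false
}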
|
|
@ -1,710 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStringEqualsIgnoreCaseFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false},
|
||||
{case1Function, map[string][]string{}, false},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true},
|
||||
{case2Function, map[string][]string{"x-amz-server-side-encryption": {"aes256"}}, true},
|
||||
{case2Function, map[string][]string{}, false},
|
||||
{case2Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true},
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"replace"}}, true},
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false},
|
||||
{case3Function, map[string][]string{}, false},
|
||||
{case3Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true},
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false},
|
||||
{case4Function, map[string][]string{}, false},
|
||||
{case4Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringEqualsIgnoreCaseFuncKey(t *testing.T) {
|
||||
case1Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, S3XAmzCopySource},
|
||||
{case2Function, S3XAmzServerSideEncryption},
|
||||
{case3Function, S3XAmzMetadataDirective},
|
||||
{case4Function, S3LocationConstraint},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringEqualsIgnoreCaseFuncToMap(t *testing.T) {
|
||||
case1Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")),
|
||||
}
|
||||
|
||||
case2Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
}
|
||||
|
||||
case3Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")),
|
||||
}
|
||||
|
||||
case4Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
}
|
||||
|
||||
case5Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")),
|
||||
}
|
||||
|
||||
case6Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
}
|
||||
|
||||
case7Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")),
|
||||
}
|
||||
|
||||
case8Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{case3Function, case3Result},
|
||||
{case4Function, case4Result},
|
||||
{case5Function, case5Result},
|
||||
{case6Function, case6Result},
|
||||
{case7Function, case7Result},
|
||||
{case8Function, case8Result},
|
||||
{&stringEqualsIgnoreCaseFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringNotEqualsIgnoreCaseFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true},
|
||||
{case1Function, map[string][]string{}, true},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case2Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false},
|
||||
{case2Function, map[string][]string{}, true},
|
||||
{case2Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false},
|
||||
{case3Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true},
|
||||
{case3Function, map[string][]string{}, true},
|
||||
{case3Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false},
|
||||
{case4Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true},
|
||||
{case4Function, map[string][]string{}, true},
|
||||
{case4Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringNotEqualsIgnoreCaseFuncKey(t *testing.T) {
|
||||
case1Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, S3XAmzCopySource},
|
||||
{case2Function, S3XAmzServerSideEncryption},
|
||||
{case3Function, S3XAmzMetadataDirective},
|
||||
{case4Function, S3LocationConstraint},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringNotEqualsIgnoreCaseFuncToMap(t *testing.T) {
|
||||
case1Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/myobject")),
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES256")),
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
}
|
||||
|
||||
case5Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPLACE")),
|
||||
}
|
||||
|
||||
case6Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
}
|
||||
|
||||
case7Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(NewStringValue("eu-west-1")),
|
||||
}
|
||||
|
||||
case8Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{case3Function, case3Result},
|
||||
{case4Function, case4Result},
|
||||
{case5Function, case5Result},
|
||||
{case6Function, case6Result},
|
||||
{case7Function, case7Result},
|
||||
{case8Function, case8Result},
|
||||
{&stringNotEqualsIgnoreCaseFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStringEqualsIgnoreCaseFunc(t *testing.T) {
|
||||
case1Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Function, err := newStringEqualsIgnoreCaseFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Function, err := newStringEqualsIgnoreCaseFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false},
|
||||
{S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
), case2Function, false},
|
||||
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false},
|
||||
{S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
), case4Function, false},
|
||||
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false},
|
||||
{S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
), case6Function, false},
|
||||
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false},
|
||||
{S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
), case8Function, false},
|
||||
|
||||
// Unsupported value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true},
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true},
|
||||
|
||||
// Invalid value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newStringEqualsIgnoreCaseFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStringNotEqualsIgnoreCaseFunc(t *testing.T) {
|
||||
case1Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Function, err := newStringNotEqualsIgnoreCaseFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Function, err := newStringNotEqualsIgnoreCaseFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")), case1Function, false},
|
||||
{S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/myobject"),
|
||||
NewStringValue("yourbucket/myobject"),
|
||||
), case2Function, false},
|
||||
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")), case3Function, false},
|
||||
{S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES256"),
|
||||
), case4Function, false},
|
||||
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")), case5Function, false},
|
||||
{S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPLACE"),
|
||||
NewStringValue("COPY"),
|
||||
), case6Function, false},
|
||||
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")), case7Function, false},
|
||||
{S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-1"),
|
||||
NewStringValue("us-west-1"),
|
||||
), case8Function, false},
|
||||
|
||||
// Unsupported value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true},
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true},
|
||||
|
||||
// Invalid value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("SSE-C")), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("DUPLICATE")), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newStringNotEqualsIgnoreCaseFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,174 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sort"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/s3utils"
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
"github.com/minio/minio/pkg/wildcard"
|
||||
)
|
||||
|
||||
func toStringLikeFuncString(n name, key Key, values set.StringSet) string {
|
||||
valueStrings := values.ToSlice()
|
||||
sort.Strings(valueStrings)
|
||||
|
||||
return fmt.Sprintf("%v:%v:%v", n, key, valueStrings)
|
||||
}
|
||||
|
||||
// stringLikeFunc - String like function. It checks whether value by Key in given
|
||||
// values map is wildcard matching in condition values.
|
||||
// For example,
|
||||
// - if values = ["mybucket/foo*"], at evaluate() it returns whether string
|
||||
// in value map for Key is wildcard matching in values.
|
||||
type stringLikeFunc struct {
|
||||
k Key
|
||||
values set.StringSet
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether value by Key in given values is wildcard
|
||||
// matching in condition values.
|
||||
func (f stringLikeFunc) evaluate(values map[string][]string) bool {
|
||||
requestValue, ok := values[http.CanonicalHeaderKey(f.k.Name())]
|
||||
if !ok {
|
||||
requestValue = values[f.k.Name()]
|
||||
}
|
||||
|
||||
fvalues := f.values.ApplyFunc(substFuncFromValues(values))
|
||||
|
||||
for _, v := range requestValue {
|
||||
if !fvalues.FuncMatch(wildcard.Match, v).IsEmpty() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
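// Illustrative sketch (not part of the original file): how evaluate() behaves for
// a StringLike condition on x-amz-copy-source. Only identifiers defined in this
// package are used; the header name mirrors the ones used in the package tests.
//
//    f, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/foo*")))
//    if err != nil {
//        // handle error
//    }
//    f.evaluate(map[string][]string{"x-amz-copy-source": {"mybucket/foobar"}}) // true: matches the wildcard pattern
//    f.evaluate(map[string][]string{"x-amz-copy-source": {"otherbucket/foo"}}) // false: no pattern matches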
|
||||
// key() - returns condition key which is used by this condition function.
|
||||
func (f stringLikeFunc) key() Key {
|
||||
return f.k
|
||||
}
|
||||
|
||||
// name() - returns "StringLike" function name.
|
||||
func (f stringLikeFunc) name() name {
|
||||
return stringLike
|
||||
}
|
||||
|
||||
func (f stringLikeFunc) String() string {
|
||||
return toStringLikeFuncString(stringLike, f.k, f.values)
|
||||
}
|
||||
|
||||
// toMap - returns map representation of this function.
|
||||
func (f stringLikeFunc) toMap() map[Key]ValueSet {
|
||||
if !f.k.IsValid() {
|
||||
return nil
|
||||
}
|
||||
|
||||
values := NewValueSet()
|
||||
for _, value := range f.values.ToSlice() {
|
||||
values.Add(NewStringValue(value))
|
||||
}
|
||||
|
||||
return map[Key]ValueSet{
|
||||
f.k: values,
|
||||
}
|
||||
}
|
||||
|
||||
// stringNotLikeFunc - String not like function. It checks whether value by Key in given
|
||||
// values map is NOT wildcard matching in condition values.
|
||||
// For example,
|
||||
// - if values = ["mybucket/foo*"], at evaluate() it returns whether string
|
||||
// in value map for Key is NOT wildcard matching in values.
|
||||
type stringNotLikeFunc struct {
|
||||
stringLikeFunc
|
||||
}
|
||||
|
||||
// evaluate() - evaluates to check whether value by Key in given values is NOT wildcard
|
||||
// matching in condition values.
|
||||
func (f stringNotLikeFunc) evaluate(values map[string][]string) bool {
|
||||
return !f.stringLikeFunc.evaluate(values)
|
||||
}
|
||||
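// Illustrative sketch (not part of the original file): stringNotLikeFunc simply
// negates the embedded stringLikeFunc, so a request value that matches none of
// the configured patterns evaluates to true.
//
//    f, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")))
//    if err != nil {
//        // handle error
//    }
//    f.evaluate(map[string][]string{"x-amz-copy-source": {"otherbucket/obj"}}) // true: does not match "mybucket/*"
//    f.evaluate(map[string][]string{"x-amz-copy-source": {"mybucket/obj"}})    // false: matches, so the negation fails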
|
||||
// name() - returns "StringNotLike" function name.
|
||||
func (f stringNotLikeFunc) name() name {
|
||||
return stringNotLike
|
||||
}
|
||||
|
||||
func (f stringNotLikeFunc) String() string {
|
||||
return toStringLikeFuncString(stringNotLike, f.stringLikeFunc.k, f.stringLikeFunc.values)
|
||||
}
|
||||
|
||||
func validateStringLikeValues(n name, key Key, values set.StringSet) error {
|
||||
for _, s := range values.ToSlice() {
|
||||
switch key {
|
||||
case S3XAmzCopySource:
|
||||
bucket, object := path2BucketAndObject(s)
|
||||
if object == "" {
|
||||
return fmt.Errorf("invalid value '%v' for '%v' for %v condition", s, S3XAmzCopySource, n)
|
||||
}
|
||||
if err := s3utils.CheckValidBucketName(bucket); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
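// Illustrative sketch (not part of the original file): for S3XAmzCopySource the
// validation above requires a "bucket/object" form, so a bucket-only value is
// rejected while a bucket/pattern value is accepted.
//
//    _, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")))  // err != nil: missing object part
//    _, err = newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*"))) // err == nil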
|
||||
// newStringLikeFunc - returns new StringLike function.
|
||||
func newStringLikeFunc(key Key, values ValueSet) (Function, error) {
|
||||
valueStrings, err := valuesToStringSlice(stringLike, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewStringLikeFunc(key, valueStrings...)
|
||||
}
|
||||
|
||||
// NewStringLikeFunc - returns new StringLike function.
|
||||
func NewStringLikeFunc(key Key, values ...string) (Function, error) {
|
||||
sset := set.CreateStringSet(values...)
|
||||
if err := validateStringLikeValues(stringLike, key, sset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &stringLikeFunc{key, sset}, nil
|
||||
}
|
||||
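// Illustrative sketch (not part of the original file): typical use of the exported
// constructor from another package; error handling is abbreviated and the policy
// wiring around it is assumed, not shown in this file.
//
//    f, err := condition.NewStringLikeFunc(condition.S3LocationConstraint, "eu-west-*", "us-west-*")
//    if err != nil {
//        // handle error
//    }
//    _ = f // the Function can then be combined with others into a policy's condition set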
|
||||
// newStringNotLikeFunc - returns new StringNotLike function.
|
||||
func newStringNotLikeFunc(key Key, values ValueSet) (Function, error) {
|
||||
valueStrings, err := valuesToStringSlice(stringNotLike, values)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return NewStringNotLikeFunc(key, valueStrings...)
|
||||
}
|
||||
|
||||
// NewStringNotLikeFunc - returns new StringNotLike function.
|
||||
func NewStringNotLikeFunc(key Key, values ...string) (Function, error) {
|
||||
sset := set.CreateStringSet(values...)
|
||||
if err := validateStringLikeValues(stringNotLike, key, sset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &stringNotLikeFunc{stringLikeFunc{key, sset}}, nil
|
||||
}
|
|
@@ -1,798 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStringLikeFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, true},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false},
|
||||
{case1Function, map[string][]string{}, false},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, true},
|
||||
{case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, false},
|
||||
{case2Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, false},
|
||||
{case2Function, map[string][]string{}, false},
|
||||
{case2Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true},
|
||||
{case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, true},
|
||||
{case3Function, map[string][]string{}, false},
|
||||
{case3Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, true},
|
||||
{case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, false},
|
||||
{case4Function, map[string][]string{}, false},
|
||||
{case4Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true},
|
||||
{case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, true},
|
||||
{case5Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false},
|
||||
{case5Function, map[string][]string{}, false},
|
||||
{case5Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, true},
|
||||
{case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, false},
|
||||
{case6Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, false},
|
||||
{case6Function, map[string][]string{}, false},
|
||||
{case6Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case7Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true},
|
||||
{case7Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, true},
|
||||
{case7Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false},
|
||||
{case7Function, map[string][]string{}, false},
|
||||
{case7Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
|
||||
{case8Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, true},
|
||||
{case8Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, false},
|
||||
{case8Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, false},
|
||||
{case8Function, map[string][]string{}, false},
|
||||
{case8Function, map[string][]string{"delimiter": {"/"}}, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringLikeFuncKey(t *testing.T) {
|
||||
case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, S3XAmzCopySource},
|
||||
{case2Function, S3XAmzServerSideEncryption},
|
||||
{case3Function, S3XAmzMetadataDirective},
|
||||
{case4Function, S3LocationConstraint},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringLikeFuncToMap(t *testing.T) {
|
||||
case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/*")),
|
||||
}
|
||||
|
||||
case2Function, err := newStringLikeFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/*"),
|
||||
NewStringValue("yourbucket/myobject*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(
|
||||
NewStringValue("mybucket/*"),
|
||||
NewStringValue("yourbucket/myobject*"),
|
||||
),
|
||||
}
|
||||
|
||||
case3Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES*")),
|
||||
}
|
||||
|
||||
case4Function, err := newStringLikeFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(
|
||||
NewStringValue("AES*"),
|
||||
),
|
||||
}
|
||||
|
||||
case5Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPL*")),
|
||||
}
|
||||
|
||||
case6Function, err := newStringLikeFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPL*"),
|
||||
NewStringValue("COPY*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(
|
||||
NewStringValue("REPL*"),
|
||||
NewStringValue("COPY*"),
|
||||
),
|
||||
}
|
||||
|
||||
case7Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(NewStringValue("eu-west-*")),
|
||||
}
|
||||
|
||||
case8Function, err := newStringLikeFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-*"),
|
||||
NewStringValue("us-west-*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(
|
||||
NewStringValue("eu-west-*"),
|
||||
NewStringValue("us-west-*"),
|
||||
),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{case3Function, case3Result},
|
||||
{case4Function, case4Result},
|
||||
{case5Function, case5Result},
|
||||
{case6Function, case6Result},
|
||||
{case7Function, case7Result},
|
||||
{case8Function, case8Result},
|
||||
{&stringLikeFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringNotLikeFuncEvaluate(t *testing.T) {
|
||||
case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
values map[string][]string
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, false},
|
||||
{case1Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true},
|
||||
{case1Function, map[string][]string{}, true},
|
||||
{case1Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject"}}, false},
|
||||
{case2Function, map[string][]string{"x-amz-copy-source": {"mybucket/myobject.png"}}, true},
|
||||
{case2Function, map[string][]string{"x-amz-copy-source": {"yourbucket/myobject"}}, true},
|
||||
{case2Function, map[string][]string{}, true},
|
||||
{case2Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false},
|
||||
{case3Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, false},
|
||||
{case3Function, map[string][]string{}, true},
|
||||
{case3Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES256"}}, false},
|
||||
{case4Function, map[string][]string{"x-amz-server-side-encryption": {"AES512"}}, true},
|
||||
{case4Function, map[string][]string{}, true},
|
||||
{case4Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false},
|
||||
{case5Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, false},
|
||||
{case5Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true},
|
||||
{case5Function, map[string][]string{}, true},
|
||||
{case5Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE"}}, false},
|
||||
{case6Function, map[string][]string{"x-amz-metadata-directive": {"REPLACE/COPY"}}, true},
|
||||
{case6Function, map[string][]string{"x-amz-metadata-directive": {"COPY"}}, true},
|
||||
{case6Function, map[string][]string{}, true},
|
||||
{case6Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case7Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false},
|
||||
{case7Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, false},
|
||||
{case7Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true},
|
||||
{case7Function, map[string][]string{}, true},
|
||||
{case7Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
|
||||
{case8Function, map[string][]string{"LocationConstraint": {"eu-west-1"}}, false},
|
||||
{case8Function, map[string][]string{"LocationConstraint": {"eu-west-2"}}, true},
|
||||
{case8Function, map[string][]string{"LocationConstraint": {"us-east-1"}}, true},
|
||||
{case8Function, map[string][]string{}, true},
|
||||
{case8Function, map[string][]string{"delimiter": {"/"}}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.evaluate(testCase.values)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringNotLikeFuncKey(t *testing.T) {
|
||||
case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
function Function
|
||||
expectedResult Key
|
||||
}{
|
||||
{case1Function, S3XAmzCopySource},
|
||||
{case2Function, S3XAmzServerSideEncryption},
|
||||
{case3Function, S3XAmzMetadataDirective},
|
||||
{case4Function, S3LocationConstraint},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.function.key()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringNotLikeFuncToMap(t *testing.T) {
|
||||
case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case1Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(NewStringValue("mybucket/*")),
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotLikeFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/*"),
|
||||
NewStringValue("yourbucket/myobject*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Result := map[Key]ValueSet{
|
||||
S3XAmzCopySource: NewValueSet(
|
||||
NewStringValue("mybucket/*"),
|
||||
NewStringValue("yourbucket/myobject*"),
|
||||
),
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(NewStringValue("AES*")),
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Result := map[Key]ValueSet{
|
||||
S3XAmzServerSideEncryption: NewValueSet(
|
||||
NewStringValue("AES*"),
|
||||
),
|
||||
}
|
||||
|
||||
case5Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(NewStringValue("REPL*")),
|
||||
}
|
||||
|
||||
case6Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPL*"),
|
||||
NewStringValue("COPY*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Result := map[Key]ValueSet{
|
||||
S3XAmzMetadataDirective: NewValueSet(
|
||||
NewStringValue("REPL*"),
|
||||
NewStringValue("COPY*"),
|
||||
),
|
||||
}
|
||||
|
||||
case7Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(NewStringValue("eu-west-*")),
|
||||
}
|
||||
|
||||
case8Function, err := newStringNotLikeFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-*"),
|
||||
NewStringValue("us-west-*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Result := map[Key]ValueSet{
|
||||
S3LocationConstraint: NewValueSet(
|
||||
NewStringValue("eu-west-*"),
|
||||
NewStringValue("us-west-*"),
|
||||
),
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
f Function
|
||||
expectedResult map[Key]ValueSet
|
||||
}{
|
||||
{case1Function, case1Result},
|
||||
{case2Function, case2Result},
|
||||
{case3Function, case3Result},
|
||||
{case4Function, case4Result},
|
||||
{case5Function, case5Result},
|
||||
{case6Function, case6Result},
|
||||
{case7Function, case7Result},
|
||||
{case8Function, case8Result},
|
||||
{&stringNotLikeFunc{}, nil},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.f.toMap()
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStringLikeFunc(t *testing.T) {
|
||||
case1Function, err := newStringLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringLikeFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/*"),
|
||||
NewStringValue("yourbucket/myobject*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringLikeFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Function, err := newStringLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Function, err := newStringLikeFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPL*"),
|
||||
NewStringValue("COPY*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Function, err := newStringLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Function, err := newStringLikeFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-*"),
|
||||
NewStringValue("us-west-*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")), case1Function, false},
|
||||
{S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/*"),
|
||||
NewStringValue("yourbucket/myobject*"),
|
||||
), case2Function, false},
|
||||
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")), case3Function, false},
|
||||
{S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES*"),
|
||||
), case4Function, false},
|
||||
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")), case5Function, false},
|
||||
{S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPL*"),
|
||||
NewStringValue("COPY*"),
|
||||
), case6Function, false},
|
||||
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")), case7Function, false},
|
||||
{S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-*"),
|
||||
NewStringValue("us-west-*"),
|
||||
), case8Function, false},
|
||||
|
||||
// Unsupported value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true},
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true},
|
||||
|
||||
// Invalid value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newStringLikeFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewStringNotLikeFunc(t *testing.T) {
|
||||
case1Function, err := newStringNotLikeFunc(S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case2Function, err := newStringNotLikeFunc(S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/*"),
|
||||
NewStringValue("yourbucket/myobject*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case4Function, err := newStringNotLikeFunc(S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case5Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case6Function, err := newStringNotLikeFunc(S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPL*"),
|
||||
NewStringValue("COPY*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case7Function, err := newStringNotLikeFunc(S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")))
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case8Function, err := newStringNotLikeFunc(S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-*"),
|
||||
NewStringValue("us-west-*"),
|
||||
),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
key Key
|
||||
values ValueSet
|
||||
expectedResult Function
|
||||
expectErr bool
|
||||
}{
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/*")), case1Function, false},
|
||||
{S3XAmzCopySource,
|
||||
NewValueSet(
|
||||
NewStringValue("mybucket/*"),
|
||||
NewStringValue("yourbucket/myobject*"),
|
||||
), case2Function, false},
|
||||
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES*")), case3Function, false},
|
||||
{S3XAmzServerSideEncryption,
|
||||
NewValueSet(
|
||||
NewStringValue("AES*"),
|
||||
), case4Function, false},
|
||||
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPL*")), case5Function, false},
|
||||
{S3XAmzMetadataDirective,
|
||||
NewValueSet(
|
||||
NewStringValue("REPL*"),
|
||||
NewStringValue("COPY*"),
|
||||
), case6Function, false},
|
||||
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-*")), case7Function, false},
|
||||
{S3LocationConstraint,
|
||||
NewValueSet(
|
||||
NewStringValue("eu-west-*"),
|
||||
NewStringValue("us-west-*"),
|
||||
), case8Function, false},
|
||||
|
||||
// Unsupported value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket/myobject"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzServerSideEncryption, NewValueSet(NewStringValue("AES256"), NewIntValue(7)), nil, true},
|
||||
{S3XAmzMetadataDirective, NewValueSet(NewStringValue("REPLACE"), NewIntValue(7)), nil, true},
|
||||
{S3LocationConstraint, NewValueSet(NewStringValue("eu-west-1"), NewIntValue(7)), nil, true},
|
||||
|
||||
// Invalid value error.
|
||||
{S3XAmzCopySource, NewValueSet(NewStringValue("mybucket")), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := newStringNotLikeFunc(testCase.key, testCase.values)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,175 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Splits an incoming path into bucket and object components.
|
||||
func path2BucketAndObject(path string) (bucket, object string) {
|
||||
// Skip the first element if it is '/', split the rest.
|
||||
path = strings.TrimPrefix(path, "/")
|
||||
pathComponents := strings.SplitN(path, "/", 2)
|
||||
|
||||
// Save the bucket and object extracted from path.
|
||||
switch len(pathComponents) {
|
||||
case 1:
|
||||
bucket = pathComponents[0]
|
||||
case 2:
|
||||
bucket = pathComponents[0]
|
||||
object = pathComponents[1]
|
||||
}
|
||||
return bucket, object
|
||||
}
|
||||
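// Illustrative examples (not part of the original file) of how the split behaves:
//
//    path2BucketAndObject("mybucket/myobject")     // bucket "mybucket", object "myobject"
//    path2BucketAndObject("/mybucket/dir/obj.png") // bucket "mybucket", object "dir/obj.png" (leading '/' trimmed, object keeps its slashes)
//    path2BucketAndObject("mybucket")              // bucket "mybucket", object "" (no object component)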
|
||||
// Value - is an enum type of string, int or bool.
|
||||
type Value struct {
|
||||
t reflect.Kind
|
||||
s string
|
||||
i int
|
||||
b bool
|
||||
}
|
||||
|
||||
// GetBool - gets stored bool value.
|
||||
func (v Value) GetBool() (bool, error) {
|
||||
var err error
|
||||
|
||||
if v.t != reflect.Bool {
|
||||
err = fmt.Errorf("not a bool Value")
|
||||
}
|
||||
|
||||
return v.b, err
|
||||
}
|
||||
|
||||
// GetInt - gets stored int value.
|
||||
func (v Value) GetInt() (int, error) {
|
||||
var err error
|
||||
|
||||
if v.t != reflect.Int {
|
||||
err = fmt.Errorf("not a int Value")
|
||||
}
|
||||
|
||||
return v.i, err
|
||||
}
|
||||
|
||||
// GetString - gets stored string value.
|
||||
func (v Value) GetString() (string, error) {
|
||||
var err error
|
||||
|
||||
if v.t != reflect.String {
|
||||
err = fmt.Errorf("not a string Value")
|
||||
}
|
||||
|
||||
return v.s, err
|
||||
}
|
||||
|
||||
// GetType - gets enum type.
|
||||
func (v Value) GetType() reflect.Kind {
|
||||
return v.t
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes Value to JSON data.
|
||||
func (v Value) MarshalJSON() ([]byte, error) {
|
||||
switch v.t {
|
||||
case reflect.String:
|
||||
return json.Marshal(v.s)
|
||||
case reflect.Int:
|
||||
return json.Marshal(v.i)
|
||||
case reflect.Bool:
|
||||
return json.Marshal(v.b)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown value kind %v", v.t)
|
||||
}
|
||||
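// Illustrative sketch (not part of the original file): only the three supported
// kinds marshal; the zero Value has kind reflect.Invalid and yields an error.
//
//    json.Marshal(NewStringValue("foo")) // `"foo"`, nil
//    json.Marshal(NewIntValue(7))        // `7`, nil
//    json.Marshal(Value{})               // nil, "unknown value kind" error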
|
||||
// StoreBool - stores bool value.
|
||||
func (v *Value) StoreBool(b bool) {
|
||||
*v = Value{t: reflect.Bool, b: b}
|
||||
}
|
||||
|
||||
// StoreInt - stores int value.
|
||||
func (v *Value) StoreInt(i int) {
|
||||
*v = Value{t: reflect.Int, i: i}
|
||||
}
|
||||
|
||||
// StoreString - stores string value.
|
||||
func (v *Value) StoreString(s string) {
|
||||
*v = Value{t: reflect.String, s: s}
|
||||
}
|
||||
|
||||
// String - returns string representation of value.
|
||||
func (v Value) String() string {
|
||||
switch v.t {
|
||||
case reflect.String:
|
||||
return v.s
|
||||
case reflect.Int:
|
||||
return strconv.Itoa(v.i)
|
||||
case reflect.Bool:
|
||||
return strconv.FormatBool(v.b)
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data.
|
||||
func (v *Value) UnmarshalJSON(data []byte) error {
|
||||
var b bool
|
||||
if err := json.Unmarshal(data, &b); err == nil {
|
||||
v.StoreBool(b)
|
||||
return nil
|
||||
}
|
||||
|
||||
var i int
|
||||
if err := json.Unmarshal(data, &i); err == nil {
|
||||
v.StoreInt(i)
|
||||
return nil
|
||||
}
|
||||
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err == nil {
|
||||
v.StoreString(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unknown json data '%v'", data)
|
||||
}
|
||||
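// Illustrative sketch (not part of the original file): decoding tries bool, then
// int, then string, and anything else is rejected.
//
//    var v Value
//    _ = json.Unmarshal([]byte(`true`), &v)   // v.GetType() == reflect.Bool
//    _ = json.Unmarshal([]byte(`7`), &v)      // v.GetType() == reflect.Int
//    _ = json.Unmarshal([]byte(`"foo"`), &v)  // v.GetType() == reflect.String
//    err := json.Unmarshal([]byte(`7.1`), &v) // err != nil: no supported kind matches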
|
||||
// NewBoolValue - returns new bool value.
|
||||
func NewBoolValue(b bool) Value {
|
||||
value := &Value{}
|
||||
value.StoreBool(b)
|
||||
return *value
|
||||
}
|
||||
|
||||
// NewIntValue - returns new int value.
|
||||
func NewIntValue(i int) Value {
|
||||
value := &Value{}
|
||||
value.StoreInt(i)
|
||||
return *value
|
||||
}
|
||||
|
||||
// NewStringValue - returns new string value.
|
||||
func NewStringValue(s string) Value {
|
||||
value := &Value{}
|
||||
value.StoreString(s)
|
||||
return *value
|
||||
}
|
|
@@ -1,260 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestValueGetBool(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value Value
|
||||
expectedResult bool
|
||||
expectErr bool
|
||||
}{
|
||||
{NewBoolValue(true), true, false},
|
||||
{NewIntValue(7), false, true},
|
||||
{Value{}, false, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := testCase.value.GetBool()
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueGetInt(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value Value
|
||||
expectedResult int
|
||||
expectErr bool
|
||||
}{
|
||||
{NewIntValue(7), 7, false},
|
||||
{NewBoolValue(true), 0, true},
|
||||
{Value{}, 0, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := testCase.value.GetInt()
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueGetString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value Value
|
||||
expectedResult string
|
||||
expectErr bool
|
||||
}{
|
||||
{NewStringValue("foo"), "foo", false},
|
||||
{NewBoolValue(true), "", true},
|
||||
{Value{}, "", true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := testCase.value.GetString()
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueGetType(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value Value
|
||||
expectedResult reflect.Kind
|
||||
}{
|
||||
{NewBoolValue(true), reflect.Bool},
|
||||
{NewIntValue(7), reflect.Int},
|
||||
{NewStringValue("foo"), reflect.String},
|
||||
{Value{}, reflect.Invalid},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.value.GetType()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value Value
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{NewBoolValue(true), []byte("true"), false},
|
||||
{NewIntValue(7), []byte("7"), false},
|
||||
{NewStringValue("foo"), []byte(`"foo"`), false},
|
||||
{Value{}, nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.value)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueStoreBool(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value bool
|
||||
expectedResult Value
|
||||
}{
|
||||
{false, NewBoolValue(false)},
|
||||
{true, NewBoolValue(true)},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result Value
|
||||
result.StoreBool(testCase.value)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueStoreInt(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value int
|
||||
expectedResult Value
|
||||
}{
|
||||
{0, NewIntValue(0)},
|
||||
{7, NewIntValue(7)},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result Value
|
||||
result.StoreInt(testCase.value)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueStoreString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value string
|
||||
expectedResult Value
|
||||
}{
|
||||
{"", NewStringValue("")},
|
||||
{"foo", NewStringValue("foo")},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result Value
|
||||
result.StoreString(testCase.value)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueString(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value Value
|
||||
expectedResult string
|
||||
}{
|
||||
{NewBoolValue(true), "true"},
|
||||
{NewIntValue(7), "7"},
|
||||
{NewStringValue("foo"), "foo"},
|
||||
{Value{}, ""},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.value.String()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult Value
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte("true"), NewBoolValue(true), false},
|
||||
{[]byte("7"), NewIntValue(7), false},
|
||||
{[]byte(`"foo"`), NewStringValue("foo"), false},
|
||||
{[]byte("True"), Value{}, true},
|
||||
{[]byte("7.1"), Value{}, true},
|
||||
{[]byte(`["foo"]`), Value{}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result Value
|
||||
err := json.Unmarshal(testCase.data, &result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -1,85 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// ValueSet - unique list of values.
|
||||
type ValueSet map[Value]struct{}
|
||||
|
||||
// Add - adds given value to value set.
|
||||
func (set ValueSet) Add(value Value) {
|
||||
set[value] = struct{}{}
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes ValueSet to JSON data.
|
||||
func (set ValueSet) MarshalJSON() ([]byte, error) {
|
||||
var values []Value
|
||||
for k := range set {
|
||||
values = append(values, k)
|
||||
}
|
||||
|
||||
if len(values) == 0 {
|
||||
return nil, fmt.Errorf("invalid value set %v", set)
|
||||
}
|
||||
|
||||
return json.Marshal(values)
|
||||
}
|
||||
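// Illustrative sketch (not part of the original file): marshalling an empty
// ValueSet is an error, while a populated set encodes as a JSON array.
//
//    _, err := json.Marshal(make(ValueSet))               // err != nil
//    data, _ := json.Marshal(NewValueSet(NewIntValue(7))) // data == []byte(`[7]`)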
|
||||
// UnmarshalJSON - decodes JSON data.
|
||||
func (set *ValueSet) UnmarshalJSON(data []byte) error {
|
||||
var v Value
|
||||
if err := json.Unmarshal(data, &v); err == nil {
|
||||
*set = make(ValueSet)
|
||||
set.Add(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
var values []Value
|
||||
if err := json.Unmarshal(data, &values); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(values) < 1 {
|
||||
return fmt.Errorf("invalid value")
|
||||
}
|
||||
|
||||
*set = make(ValueSet)
|
||||
for _, v = range values {
|
||||
if _, found := (*set)[v]; found {
|
||||
return fmt.Errorf("duplicate value found '%v'", v)
|
||||
}
|
||||
|
||||
set.Add(v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
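// Illustrative sketch (not part of the original file): a ValueSet decodes from
// either a single JSON value or a JSON array, and rejects empty arrays and
// duplicate entries, matching the checks above.
//
//    var set ValueSet
//    _ = json.Unmarshal([]byte(`"foo"`), &set)          // one string value
//    _ = json.Unmarshal([]byte(`[true, 7, "7"]`), &set) // three distinct values
//    err := json.Unmarshal([]byte(`[7, 7]`), &set)      // err != nil: duplicate value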
|
||||
// NewValueSet - returns new value set containing given values.
|
||||
func NewValueSet(values ...Value) ValueSet {
|
||||
set := make(ValueSet)
|
||||
|
||||
for _, value := range values {
|
||||
set.Add(value)
|
||||
}
|
||||
|
||||
return set
|
||||
}
|
|
@@ -1,118 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package condition
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestValueSetAdd(t *testing.T) {
|
||||
testCases := []struct {
|
||||
value Value
|
||||
expectedResult ValueSet
|
||||
}{
|
||||
{NewBoolValue(true), NewValueSet(NewBoolValue(true))},
|
||||
{NewIntValue(7), NewValueSet(NewIntValue(7))},
|
||||
{NewStringValue("foo"), NewValueSet(NewStringValue("foo"))},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := NewValueSet()
|
||||
result.Add(testCase.value)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueSetMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set ValueSet
|
||||
expectedResult string
|
||||
expectErr bool
|
||||
}{
|
||||
{NewValueSet(NewBoolValue(true)), `[true]`, false},
|
||||
{NewValueSet(NewIntValue(7)), `[7]`, false},
|
||||
{NewValueSet(NewStringValue("foo")), `["foo"]`, false},
|
||||
{NewValueSet(NewBoolValue(true)), `[true]`, false},
|
||||
{NewValueSet(NewStringValue("7")), `["7"]`, false},
|
||||
{NewValueSet(NewStringValue("foo")), `["foo"]`, false},
|
||||
{make(ValueSet), "", true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.set)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if string(result) != testCase.expectedResult {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, string(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestValueSetUnmarshalJSON(t *testing.T) {
|
||||
set1 := NewValueSet(
|
||||
NewBoolValue(true),
|
||||
NewStringValue("false"),
|
||||
NewIntValue(7),
|
||||
NewStringValue("7"),
|
||||
NewStringValue("foo"),
|
||||
NewStringValue("192.168.1.100/24"),
|
||||
)
|
||||
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult ValueSet
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`true`), NewValueSet(NewBoolValue(true)), false},
|
||||
{[]byte(`7`), NewValueSet(NewIntValue(7)), false},
|
||||
{[]byte(`"foo"`), NewValueSet(NewStringValue("foo")), false},
|
||||
{[]byte(`[true]`), NewValueSet(NewBoolValue(true)), false},
|
||||
{[]byte(`[7]`), NewValueSet(NewIntValue(7)), false},
|
||||
{[]byte(`["foo"]`), NewValueSet(NewStringValue("foo")), false},
|
||||
{[]byte(`[true, "false", 7, "7", "foo", "192.168.1.100/24"]`), set1, false},
|
||||
{[]byte(`{}`), nil, true}, // Unsupported data.
|
||||
{[]byte(`[]`), nil, true}, // Empty array.
|
||||
{[]byte(`[7, 7, true]`), nil, true}, // Duplicate value.
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := make(ValueSet)
|
||||
err := json.Unmarshal(testCase.data, &result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
// Effect - policy statement effect Allow or Deny.
|
||||
type Effect string
|
||||
|
||||
const (
|
||||
// Allow - allow effect.
|
||||
Allow Effect = "Allow"
|
||||
|
||||
// Deny - deny effect.
|
||||
Deny = "Deny"
|
||||
)
|
||||
|
||||
// IsAllowed - returns whether the given check result is allowed under this effect.
|
||||
func (effect Effect) IsAllowed(b bool) bool {
|
||||
if effect == Allow {
|
||||
return b
|
||||
}
|
||||
|
||||
return !b
|
||||
}
|
||||
|
||||
// IsValid - checks if Effect is valid or not.
|
||||
func (effect Effect) IsValid() bool {
|
||||
switch effect {
|
||||
case Allow, Deny:
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
|
@ -1,63 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEffectIsAllowed(t *testing.T) {
|
||||
testCases := []struct {
|
||||
effect Effect
|
||||
check bool
|
||||
expectedResult bool
|
||||
}{
|
||||
{Allow, false, false},
|
||||
{Allow, true, true},
|
||||
{Deny, false, true},
|
||||
{Deny, true, false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.effect.IsAllowed(testCase.check)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestEffectIsValid(t *testing.T) {
|
||||
testCases := []struct {
|
||||
effect Effect
|
||||
expectedResult bool
|
||||
}{
|
||||
{Allow, true},
|
||||
{Deny, true},
|
||||
{Effect(""), false},
|
||||
{Effect("foo"), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.effect.IsValid()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,44 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Error is the generic type for any error happening during policy
|
||||
// parsing.
|
||||
type Error struct {
|
||||
err error
|
||||
}
|
||||
|
||||
// Errorf - formats according to a format specifier and returns
|
||||
// the string as a value that satisfies error of type policy.Error
|
||||
func Errorf(format string, a ...interface{}) error {
|
||||
return Error{err: fmt.Errorf(format, a...)}
|
||||
}
|
||||
|
||||
// Unwrap the internal error.
|
||||
func (e Error) Unwrap() error { return e.err }
|
||||
|
||||
// Error 'error' compatible method.
|
||||
func (e Error) Error() string {
|
||||
if e.err == nil {
|
||||
return "policy: cause <nil>"
|
||||
}
|
||||
return e.err.Error()
|
||||
}
|
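The Unwrap method above is what lets callers inspect the wrapped cause through the standard errors package. A minimal, hypothetical sketch (not part of the deleted sources; it assumes the package is importable at github.com/minio/minio/pkg/bucket/policy):

package main

import (
    "errors"
    "fmt"
    "io"

    "github.com/minio/minio/pkg/bucket/policy"
)

func main() {
    // Errorf wraps the cause via fmt.Errorf, and Error.Unwrap exposes it,
    // so errors.Is can see through the policy.Error wrapper.
    err := policy.Errorf("parse failed: %w", io.EOF)

    fmt.Println(errors.Is(err, io.EOF)) // true
}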
|
@ -1,29 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// ID - policy ID.
|
||||
type ID string
|
||||
|
||||
// IsValid - checks if ID is valid or not.
|
||||
func (id ID) IsValid() bool {
|
||||
return utf8.ValidString(string(id))
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestIDIsValid(t *testing.T) {
|
||||
testCases := []struct {
|
||||
id ID
|
||||
expectedResult bool
|
||||
}{
|
||||
{ID("DenyEncryptionSt1"), true},
|
||||
{ID(""), true},
|
||||
{ID("aa\xe2"), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.id.IsValid()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Errorf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,180 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// DefaultVersion - default policy version as per AWS S3 specification.
|
||||
const DefaultVersion = "2012-10-17"
|
||||
|
||||
// Args - arguments used to check whether a policy allows the request.
|
||||
type Args struct {
|
||||
AccountName string `json:"account"`
|
||||
Action Action `json:"action"`
|
||||
BucketName string `json:"bucket"`
|
||||
ConditionValues map[string][]string `json:"conditions"`
|
||||
IsOwner bool `json:"owner"`
|
||||
ObjectName string `json:"object"`
|
||||
}
|
||||
|
||||
// Policy - bucket policy.
|
||||
type Policy struct {
|
||||
ID ID `json:"ID,omitempty"`
|
||||
Version string
|
||||
Statements []Statement `json:"Statement"`
|
||||
}
|
||||
|
||||
// IsAllowed - checks whether the given args are allowed by this policy to continue the REST API call.
|
||||
func (policy Policy) IsAllowed(args Args) bool {
|
||||
// Check all deny statements. If any one statement denies, return false.
|
||||
for _, statement := range policy.Statements {
|
||||
if statement.Effect == Deny {
|
||||
if !statement.IsAllowed(args) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// For the owner, it's allowed by default.
|
||||
if args.IsOwner {
|
||||
return true
|
||||
}
|
||||
|
||||
// Check all allow statements. If any one statement allows, return true.
|
||||
for _, statement := range policy.Statements {
|
||||
if statement.Effect == Allow {
|
||||
if statement.IsAllowed(args) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
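To make the evaluation order above concrete (an explicit Deny always wins, the bucket owner is then allowed by default, and anyone else needs a matching Allow), here is a hedged, self-contained sketch; it is not part of the deleted file and assumes the upstream import paths and the action constants shown in the tests later in this diff:

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/bucket/policy"
    "github.com/minio/minio/pkg/bucket/policy/condition"
)

func main() {
    // One Allow statement: anyone may read objects under mybucket/public/.
    p := policy.Policy{
        Version: policy.DefaultVersion,
        Statements: []policy.Statement{
            policy.NewStatement(
                policy.Allow,
                policy.NewPrincipal("*"),
                policy.NewActionSet(policy.GetObjectAction),
                policy.NewResourceSet(policy.NewResource("mybucket", "/public/*")),
                condition.NewFunctions(),
            ),
        },
    }

    args := policy.Args{
        AccountName:     "Q3AM3UQ867SPQQA43P2F",
        Action:          policy.GetObjectAction,
        BucketName:      "mybucket",
        ConditionValues: map[string][]string{},
        ObjectName:      "public/index.html",
    }

    fmt.Println(p.IsAllowed(args)) // true: an Allow statement matches

    args.ObjectName = "secret/key.txt"
    fmt.Println(p.IsAllowed(args)) // false: nothing matches and the caller is not the owner

    args.IsOwner = true
    fmt.Println(p.IsAllowed(args)) // true: the owner is allowed unless an explicit Deny matches
}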
||||
|
||||
// IsEmpty - returns whether policy is empty or not.
|
||||
func (policy Policy) IsEmpty() bool {
|
||||
return len(policy.Statements) == 0
|
||||
}
|
||||
|
||||
// isValid - checks if Policy is valid or not.
|
||||
func (policy Policy) isValid() error {
|
||||
if policy.Version != DefaultVersion && policy.Version != "" {
|
||||
return Errorf("invalid version '%v'", policy.Version)
|
||||
}
|
||||
|
||||
for _, statement := range policy.Statements {
|
||||
if err := statement.isValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes Policy to JSON data.
|
||||
func (policy Policy) MarshalJSON() ([]byte, error) {
|
||||
if err := policy.isValid(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// subtype to avoid recursive call to MarshalJSON()
|
||||
type subPolicy Policy
|
||||
return json.Marshal(subPolicy(policy))
|
||||
}
|
||||
|
||||
func (policy *Policy) dropDuplicateStatements() {
|
||||
redo:
|
||||
for i := range policy.Statements {
|
||||
for j, statement := range policy.Statements[i+1:] {
|
||||
if policy.Statements[i].Effect != statement.Effect {
|
||||
continue
|
||||
}
|
||||
|
||||
if !policy.Statements[i].Principal.Equals(statement.Principal) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !policy.Statements[i].Actions.Equals(statement.Actions) {
|
||||
continue
|
||||
}
|
||||
|
||||
if !policy.Statements[i].Resources.Equals(statement.Resources) {
|
||||
continue
|
||||
}
|
||||
|
||||
if policy.Statements[i].Conditions.String() != statement.Conditions.String() {
|
||||
continue
|
||||
}
|
||||
policy.Statements = append(policy.Statements[:i+j+1], policy.Statements[i+j+2:]...)
|
||||
goto redo
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to Policy.
|
||||
func (policy *Policy) UnmarshalJSON(data []byte) error {
|
||||
// subtype to avoid recursive call to UnmarshalJSON()
|
||||
type subPolicy Policy
|
||||
var sp subPolicy
|
||||
if err := json.Unmarshal(data, &sp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p := Policy(sp)
|
||||
if err := p.isValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.dropDuplicateStatements()
|
||||
|
||||
*policy = p
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate - checks whether all statements apply to the given bucket.
|
||||
func (policy Policy) Validate(bucketName string) error {
|
||||
if err := policy.isValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, statement := range policy.Statements {
|
||||
if err := statement.Validate(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseConfig - parses data in given reader to Policy.
|
||||
func ParseConfig(reader io.Reader, bucketName string) (*Policy, error) {
|
||||
var policy Policy
|
||||
|
||||
decoder := json.NewDecoder(reader)
|
||||
decoder.DisallowUnknownFields()
|
||||
if err := decoder.Decode(&policy); err != nil {
|
||||
return nil, Errorf("%w", err)
|
||||
}
|
||||
|
||||
err := policy.Validate(bucketName)
|
||||
return &policy, err
|
||||
}
|
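As a hedged illustration of the parser above (not part of the deleted sources; it assumes the canonical "s3:GetObject" action name and the upstream import path), ParseConfig decodes a JSON policy, rejects unknown fields, and validates every statement against the bucket:

package main

import (
    "fmt"
    "strings"

    "github.com/minio/minio/pkg/bucket/policy"
)

func main() {
    doc := `{
      "Version": "2012-10-17",
      "Statement": [{
        "Effect": "Allow",
        "Principal": {"AWS": ["*"]},
        "Action": ["s3:GetObject"],
        "Resource": ["arn:aws:s3:::mybucket/*"]
      }]
    }`

    // ParseConfig uses DisallowUnknownFields and then Validate("mybucket").
    p, err := policy.ParseConfig(strings.NewReader(doc), "mybucket")
    if err != nil {
        fmt.Println("invalid policy:", err)
        return
    }
    fmt.Println("statements:", len(p.Statements)) // statements: 1
}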
File diff suppressed because it is too large
|
@ -1,96 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
"github.com/minio/minio/pkg/wildcard"
|
||||
)
|
||||
|
||||
// Principal - policy principal.
|
||||
type Principal struct {
|
||||
AWS set.StringSet
|
||||
}
|
||||
|
||||
// IsValid - checks whether Principal is valid or not.
|
||||
func (p Principal) IsValid() bool {
|
||||
return len(p.AWS) != 0
|
||||
}
|
||||
|
||||
// Equals - returns true if principals are equal.
|
||||
func (p Principal) Equals(pp Principal) bool {
|
||||
return p.AWS.Equals(pp.AWS)
|
||||
}
|
||||
|
||||
// Intersection - returns the principals present in both Principals.
|
||||
func (p Principal) Intersection(principal Principal) set.StringSet {
|
||||
return p.AWS.Intersection(principal.AWS)
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes Principal to JSON data.
|
||||
func (p Principal) MarshalJSON() ([]byte, error) {
|
||||
if !p.IsValid() {
|
||||
return nil, Errorf("invalid principal %v", p)
|
||||
}
|
||||
|
||||
// subtype to avoid recursive call to MarshalJSON()
|
||||
type subPrincipal Principal
|
||||
sp := subPrincipal(p)
|
||||
return json.Marshal(sp)
|
||||
}
|
||||
|
||||
// Match - returns whether the given principal matches any wildcard pattern in this Principal.
|
||||
func (p Principal) Match(principal string) bool {
|
||||
for _, pattern := range p.AWS.ToSlice() {
|
||||
if wildcard.MatchSimple(pattern, principal) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to Principal.
|
||||
func (p *Principal) UnmarshalJSON(data []byte) error {
|
||||
// subtype to avoid recursive call to UnmarshalJSON()
|
||||
type subPrincipal Principal
|
||||
var sp subPrincipal
|
||||
|
||||
if err := json.Unmarshal(data, &sp); err != nil {
|
||||
var s string
|
||||
if err = json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if s != "*" {
|
||||
return Errorf("invalid principal '%v'", s)
|
||||
}
|
||||
|
||||
sp.AWS = set.CreateStringSet("*")
|
||||
}
|
||||
|
||||
*p = Principal(sp)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewPrincipal - creates new Principal.
|
||||
func NewPrincipal(principals ...string) Principal {
|
||||
return Principal{AWS: set.CreateStringSet(principals...)}
|
||||
}
|
|
@ -1,141 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
)
|
||||
|
||||
func TestPrincipalIsValid(t *testing.T) {
|
||||
testCases := []struct {
|
||||
principal Principal
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewPrincipal("*"), true},
|
||||
{NewPrincipal("arn:aws:iam::AccountNumber:root"), true},
|
||||
{NewPrincipal(), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.principal.IsValid()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrincipalIntersection(t *testing.T) {
|
||||
testCases := []struct {
|
||||
principal Principal
|
||||
principalToIntersect Principal
|
||||
expectedResult set.StringSet
|
||||
}{
|
||||
{NewPrincipal("*"), NewPrincipal("*"), set.CreateStringSet("*")},
|
||||
{NewPrincipal("arn:aws:iam::AccountNumber:root"), NewPrincipal("arn:aws:iam::AccountNumber:myuser"), set.CreateStringSet()},
|
||||
{NewPrincipal(), NewPrincipal("*"), set.CreateStringSet()},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.principal.Intersection(testCase.principalToIntersect)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrincipalMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
principal Principal
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{NewPrincipal("*"), []byte(`{"AWS":["*"]}`), false},
|
||||
{NewPrincipal("arn:aws:iam::AccountNumber:*"), []byte(`{"AWS":["arn:aws:iam::AccountNumber:*"]}`), false},
|
||||
{NewPrincipal(), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.principal)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, string(testCase.expectedResult), string(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrincipalMatch(t *testing.T) {
|
||||
testCases := []struct {
|
||||
principals Principal
|
||||
principal string
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewPrincipal("*"), "AccountNumber", true},
|
||||
{NewPrincipal("arn:aws:iam::*"), "arn:aws:iam::AccountNumber:root", true},
|
||||
{NewPrincipal("arn:aws:iam::AccountNumber:*"), "arn:aws:iam::TestAccountNumber:root", false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.principals.Match(testCase.principal)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrincipalUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult Principal
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`"*"`), NewPrincipal("*"), false},
|
||||
{[]byte(`{"AWS": "*"}`), NewPrincipal("*"), false},
|
||||
{[]byte(`{"AWS": "arn:aws:iam::AccountNumber:*"}`), NewPrincipal("arn:aws:iam::AccountNumber:*"), false},
|
||||
{[]byte(`"arn:aws:iam::AccountNumber:*"`), NewPrincipal(), true},
|
||||
{[]byte(`["arn:aws:iam::AccountNumber:*", "arn:aws:iam:AnotherAccount:*"]`), NewPrincipal(), true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result Principal
|
||||
err := json.Unmarshal(testCase.data, &result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v\n", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,139 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/pkg/bucket/policy/condition"
|
||||
"github.com/minio/minio/pkg/wildcard"
|
||||
)
|
||||
|
||||
// ResourceARNPrefix - resource ARN prefix as per AWS S3 specification.
|
||||
const ResourceARNPrefix = "arn:aws:s3:::"
|
||||
|
||||
// Resource - resource in policy statement.
|
||||
type Resource struct {
|
||||
BucketName string
|
||||
Pattern string
|
||||
}
|
||||
|
||||
func (r Resource) isBucketPattern() bool {
|
||||
return !strings.Contains(r.Pattern, "/")
|
||||
}
|
||||
|
||||
func (r Resource) isObjectPattern() bool {
|
||||
return strings.Contains(r.Pattern, "/") || strings.Contains(r.BucketName, "*")
|
||||
}
|
||||
|
||||
// IsValid - checks whether Resource is valid or not.
|
||||
func (r Resource) IsValid() bool {
|
||||
return r.BucketName != "" && r.Pattern != ""
|
||||
}
|
||||
|
||||
// Match - matches object name with resource pattern.
|
||||
func (r Resource) Match(resource string, conditionValues map[string][]string) bool {
|
||||
pattern := r.Pattern
|
||||
for _, key := range condition.CommonKeys {
|
||||
// Empty values are not supported for policy variables.
|
||||
if rvalues, ok := conditionValues[key.Name()]; ok && rvalues[0] != "" {
|
||||
pattern = strings.Replace(pattern, key.VarName(), rvalues[0], -1)
|
||||
}
|
||||
}
|
||||
|
||||
return wildcard.Match(pattern, resource)
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes Resource to JSON data.
|
||||
func (r Resource) MarshalJSON() ([]byte, error) {
|
||||
if !r.IsValid() {
|
||||
return nil, Errorf("invalid resource %v", r)
|
||||
}
|
||||
|
||||
return json.Marshal(r.String())
|
||||
}
|
||||
|
||||
func (r Resource) String() string {
|
||||
return ResourceARNPrefix + r.Pattern
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to Resource.
|
||||
func (r *Resource) UnmarshalJSON(data []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(data, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
parsedResource, err := parseResource(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*r = parsedResource
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate - checks whether the Resource applies to the given bucket.
|
||||
func (r Resource) Validate(bucketName string) error {
|
||||
if !r.IsValid() {
|
||||
return Errorf("invalid resource")
|
||||
}
|
||||
|
||||
if !wildcard.Match(r.BucketName, bucketName) {
|
||||
return Errorf("bucket name does not match")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseResource - parses string to Resource.
|
||||
func parseResource(s string) (Resource, error) {
|
||||
if !strings.HasPrefix(s, ResourceARNPrefix) {
|
||||
return Resource{}, Errorf("invalid resource '%v'", s)
|
||||
}
|
||||
|
||||
pattern := strings.TrimPrefix(s, ResourceARNPrefix)
|
||||
tokens := strings.SplitN(pattern, "/", 2)
|
||||
bucketName := tokens[0]
|
||||
if bucketName == "" {
|
||||
return Resource{}, Errorf("invalid resource format '%v'", s)
|
||||
}
|
||||
|
||||
return Resource{
|
||||
BucketName: bucketName,
|
||||
Pattern: pattern,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewResource - creates new resource.
|
||||
func NewResource(bucketName, keyName string) Resource {
|
||||
pattern := bucketName
|
||||
if keyName != "" {
|
||||
if !strings.HasPrefix(keyName, "/") {
|
||||
pattern += "/"
|
||||
}
|
||||
|
||||
pattern += keyName
|
||||
}
|
||||
|
||||
return Resource{
|
||||
BucketName: bucketName,
|
||||
Pattern: pattern,
|
||||
}
|
||||
}
|
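A hedged sketch of the policy-variable substitution performed by Match above; it is not part of the deleted file and assumes aws:username is among condition.CommonKeys, as in upstream MinIO:

package main

import (
    "fmt"

    "github.com/minio/minio/pkg/bucket/policy"
)

func main() {
    // Resource pattern containing a policy variable: arn:aws:s3:::mybucket/home/${aws:username}/*
    r := policy.NewResource("mybucket", "/home/${aws:username}/*")

    // Condition values are keyed by the key name without the "aws:" prefix.
    conditionValues := map[string][]string{
        "username": {"johndoe"},
    }

    fmt.Println(r.Match("mybucket/home/johndoe/report.csv", conditionValues)) // true
    fmt.Println(r.Match("mybucket/home/alice/report.csv", conditionValues))   // false
}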
|
@ -1,221 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestResourceIsBucketPattern(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resource Resource
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewResource("*", ""), true},
|
||||
{NewResource("mybucket", ""), true},
|
||||
{NewResource("mybucket*", ""), true},
|
||||
{NewResource("mybucket?0", ""), true},
|
||||
{NewResource("", "*"), false},
|
||||
{NewResource("*", "*"), false},
|
||||
{NewResource("mybucket", "*"), false},
|
||||
{NewResource("mybucket*", "/myobject"), false},
|
||||
{NewResource("mybucket?0", "/2010/photos/*"), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.resource.isBucketPattern()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceIsObjectPattern(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resource Resource
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewResource("*", ""), true},
|
||||
{NewResource("mybucket*", ""), true},
|
||||
{NewResource("", "*"), true},
|
||||
{NewResource("*", "*"), true},
|
||||
{NewResource("mybucket", "*"), true},
|
||||
{NewResource("mybucket*", "/myobject"), true},
|
||||
{NewResource("mybucket?0", "/2010/photos/*"), true},
|
||||
{NewResource("mybucket", ""), false},
|
||||
{NewResource("mybucket?0", ""), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.resource.isObjectPattern()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceIsValid(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resource Resource
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewResource("*", ""), true},
|
||||
{NewResource("mybucket*", ""), true},
|
||||
{NewResource("*", "*"), true},
|
||||
{NewResource("mybucket", "*"), true},
|
||||
{NewResource("mybucket*", "/myobject"), true},
|
||||
{NewResource("mybucket?0", "/2010/photos/*"), true},
|
||||
{NewResource("mybucket", ""), true},
|
||||
{NewResource("mybucket?0", ""), true},
|
||||
{NewResource("", ""), false},
|
||||
{NewResource("", "*"), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.resource.IsValid()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceMatch(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resource Resource
|
||||
objectName string
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewResource("*", ""), "mybucket", true},
|
||||
{NewResource("*", ""), "mybucket/myobject", true},
|
||||
{NewResource("mybucket*", ""), "mybucket", true},
|
||||
{NewResource("mybucket*", ""), "mybucket/myobject", true},
|
||||
{NewResource("", "*"), "/myobject", true},
|
||||
{NewResource("*", "*"), "mybucket/myobject", true},
|
||||
{NewResource("mybucket", "*"), "mybucket/myobject", true},
|
||||
{NewResource("mybucket*", "/myobject"), "mybucket/myobject", true},
|
||||
{NewResource("mybucket*", "/myobject"), "mybucket100/myobject", true},
|
||||
{NewResource("mybucket?0", "/2010/photos/*"), "mybucket20/2010/photos/1.jpg", true},
|
||||
{NewResource("mybucket", ""), "mybucket", true},
|
||||
{NewResource("mybucket?0", ""), "mybucket30", true},
|
||||
{NewResource("", "*"), "mybucket/myobject", false},
|
||||
{NewResource("*", "*"), "mybucket", false},
|
||||
{NewResource("mybucket", "*"), "mybucket10/myobject", false},
|
||||
{NewResource("mybucket?0", "/2010/photos/*"), "mybucket0/2010/photos/1.jpg", false},
|
||||
{NewResource("mybucket", ""), "mybucket/myobject", false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.resource.Match(testCase.objectName, nil)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resource Resource
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{NewResource("*", ""), []byte(`"arn:aws:s3:::*"`), false},
|
||||
{NewResource("mybucket*", ""), []byte(`"arn:aws:s3:::mybucket*"`), false},
|
||||
{NewResource("mybucket", ""), []byte(`"arn:aws:s3:::mybucket"`), false},
|
||||
{NewResource("*", "*"), []byte(`"arn:aws:s3:::*/*"`), false},
|
||||
{NewResource("mybucket", "*"), []byte(`"arn:aws:s3:::mybucket/*"`), false},
|
||||
{NewResource("mybucket*", "myobject"), []byte(`"arn:aws:s3:::mybucket*/myobject"`), false},
|
||||
{NewResource("mybucket?0", "/2010/photos/*"), []byte(`"arn:aws:s3:::mybucket?0/2010/photos/*"`), false},
|
||||
{Resource{}, nil, true},
|
||||
{NewResource("", "*"), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.resource)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult Resource
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`"arn:aws:s3:::*"`), NewResource("*", ""), false},
|
||||
{[]byte(`"arn:aws:s3:::mybucket*"`), NewResource("mybucket*", ""), false},
|
||||
{[]byte(`"arn:aws:s3:::mybucket"`), NewResource("mybucket", ""), false},
|
||||
{[]byte(`"arn:aws:s3:::*/*"`), NewResource("*", "*"), false},
|
||||
{[]byte(`"arn:aws:s3:::mybucket/*"`), NewResource("mybucket", "*"), false},
|
||||
{[]byte(`"arn:aws:s3:::mybucket*/myobject"`), NewResource("mybucket*", "myobject"), false},
|
||||
{[]byte(`"arn:aws:s3:::mybucket?0/2010/photos/*"`), NewResource("mybucket?0", "/2010/photos/*"), false},
|
||||
{[]byte(`"mybucket/myobject*"`), Resource{}, true},
|
||||
{[]byte(`"arn:aws:s3:::/*"`), Resource{}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result Resource
|
||||
err := json.Unmarshal(testCase.data, &result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceValidate(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resource Resource
|
||||
bucketName string
|
||||
expectErr bool
|
||||
}{
|
||||
{NewResource("mybucket", "/myobject*"), "mybucket", false},
|
||||
{NewResource("", "/myobject*"), "yourbucket", true},
|
||||
{NewResource("mybucket", "/myobject*"), "yourbucket", true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
err := testCase.resource.Validate(testCase.bucketName)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,165 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
)
|
||||
|
||||
// ResourceSet - set of resources in policy statement.
|
||||
type ResourceSet map[Resource]struct{}
|
||||
|
||||
// bucketResourceExists - checks if at least one bucket resource exists in the set.
|
||||
func (resourceSet ResourceSet) bucketResourceExists() bool {
|
||||
for resource := range resourceSet {
|
||||
if resource.isBucketPattern() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// objectResourceExists - checks if at least one object resource exists in the set.
|
||||
func (resourceSet ResourceSet) objectResourceExists() bool {
|
||||
for resource := range resourceSet {
|
||||
if resource.isObjectPattern() {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// Add - adds resource to resource set.
|
||||
func (resourceSet ResourceSet) Add(resource Resource) {
|
||||
resourceSet[resource] = struct{}{}
|
||||
}
|
||||
|
||||
// Equals - checks whether given resource set is equal to current resource set or not.
|
||||
func (resourceSet ResourceSet) Equals(sresourceSet ResourceSet) bool {
|
||||
// If length of set is not equal to length of given set, the
|
||||
// set is not equal to given set.
|
||||
if len(resourceSet) != len(sresourceSet) {
|
||||
return false
|
||||
}
|
||||
|
||||
// As both sets are equal in length, check that every element is also present in the given set.
|
||||
for k := range resourceSet {
|
||||
if _, ok := sresourceSet[k]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Intersection - returns the resources present in both ResourceSets.
|
||||
func (resourceSet ResourceSet) Intersection(sset ResourceSet) ResourceSet {
|
||||
nset := NewResourceSet()
|
||||
for k := range resourceSet {
|
||||
if _, ok := sset[k]; ok {
|
||||
nset.Add(k)
|
||||
}
|
||||
}
|
||||
|
||||
return nset
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes ResourceSet to JSON data.
|
||||
func (resourceSet ResourceSet) MarshalJSON() ([]byte, error) {
|
||||
if len(resourceSet) == 0 {
|
||||
return nil, Errorf("empty resources not allowed")
|
||||
}
|
||||
|
||||
resources := []Resource{}
|
||||
for resource := range resourceSet {
|
||||
resources = append(resources, resource)
|
||||
}
|
||||
|
||||
return json.Marshal(resources)
|
||||
}
|
||||
|
||||
// Match - matches the object name against any resource pattern in the resource set.
|
||||
func (resourceSet ResourceSet) Match(resource string, conditionValues map[string][]string) bool {
|
||||
for r := range resourceSet {
|
||||
if r.Match(resource, conditionValues) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (resourceSet ResourceSet) String() string {
|
||||
resources := []string{}
|
||||
for resource := range resourceSet {
|
||||
resources = append(resources, resource.String())
|
||||
}
|
||||
sort.Strings(resources)
|
||||
|
||||
return fmt.Sprintf("%v", resources)
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to ResourceSet.
|
||||
func (resourceSet *ResourceSet) UnmarshalJSON(data []byte) error {
|
||||
var sset set.StringSet
|
||||
if err := json.Unmarshal(data, &sset); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*resourceSet = make(ResourceSet)
|
||||
for _, s := range sset.ToSlice() {
|
||||
resource, err := parseResource(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, found := (*resourceSet)[resource]; found {
|
||||
return Errorf("duplicate resource '%v' found", s)
|
||||
}
|
||||
|
||||
resourceSet.Add(resource)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate - checks whether every resource in the set applies to the given bucket.
|
||||
func (resourceSet ResourceSet) Validate(bucketName string) error {
|
||||
for resource := range resourceSet {
|
||||
if err := resource.Validate(bucketName); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewResourceSet - creates new resource set.
|
||||
func NewResourceSet(resources ...Resource) ResourceSet {
|
||||
resourceSet := make(ResourceSet)
|
||||
for _, resource := range resources {
|
||||
resourceSet.Add(resource)
|
||||
}
|
||||
|
||||
return resourceSet
|
||||
}
|
|
@ -1,240 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestResourceSetBucketResourceExists(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resourceSet ResourceSet
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewResourceSet(NewResource("*", "")), true},
|
||||
{NewResourceSet(NewResource("mybucket", "")), true},
|
||||
{NewResourceSet(NewResource("mybucket*", "")), true},
|
||||
{NewResourceSet(NewResource("mybucket?0", "")), true},
|
||||
{NewResourceSet(NewResource("mybucket", "/2010/photos/*"), NewResource("mybucket", "")), true},
|
||||
{NewResourceSet(NewResource("", "*")), false},
|
||||
{NewResourceSet(NewResource("*", "*")), false},
|
||||
{NewResourceSet(NewResource("mybucket", "*")), false},
|
||||
{NewResourceSet(NewResource("mybucket*", "/myobject")), false},
|
||||
{NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.resourceSet.bucketResourceExists()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceSetObjectResourceExists(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resourceSet ResourceSet
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewResourceSet(NewResource("*", "")), true},
|
||||
{NewResourceSet(NewResource("mybucket*", "")), true},
|
||||
{NewResourceSet(NewResource("", "*")), true},
|
||||
{NewResourceSet(NewResource("*", "*")), true},
|
||||
{NewResourceSet(NewResource("mybucket", "*")), true},
|
||||
{NewResourceSet(NewResource("mybucket*", "/myobject")), true},
|
||||
{NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), true},
|
||||
{NewResourceSet(NewResource("mybucket", ""), NewResource("mybucket", "/2910/photos/*")), true},
|
||||
{NewResourceSet(NewResource("mybucket", "")), false},
|
||||
{NewResourceSet(NewResource("mybucket?0", "")), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.resourceSet.objectResourceExists()
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceSetAdd(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resourceSet ResourceSet
|
||||
resource Resource
|
||||
expectedResult ResourceSet
|
||||
}{
|
||||
{NewResourceSet(), NewResource("mybucket", "/myobject*"),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*"))},
|
||||
{NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
NewResource("mybucket", "/yourobject*"),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*"),
|
||||
NewResource("mybucket", "/yourobject*"))},
|
||||
{NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
NewResource("mybucket", "/myobject*"),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*"))},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
testCase.resourceSet.Add(testCase.resource)
|
||||
|
||||
if !reflect.DeepEqual(testCase.resourceSet, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.resourceSet)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceSetIntersection(t *testing.T) {
|
||||
testCases := []struct {
|
||||
set ResourceSet
|
||||
setToIntersect ResourceSet
|
||||
expectedResult ResourceSet
|
||||
}{
|
||||
{NewResourceSet(), NewResourceSet(NewResource("mybucket", "/myobject*")), NewResourceSet()},
|
||||
{NewResourceSet(NewResource("mybucket", "/myobject*")), NewResourceSet(), NewResourceSet()},
|
||||
{NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*"), NewResource("mybucket", "/yourobject*")),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*"))},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.set.Intersection(testCase.setToIntersect)
|
||||
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, testCase.set)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceSetMarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resoruceSet ResourceSet
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
[]byte(`["arn:aws:s3:::mybucket/myobject*"]`), false},
|
||||
{NewResourceSet(NewResource("mybucket", "/photos/myobject*")),
|
||||
[]byte(`["arn:aws:s3:::mybucket/photos/myobject*"]`), false},
|
||||
{NewResourceSet(), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.resoruceSet)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceSetMatch(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resourceSet ResourceSet
|
||||
resource string
|
||||
expectedResult bool
|
||||
}{
|
||||
{NewResourceSet(NewResource("*", "")), "mybucket", true},
|
||||
{NewResourceSet(NewResource("*", "")), "mybucket/myobject", true},
|
||||
{NewResourceSet(NewResource("mybucket*", "")), "mybucket", true},
|
||||
{NewResourceSet(NewResource("mybucket*", "")), "mybucket/myobject", true},
|
||||
{NewResourceSet(NewResource("", "*")), "/myobject", true},
|
||||
{NewResourceSet(NewResource("*", "*")), "mybucket/myobject", true},
|
||||
{NewResourceSet(NewResource("mybucket", "*")), "mybucket/myobject", true},
|
||||
{NewResourceSet(NewResource("mybucket*", "/myobject")), "mybucket/myobject", true},
|
||||
{NewResourceSet(NewResource("mybucket*", "/myobject")), "mybucket100/myobject", true},
|
||||
{NewResourceSet(NewResource("mybucket?0", "/2010/photos/*")), "mybucket20/2010/photos/1.jpg", true},
|
||||
{NewResourceSet(NewResource("mybucket", "")), "mybucket", true},
|
||||
{NewResourceSet(NewResource("mybucket?0", "")), "mybucket30", true},
|
||||
{NewResourceSet(NewResource("mybucket?0", "/2010/photos/*"),
|
||||
NewResource("mybucket", "/2010/photos/*")), "mybucket/2010/photos/1.jpg", true},
|
||||
{NewResourceSet(NewResource("", "*")), "mybucket/myobject", false},
|
||||
{NewResourceSet(NewResource("*", "*")), "mybucket", false},
|
||||
{NewResourceSet(NewResource("mybucket", "*")), "mybucket10/myobject", false},
|
||||
{NewResourceSet(NewResource("mybucket", "")), "mybucket/myobject", false},
|
||||
{NewResourceSet(), "mybucket/myobject", false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.resourceSet.Match(testCase.resource, nil)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceSetUnmarshalJSON(t *testing.T) {
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult ResourceSet
|
||||
expectErr bool
|
||||
}{
|
||||
{[]byte(`"arn:aws:s3:::mybucket/myobject*"`),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")), false},
|
||||
{[]byte(`"arn:aws:s3:::mybucket/photos/myobject*"`),
|
||||
NewResourceSet(NewResource("mybucket", "/photos/myobject*")), false},
|
||||
{[]byte(`"arn:aws:s3:::mybucket"`), NewResourceSet(NewResource("mybucket", "")), false},
|
||||
{[]byte(`"mybucket/myobject*"`), nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result ResourceSet
|
||||
err := json.Unmarshal(testCase.data, &result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceSetValidate(t *testing.T) {
|
||||
testCases := []struct {
|
||||
resourceSet ResourceSet
|
||||
bucketName string
|
||||
expectErr bool
|
||||
}{
|
||||
{NewResourceSet(NewResource("mybucket", "/myobject*")), "mybucket", false},
|
||||
{NewResourceSet(NewResource("", "/myobject*")), "yourbucket", true},
|
||||
{NewResourceSet(NewResource("mybucket", "/myobject*")), "yourbucket", true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
err := testCase.resourceSet.Validate(testCase.bucketName)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,155 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/pkg/bucket/policy/condition"
|
||||
)
|
||||
|
||||
// Statement - policy statement.
|
||||
type Statement struct {
|
||||
SID ID `json:"Sid,omitempty"`
|
||||
Effect Effect `json:"Effect"`
|
||||
Principal Principal `json:"Principal"`
|
||||
Actions ActionSet `json:"Action"`
|
||||
Resources ResourceSet `json:"Resource"`
|
||||
Conditions condition.Functions `json:"Condition,omitempty"`
|
||||
}
|
||||
|
||||
// IsAllowed - checks whether the given args are allowed by this statement to continue the REST API call.
|
||||
func (statement Statement) IsAllowed(args Args) bool {
|
||||
check := func() bool {
|
||||
if !statement.Principal.Match(args.AccountName) {
|
||||
return false
|
||||
}
|
||||
|
||||
if !statement.Actions.Contains(args.Action) {
|
||||
return false
|
||||
}
|
||||
|
||||
resource := args.BucketName
|
||||
if args.ObjectName != "" {
|
||||
if !strings.HasPrefix(args.ObjectName, "/") {
|
||||
resource += "/"
|
||||
}
|
||||
|
||||
resource += args.ObjectName
|
||||
}
|
||||
|
||||
if !statement.Resources.Match(resource, args.ConditionValues) {
|
||||
return false
|
||||
}
|
||||
|
||||
return statement.Conditions.Evaluate(args.ConditionValues)
|
||||
}
|
||||
|
||||
return statement.Effect.IsAllowed(check())
|
||||
}
|
||||
|
||||
// isValid - checks whether statement is valid or not.
|
||||
func (statement Statement) isValid() error {
|
||||
if !statement.Effect.IsValid() {
|
||||
return Errorf("invalid Effect %v", statement.Effect)
|
||||
}
|
||||
|
||||
if !statement.Principal.IsValid() {
|
||||
return Errorf("invalid Principal %v", statement.Principal)
|
||||
}
|
||||
|
||||
if len(statement.Actions) == 0 {
|
||||
return Errorf("Action must not be empty")
|
||||
}
|
||||
|
||||
if len(statement.Resources) == 0 {
|
||||
return Errorf("Resource must not be empty")
|
||||
}
|
||||
|
||||
for action := range statement.Actions {
|
||||
if action.isObjectAction() {
|
||||
if !statement.Resources.objectResourceExists() {
|
||||
return Errorf("unsupported Resource found %v for action %v", statement.Resources, action)
|
||||
}
|
||||
} else {
|
||||
if !statement.Resources.bucketResourceExists() {
|
||||
return Errorf("unsupported Resource found %v for action %v", statement.Resources, action)
|
||||
}
|
||||
}
|
||||
|
||||
keys := statement.Conditions.Keys()
|
||||
keyDiff := keys.Difference(actionConditionKeyMap[action])
|
||||
if !keyDiff.IsEmpty() {
|
||||
return Errorf("unsupported condition keys '%v' used for action '%v'", keyDiff, action)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalJSON - encodes Statement to JSON data.
|
||||
func (statement Statement) MarshalJSON() ([]byte, error) {
|
||||
if err := statement.isValid(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// subtype to avoid recursive call to MarshalJSON()
|
||||
type subStatement Statement
|
||||
ss := subStatement(statement)
|
||||
return json.Marshal(ss)
|
||||
}
|
||||
|
||||
// UnmarshalJSON - decodes JSON data to Statement.
|
||||
func (statement *Statement) UnmarshalJSON(data []byte) error {
|
||||
// subtype to avoid recursive call to UnmarshalJSON()
|
||||
type subStatement Statement
|
||||
var ss subStatement
|
||||
|
||||
if err := json.Unmarshal(data, &ss); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s := Statement(ss)
|
||||
if err := s.isValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*statement = s
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate - checks whether the Statement applies to the given bucket.
|
||||
func (statement Statement) Validate(bucketName string) error {
|
||||
if err := statement.isValid(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return statement.Resources.Validate(bucketName)
|
||||
}
|
||||
|
||||
// NewStatement - creates a new statement.
|
||||
func NewStatement(effect Effect, principal Principal, actionSet ActionSet, resourceSet ResourceSet, conditions condition.Functions) Statement {
|
||||
return Statement{
|
||||
Effect: effect,
|
||||
Principal: principal,
|
||||
Actions: actionSet,
|
||||
Resources: resourceSet,
|
||||
Conditions: conditions,
|
||||
}
|
||||
}
|
|
@ -1,571 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package policy
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/minio/pkg/bucket/policy/condition"
|
||||
)
|
||||
|
||||
func TestStatementIsAllowed(t *testing.T) {
|
||||
case1Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetBucketLocationAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("*", "")),
|
||||
condition.NewFunctions(),
|
||||
)
|
||||
|
||||
case2Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetObjectAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(),
|
||||
)
|
||||
|
||||
_, IPNet1, err := net.ParseCIDR("192.168.1.0/24")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
func1, err := condition.NewIPAddressFunc(
|
||||
condition.AWSSourceIP,
|
||||
IPNet1,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
case3Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetObjectAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(func1),
|
||||
)
|
||||
|
||||
case4Statement := NewStatement(
|
||||
Deny,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetObjectAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(func1),
|
||||
)
|
||||
|
||||
anonGetBucketLocationArgs := Args{
|
||||
AccountName: "Q3AM3UQ867SPQQA43P2F",
|
||||
Action: GetBucketLocationAction,
|
||||
BucketName: "mybucket",
|
||||
ConditionValues: map[string][]string{},
|
||||
}
|
||||
|
||||
anonPutObjectActionArgs := Args{
|
||||
AccountName: "Q3AM3UQ867SPQQA43P2F",
|
||||
Action: PutObjectAction,
|
||||
BucketName: "mybucket",
|
||||
ConditionValues: map[string][]string{
|
||||
"x-amz-copy-source": {"mybucket/myobject"},
|
||||
"SourceIp": {"192.168.1.10"},
|
||||
},
|
||||
ObjectName: "myobject",
|
||||
}
|
||||
|
||||
anonGetObjectActionArgs := Args{
|
||||
AccountName: "Q3AM3UQ867SPQQA43P2F",
|
||||
Action: GetObjectAction,
|
||||
BucketName: "mybucket",
|
||||
ConditionValues: map[string][]string{},
|
||||
ObjectName: "myobject",
|
||||
}
|
||||
|
||||
getBucketLocationArgs := Args{
|
||||
AccountName: "Q3AM3UQ867SPQQA43P2F",
|
||||
Action: GetBucketLocationAction,
|
||||
BucketName: "mybucket",
|
||||
ConditionValues: map[string][]string{},
|
||||
IsOwner: true,
|
||||
}
|
||||
|
||||
putObjectActionArgs := Args{
|
||||
AccountName: "Q3AM3UQ867SPQQA43P2F",
|
||||
Action: PutObjectAction,
|
||||
BucketName: "mybucket",
|
||||
ConditionValues: map[string][]string{
|
||||
"x-amz-copy-source": {"mybucket/myobject"},
|
||||
"SourceIp": {"192.168.1.10"},
|
||||
},
|
||||
IsOwner: true,
|
||||
ObjectName: "myobject",
|
||||
}
|
||||
|
||||
getObjectActionArgs := Args{
|
||||
AccountName: "Q3AM3UQ867SPQQA43P2F",
|
||||
Action: GetObjectAction,
|
||||
BucketName: "mybucket",
|
||||
ConditionValues: map[string][]string{},
|
||||
IsOwner: true,
|
||||
ObjectName: "myobject",
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
statement Statement
|
||||
args Args
|
||||
expectedResult bool
|
||||
}{
|
||||
{case1Statement, anonGetBucketLocationArgs, true},
|
||||
{case1Statement, anonPutObjectActionArgs, true},
|
||||
{case1Statement, anonGetObjectActionArgs, false},
|
||||
{case1Statement, getBucketLocationArgs, true},
|
||||
{case1Statement, putObjectActionArgs, true},
|
||||
{case1Statement, getObjectActionArgs, false},
|
||||
|
||||
{case2Statement, anonGetBucketLocationArgs, false},
|
||||
{case2Statement, anonPutObjectActionArgs, true},
|
||||
{case2Statement, anonGetObjectActionArgs, true},
|
||||
{case2Statement, getBucketLocationArgs, false},
|
||||
{case2Statement, putObjectActionArgs, true},
|
||||
{case2Statement, getObjectActionArgs, true},
|
||||
|
||||
{case3Statement, anonGetBucketLocationArgs, false},
|
||||
{case3Statement, anonPutObjectActionArgs, true},
|
||||
{case3Statement, anonGetObjectActionArgs, false},
|
||||
{case3Statement, getBucketLocationArgs, false},
|
||||
{case3Statement, putObjectActionArgs, true},
|
||||
{case3Statement, getObjectActionArgs, false},
|
||||
|
||||
{case4Statement, anonGetBucketLocationArgs, true},
|
||||
{case4Statement, anonPutObjectActionArgs, false},
|
||||
{case4Statement, anonGetObjectActionArgs, true},
|
||||
{case4Statement, getBucketLocationArgs, true},
|
||||
{case4Statement, putObjectActionArgs, false},
|
||||
{case4Statement, getObjectActionArgs, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result := testCase.statement.IsAllowed(testCase.args)
|
||||
|
||||
if result != testCase.expectedResult {
|
||||
t.Fatalf("case %v: expected: %v, got: %v\n", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatementIsValid(t *testing.T) {
|
||||
_, IPNet1, err := net.ParseCIDR("192.168.1.0/24")
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
func1, err := condition.NewIPAddressFunc(
|
||||
condition.AWSSourceIP,
|
||||
IPNet1,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
func2, err := condition.NewStringEqualsFunc(
|
||||
condition.S3XAmzCopySource,
|
||||
"mybucket/myobject",
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
statement Statement
|
||||
expectErr bool
|
||||
}{
|
||||
// Invalid effect error.
|
||||
{NewStatement(
|
||||
Effect("foo"),
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetBucketLocationAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("*", "")),
|
||||
condition.NewFunctions(),
|
||||
), true},
|
||||
// Invalid principal error.
|
||||
{NewStatement(
|
||||
Allow,
|
||||
NewPrincipal(),
|
||||
NewActionSet(GetBucketLocationAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("*", "")),
|
||||
condition.NewFunctions(),
|
||||
), true},
|
||||
// Empty actions error.
|
||||
{NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(),
|
||||
NewResourceSet(NewResource("*", "")),
|
||||
condition.NewFunctions(),
|
||||
), true},
|
||||
// Empty resources error.
|
||||
{NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetBucketLocationAction, PutObjectAction),
|
||||
NewResourceSet(),
|
||||
condition.NewFunctions(),
|
||||
), true},
|
||||
// Unsupported resource found for object action.
|
||||
{NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetBucketLocationAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "")),
|
||||
condition.NewFunctions(),
|
||||
), true},
|
||||
// Unsupported resource found for bucket action.
|
||||
{NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetBucketLocationAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "myobject*")),
|
||||
condition.NewFunctions(),
|
||||
), true},
|
||||
// Unsupported condition key for action.
|
||||
{NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetObjectAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "myobject*")),
|
||||
condition.NewFunctions(func1, func2),
|
||||
), true},
|
||||
{NewStatement(
|
||||
Deny,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetObjectAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "myobject*")),
|
||||
condition.NewFunctions(func1),
|
||||
), false},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
err := testCase.statement.isValid()
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatementMarshalJSON(t *testing.T) {
|
||||
case1Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(),
|
||||
)
|
||||
case1Statement.SID = "SomeId1"
|
||||
case1Data := []byte(`{"Sid":"SomeId1","Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"]}`)
|
||||
|
||||
func1, err := condition.NewNullFunc(
|
||||
condition.S3XAmzCopySource,
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
case2Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(func1),
|
||||
)
|
||||
case2Data := []byte(`{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"Null":{"s3:x-amz-copy-source":[true]}}}`)
|
||||
|
||||
func2, err := condition.NewNullFunc(
|
||||
condition.S3XAmzServerSideEncryption,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
case3Statement := NewStatement(
|
||||
Deny,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(func2),
|
||||
)
|
||||
case3Data := []byte(`{"Effect":"Deny","Principal":{"AWS":["*"]},"Action":["s3:PutObject"],"Resource":["arn:aws:s3:::mybucket/myobject*"],"Condition":{"Null":{"s3:x-amz-server-side-encryption":[false]}}}`)
|
||||
|
||||
case4Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetObjectAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "myobject*")),
|
||||
condition.NewFunctions(func1, func2),
|
||||
)
|
||||
|
||||
testCases := []struct {
|
||||
statement Statement
|
||||
expectedResult []byte
|
||||
expectErr bool
|
||||
}{
|
||||
{case1Statement, case1Data, false},
|
||||
{case2Statement, case2Data, false},
|
||||
{case3Statement, case3Data, false},
|
||||
// Invalid statement error.
|
||||
{case4Statement, nil, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
result, err := json.Marshal(testCase.statement)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, string(testCase.expectedResult), string(result))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatementUnmarshalJSON(t *testing.T) {
|
||||
case1Data := []byte(`{
|
||||
"Sid": "SomeId1",
|
||||
"Effect": "Allow",
|
||||
"Principal": "*",
|
||||
"Action": "s3:PutObject",
|
||||
"Resource": "arn:aws:s3:::mybucket/myobject*"
|
||||
}`)
|
||||
case1Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(),
|
||||
)
|
||||
case1Statement.SID = "SomeId1"
|
||||
|
||||
case2Data := []byte(`{
|
||||
"Effect": "Allow",
|
||||
"Principal": "*",
|
||||
"Action": "s3:PutObject",
|
||||
"Resource": "arn:aws:s3:::mybucket/myobject*",
|
||||
"Condition": {
|
||||
"Null": {
|
||||
"s3:x-amz-copy-source": true
|
||||
}
|
||||
}
|
||||
}`)
|
||||
func1, err := condition.NewNullFunc(
|
||||
condition.S3XAmzCopySource,
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
case2Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(func1),
|
||||
)
|
||||
|
||||
case3Data := []byte(`{
|
||||
"Effect": "Deny",
|
||||
"Principal": {
|
||||
"AWS": "*"
|
||||
},
|
||||
"Action": [
|
||||
"s3:PutObject",
|
||||
"s3:GetObject"
|
||||
],
|
||||
"Resource": "arn:aws:s3:::mybucket/myobject*",
|
||||
"Condition": {
|
||||
"Null": {
|
||||
"s3:x-amz-server-side-encryption": "false"
|
||||
}
|
||||
}
|
||||
}`)
|
||||
func2, err := condition.NewNullFunc(
|
||||
condition.S3XAmzServerSideEncryption,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
case3Statement := NewStatement(
|
||||
Deny,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(PutObjectAction, GetObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(func2),
|
||||
)
|
||||
|
||||
case4Data := []byte(`{
|
||||
"Effect": "Allow",
|
||||
"Principal": "Q3AM3UQ867SPQQA43P2F",
|
||||
"Action": "s3:PutObject",
|
||||
"Resource": "arn:aws:s3:::mybucket/myobject*"
|
||||
}`)
|
||||
|
||||
case5Data := []byte(`{
|
||||
"Principal": "*",
|
||||
"Action": "s3:PutObject",
|
||||
"Resource": "arn:aws:s3:::mybucket/myobject*"
|
||||
}`)
|
||||
|
||||
case6Data := []byte(`{
|
||||
"Effect": "Allow",
|
||||
"Action": "s3:PutObject",
|
||||
"Resource": "arn:aws:s3:::mybucket/myobject*"
|
||||
}`)
|
||||
|
||||
case7Data := []byte(`{
|
||||
"Effect": "Allow",
|
||||
"Principal": "*",
|
||||
"Resource": "arn:aws:s3:::mybucket/myobject*"
|
||||
}`)
|
||||
|
||||
case8Data := []byte(`{
|
||||
"Effect": "Allow",
|
||||
"Principal": "*",
|
||||
"Action": "s3:PutObject"
|
||||
}`)
|
||||
|
||||
case9Data := []byte(`{
|
||||
"Effect": "Allow",
|
||||
"Principal": "*",
|
||||
"Action": "s3:PutObject",
|
||||
"Resource": "arn:aws:s3:::mybucket/myobject*",
|
||||
"Condition": {
|
||||
}
|
||||
}`)
|
||||
|
||||
case10Data := []byte(`{
|
||||
"Effect": "Deny",
|
||||
"Principal": {
|
||||
"AWS": "*"
|
||||
},
|
||||
"Action": [
|
||||
"s3:PutObject",
|
||||
"s3:GetObject"
|
||||
],
|
||||
"Resource": "arn:aws:s3:::mybucket/myobject*",
|
||||
"Condition": {
|
||||
"StringEquals": {
|
||||
"s3:x-amz-copy-source": "yourbucket/myobject*"
|
||||
}
|
||||
}
|
||||
}`)
|
||||
|
||||
testCases := []struct {
|
||||
data []byte
|
||||
expectedResult Statement
|
||||
expectErr bool
|
||||
}{
|
||||
{case1Data, case1Statement, false},
|
||||
{case2Data, case2Statement, false},
|
||||
{case3Data, case3Statement, false},
|
||||
// JSON unmarshaling error.
|
||||
{case4Data, Statement{}, true},
|
||||
// Invalid effect error.
|
||||
{case5Data, Statement{}, true},
|
||||
// Empty principal error.
|
||||
{case6Data, Statement{}, true},
|
||||
// Empty action error.
|
||||
{case7Data, Statement{}, true},
|
||||
// Empty resource error.
|
||||
{case8Data, Statement{}, true},
|
||||
// Empty condition error.
|
||||
{case9Data, Statement{}, true},
|
||||
// Unsupported condition key error.
|
||||
{case10Data, Statement{}, true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
var result Statement
|
||||
err := json.Unmarshal(testCase.data, &result)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
|
||||
if !testCase.expectErr {
|
||||
if !reflect.DeepEqual(result, testCase.expectedResult) {
|
||||
t.Fatalf("case %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStatementValidate(t *testing.T) {
|
||||
case1Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "/myobject*")),
|
||||
condition.NewFunctions(),
|
||||
)
|
||||
|
||||
func1, err := condition.NewNullFunc(
|
||||
condition.S3XAmzCopySource,
|
||||
true,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
func2, err := condition.NewNullFunc(
|
||||
condition.S3XAmzServerSideEncryption,
|
||||
false,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error. %v\n", err)
|
||||
}
|
||||
case2Statement := NewStatement(
|
||||
Allow,
|
||||
NewPrincipal("*"),
|
||||
NewActionSet(GetObjectAction, PutObjectAction),
|
||||
NewResourceSet(NewResource("mybucket", "myobject*")),
|
||||
condition.NewFunctions(func1, func2),
|
||||
)
|
||||
|
||||
testCases := []struct {
|
||||
statement Statement
|
||||
bucketName string
|
||||
expectErr bool
|
||||
}{
|
||||
{case1Statement, "mybucket", false},
|
||||
{case2Statement, "mybucket", true},
|
||||
{case1Statement, "yourbucket", true},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
err := testCase.statement.Validate(testCase.bucketName)
|
||||
expectErr := (err != nil)
|
||||
|
||||
if expectErr != testCase.expectErr {
|
||||
t.Fatalf("case %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,202 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package certs
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/rjeczalik/notify"
|
||||
)
|
||||
|
||||
// A Certs represents a certificate manager able to watch certificate
|
||||
// and key pairs for changes.
|
||||
type Certs struct {
|
||||
sync.RWMutex
|
||||
// user input params.
|
||||
certFile string
|
||||
keyFile string
|
||||
loadCert LoadX509KeyPairFunc
|
||||
|
||||
// points to the latest certificate.
|
||||
cert *tls.Certificate
|
||||
|
||||
// internal param to track for events, also
|
||||
// used to close the watcher.
|
||||
e chan notify.EventInfo
|
||||
}
|
||||
|
||||
// LoadX509KeyPairFunc - provides a type for custom cert loader function.
|
||||
type LoadX509KeyPairFunc func(certFile, keyFile string) (tls.Certificate, error)
|
||||
|
||||
// New initializes a new certs monitor.
|
||||
func New(certFile, keyFile string, loadCert LoadX509KeyPairFunc) (*Certs, error) {
|
||||
certFileIsLink, err := checkSymlink(certFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
keyFileIsLink, err := checkSymlink(keyFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &Certs{
|
||||
certFile: certFile,
|
||||
keyFile: keyFile,
|
||||
loadCert: loadCert,
|
||||
// Make the channel buffered to ensure no event is dropped. Notify will drop
|
||||
// an event if the receiver is not able to keep up with the sending pace.
|
||||
e: make(chan notify.EventInfo, 1),
|
||||
}
|
||||
|
||||
if certFileIsLink && keyFileIsLink {
|
||||
if err := c.watchSymlinks(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
if err := c.watch(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func checkSymlink(file string) (bool, error) {
|
||||
st, err := os.Lstat(file)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return st.Mode()&os.ModeSymlink == os.ModeSymlink, nil
|
||||
}
|
||||
|
||||
// watchSymlinks reloads symlinked files since fsnotify cannot watch
|
||||
// on symbolic links.
|
||||
func (c *Certs) watchSymlinks() (err error) {
|
||||
cert, err := c.loadCert(c.certFile, c.keyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Lock()
|
||||
c.cert = &cert
|
||||
c.Unlock()
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-c.e:
|
||||
// Once stopped, exit this routine.
|
||||
return
|
||||
case <-time.After(24 * time.Hour):
|
||||
cert, cerr := c.loadCert(c.certFile, c.keyFile)
|
||||
if cerr != nil {
|
||||
continue
|
||||
}
|
||||
c.Lock()
|
||||
c.cert = &cert
|
||||
c.Unlock()
|
||||
}
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
// watch starts watching for changes to the certificate
|
||||
// and key files. On any change the certificate and key
|
||||
// are reloaded. If there is an issue the loading will fail
|
||||
// and the old (if any) certificates and keys will continue
|
||||
// to be used.
|
||||
func (c *Certs) watch() (err error) {
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// Stop any watches previously set up after an error.
|
||||
notify.Stop(c.e)
|
||||
}
|
||||
}()
|
||||
|
||||
// Windows doesn't allow for watching file changes but instead allows
|
||||
// for directory changes only, while we can still watch for changes
|
||||
// on files on other platforms. Watch parent directory on all platforms
|
||||
// for simplicity.
|
||||
if err = notify.Watch(filepath.Dir(c.certFile), c.e, eventWrite...); err != nil {
|
||||
return err
|
||||
}
|
||||
if err = notify.Watch(filepath.Dir(c.keyFile), c.e, eventWrite...); err != nil {
|
||||
return err
|
||||
}
|
||||
cert, err := c.loadCert(c.certFile, c.keyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Lock()
|
||||
c.cert = &cert
|
||||
c.Unlock()
|
||||
go c.run()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Certs) run() {
|
||||
for event := range c.e {
|
||||
base := filepath.Base(event.Path())
|
||||
if isWriteEvent(event.Event()) {
|
||||
certChanged := base == filepath.Base(c.certFile)
|
||||
keyChanged := base == filepath.Base(c.keyFile)
|
||||
if certChanged || keyChanged {
|
||||
cert, err := c.loadCert(c.certFile, c.keyFile)
|
||||
if err != nil {
|
||||
// ignore the error and continue to use the
|
||||
// old certificates.
|
||||
continue
|
||||
}
|
||||
c.Lock()
|
||||
c.cert = &cert
|
||||
c.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetCertificateFunc provides a GetCertificate type for custom client implementations.
|
||||
type GetCertificateFunc func(hello *tls.ClientHelloInfo) (*tls.Certificate, error)
|
||||
|
||||
// GetCertificate returns the loaded certificate for use by
|
||||
// the tls.Config GetCertificate field in an http.Server.
|
||||
func (c *Certs) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
return c.cert, nil
|
||||
}
|
||||
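// Usage sketch (not part of the original file; assumes the caller imports
// "net/http"): hand GetCertificate to the server's TLS config so reloaded
// certificates are served without restarting the process.
//
//	c, err := New("server.crt", "server.key", tls.LoadX509KeyPair)
//	if err != nil {
//		// handle error
//	}
//	defer c.Stop()
//	srv := &http.Server{
//		Addr:      ":8443",
//		TLSConfig: &tls.Config{GetCertificate: c.GetCertificate},
//	}
//	_ = srv.ListenAndServeTLS("", "")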
|
||||
// GetClientCertificate returns the loaded certificate for use by
|
||||
// the tls.Config GetClientCertificate field when acting as a TLS client.
|
||||
func (c *Certs) GetClientCertificate(_ *tls.CertificateRequestInfo) (*tls.Certificate, error) {
|
||||
c.RLock()
|
||||
defer c.RUnlock()
|
||||
return c.cert, nil
|
||||
}
|
||||
|
||||
// Stop tells the loader to stop watching for changes to the
|
||||
// certificate and key files.
|
||||
func (c *Certs) Stop() {
|
||||
if c != nil {
|
||||
notify.Stop(c.e)
|
||||
}
|
||||
}
|
|
@ -1,134 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package certs_test
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/pkg/certs"
|
||||
)
|
||||
|
||||
func updateCerts(crt, key string) {
|
||||
// ignore error handling
|
||||
crtSource, _ := os.Open(crt)
|
||||
defer crtSource.Close()
|
||||
crtDest, _ := os.Create("server.crt")
|
||||
defer crtDest.Close()
|
||||
io.Copy(crtDest, crtSource)
|
||||
|
||||
keySource, _ := os.Open(key)
|
||||
defer keySource.Close()
|
||||
keyDest, _ := os.Create("server.key")
|
||||
defer keyDest.Close()
|
||||
io.Copy(keyDest, keySource)
|
||||
}
|
||||
|
||||
func TestCertNew(t *testing.T) {
|
||||
c, err := certs.New("server.crt", "server.key", tls.LoadX509KeyPair)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Stop()
|
||||
hello := &tls.ClientHelloInfo{}
|
||||
gcert, err := c.GetCertificate(hello)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
expectedCert, err := tls.LoadX509KeyPair("server.crt", "server.key")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) {
|
||||
t.Error("certificate doesn't match expected certificate")
|
||||
}
|
||||
_, err = certs.New("server.crt", "server2.key", tls.LoadX509KeyPair)
|
||||
if err == nil {
|
||||
t.Fatal("Expected to fail but got success")
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidPairAfterWrite(t *testing.T) {
|
||||
expectedCert, err := tls.LoadX509KeyPair("server2.crt", "server2.key")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c, err := certs.New("server.crt", "server.key", tls.LoadX509KeyPair)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer c.Stop()
|
||||
|
||||
updateCerts("server2.crt", "server2.key")
|
||||
defer updateCerts("server1.crt", "server1.key")
|
||||
|
||||
// Wait for the write event.
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
|
||||
hello := &tls.ClientHelloInfo{}
|
||||
gcert, err := c.GetCertificate(hello)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) {
|
||||
t.Error("certificate doesn't match expected certificate")
|
||||
}
|
||||
|
||||
rInfo := &tls.CertificateRequestInfo{}
|
||||
gcert, err = c.GetClientCertificate(rInfo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) {
|
||||
t.Error("client certificate doesn't match expected certificate")
|
||||
}
|
||||
}
|
||||
|
||||
func TestStop(t *testing.T) {
|
||||
expectedCert, err := tls.LoadX509KeyPair("server2.crt", "server2.key")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c, err := certs.New("server.crt", "server.key", tls.LoadX509KeyPair)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c.Stop()
|
||||
|
||||
// No one is listening for the event, so it will be ignored and the
|
||||
// certificate will not be reloaded.
|
||||
updateCerts("server2.crt", "server2.key")
|
||||
defer updateCerts("server1.crt", "server1.key")
|
||||
|
||||
hello := &tls.ClientHelloInfo{}
|
||||
gcert, err := c.GetCertificate(hello)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(gcert.Certificate, expectedCert.Certificate) {
|
||||
t.Error("certificate shouldn't match, but matched")
|
||||
}
|
||||
}
|
|
@ -1,31 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package certs
|
||||
|
||||
import (
|
||||
"github.com/rjeczalik/notify"
|
||||
)
|
||||
|
||||
// isWriteEvent checks if the event returned is a write event
|
||||
func isWriteEvent(event notify.Event) bool {
|
||||
for _, ev := range eventWrite {
|
||||
if event&ev != 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -1,26 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package certs
|
||||
|
||||
import "github.com/rjeczalik/notify"
|
||||
|
||||
var (
|
||||
// eventWrite contains the notify events that will cause a write
|
||||
eventWrite = []notify.Event{notify.InCloseWrite}
|
||||
)
|
|
@ -1,26 +0,0 @@
|
|||
// +build !linux
|
||||
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package certs
|
||||
|
||||
import "github.com/rjeczalik/notify"
|
||||
|
||||
var (
|
||||
// eventWrite contains the notify events that will cause a write
|
||||
eventWrite = []notify.Event{notify.Create, notify.Write}
|
||||
)
|
|
@ -1,22 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDqjCCApKgAwIBAgIJAOcv4FsrflS4MA0GCSqGSIb3DQEBCwUAMGoxCzAJBgNV
|
||||
BAYTAlVTMQswCQYDVQQIDAJDQTEVMBMGA1UEBwwMUmVkd29vZCBDaXR5MQ4wDAYD
|
||||
VQQKDAVNaW5pbzEUMBIGA1UECwwLRW5naW5lZXJpbmcxETAPBgNVBAMMCG1pbmlv
|
||||
LmlvMB4XDTE4MDUyMDA4NDc0MFoXDTE5MDUyMDA4NDc0MFowajELMAkGA1UEBhMC
|
||||
VVMxCzAJBgNVBAgMAkNBMRUwEwYDVQQHDAxSZWR3b29kIENpdHkxDjAMBgNVBAoM
|
||||
BU1pbmlvMRQwEgYDVQQLDAtFbmdpbmVlcmluZzERMA8GA1UEAwwIbWluaW8uaW8w
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPszxaYwn+mIz6IGuUlmvW
|
||||
wUs/yWTH4MC17qey2N5MqcxlfIWHUugcBsbGhi/e1druFW0s7YGMxp+G+Q1IezxX
|
||||
+VmVaJCN8AgSowbYgpRdpRQ+mhGeQby0JcvO16fyPnUJBz3GGel2bcK8fcQyT0TV
|
||||
apCiD9oURVmdvDSsRXz+EoPlOve8AWciHHgm1ItO5qdPRP5YtcJfLiwKnoYnpda2
|
||||
d9SzmYk+Q2JFArooF7/A1DYz9bXCMo3qp0gQlMpSMDR+MCbxHBzBBr+fQG8QdDrz
|
||||
WQ2slhniBhFDk0LuPCBLlSeIzkp+DoAGDXf3hWYhechlabZ7nfngg5erEz776WCF
|
||||
AgMBAAGjUzBRMB0GA1UdDgQWBBRzC09a+3AlbFDg6BsvELolmO8jYjAfBgNVHSME
|
||||
GDAWgBRzC09a+3AlbFDg6BsvELolmO8jYjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQBl0cx7qbidKjhoZ1Iv4pCD8xHZgtuWEDApPoGuMtVS66jJ
|
||||
+oj0ncD5xCtv9XqXtshE65FIsEWnDOIwa+kyjMnxHbFwxveWBT4W0twtqwbVs7NE
|
||||
I0So6cEmSx4+rB0XorY6mIbD3O9YAStelNhB1jVfQfIMSByYkcGq2Fh+B1LHlOrz
|
||||
06LJdwYMiILzK0c5fvjZvsDq/9EK+Xo66hphKjs5cl1t9WK7wKOCoZDt2lOTZqEq
|
||||
UWYGPWlTAxSWQxO4WnvSKqFdsRi8fOO3KlDq1eNqeDSGGCI0DTGgJxidHIpfOPEF
|
||||
s/zojgc5npE32/1n8og6gLcv7LIKelBfMhUrFTp7
|
||||
-----END CERTIFICATE-----
|
|
@ -1,28 +0,0 @@
|
|||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDPszxaYwn+mIz6
|
||||
IGuUlmvWwUs/yWTH4MC17qey2N5MqcxlfIWHUugcBsbGhi/e1druFW0s7YGMxp+G
|
||||
+Q1IezxX+VmVaJCN8AgSowbYgpRdpRQ+mhGeQby0JcvO16fyPnUJBz3GGel2bcK8
|
||||
fcQyT0TVapCiD9oURVmdvDSsRXz+EoPlOve8AWciHHgm1ItO5qdPRP5YtcJfLiwK
|
||||
noYnpda2d9SzmYk+Q2JFArooF7/A1DYz9bXCMo3qp0gQlMpSMDR+MCbxHBzBBr+f
|
||||
QG8QdDrzWQ2slhniBhFDk0LuPCBLlSeIzkp+DoAGDXf3hWYhechlabZ7nfngg5er
|
||||
Ez776WCFAgMBAAECggEBAJcHRyCWmcLm3MRY5MF0K9BKV9R3NnBdTuQ8OPdE2Ui3
|
||||
w6gcRuBi+eK/TrU3CAIqUXsEW5Hq1mQuXfwAh5cn/XYfG/QXx91eKBCdOTIgqY/6
|
||||
pODsmVkRhg0c2rl6eWYd4m6BNHsjhm8WWx9C+HJ4z528UpV1n2dUElkvbMHD+aKp
|
||||
Ndwd0W+0PCn/BjMn/sdyy01f8sfaK2Zoy7HBw/fGeBDNLFFj3Iz7BqXYeS+OyfLN
|
||||
B4xD5I5fFqt1iJeyqVPzGkOAYSqisijbM1GtZJCeVp37/+IDylCKTO3l8Xd8x73U
|
||||
qTYcYT3heSHyUC2xCM6Va2YkSrOHeqbq91QgHh9LVrUCgYEA9t/wE2S8TE2l1IG9
|
||||
68SXdhyaXTnB2qSL7ggY0uazPzBNLQpNMOxicZ6/4QGEi3hSuCqGxxGo9UEoTsVd
|
||||
pk8oIeDULdPVi4NQxSmkxUyArs/dzOMygUPyosOiEc8z6jWFFKDcQ7mnZnay8dZ4
|
||||
e4j+/hZDONtDrJ+zH2xu98ZrJPcCgYEA12CbSRbCkTiRj/dq8Qvgp6+ceTVcAbnk
|
||||
MWpAhZQaXHrG3XP0L7QTIHG/7a09Mln92zjuAFXDp/Vc5NdxeXcnj9j6oUAxq+0I
|
||||
dq+vibzjROemmvnmQvXGY9tc0ns6u7GjM0+Sicmas+IH4vuum/aRasABfVe2XBwe
|
||||
4fVs0n7yU2MCgYA7KevFGg0uVCV7yiQTzqdlvPEZim/00B5gyzv3vyYR7KdyNdfN
|
||||
87ib9imR6OU0738Td82ZA5h0PktEpXQOGUZK6DCxUuUIbE39Ej/UsMLeIh7LrV87
|
||||
L2eErlG25utQI8di7DIdYO7HVYcJAhcZs/k4N2mgxJtxUUyCKWBmrPycfQKBgAo7
|
||||
0uUUKcaQs4ntra0qbVBKbdrsiCSk2ozmiY5PTTlbtBtNqSqjGc2O2hnHA4Ni90b1
|
||||
W4m0iYlvhSxyeDfXS4/wNWh4DmQm7SIGkwaubPYXM7llamWAHB8eiziNFmtYs3J6
|
||||
s3HMnIczlEBayR8sBhjWaruz8TxLMcR2zubplUYVAoGBAItxeC9IT8BGJoZB++qM
|
||||
f2LXCqJ383x0sDHhwPMFPtwUTzAwc5BJgQe9zFktW5CBxsER+MnUZjlrarT1HQfH
|
||||
1Y1mJQXtwuBKG4pPPZphH0yoVlYcWkBTMw/KmlVlwRclEzRQwV3TPD+i6ieKeZhz
|
||||
9eZwhS3H+Zb/693WbBDyH8L+
|
||||
-----END PRIVATE KEY-----
|
|
@ -1,22 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDqjCCApKgAwIBAgIJAOcv4FsrflS4MA0GCSqGSIb3DQEBCwUAMGoxCzAJBgNV
|
||||
BAYTAlVTMQswCQYDVQQIDAJDQTEVMBMGA1UEBwwMUmVkd29vZCBDaXR5MQ4wDAYD
|
||||
VQQKDAVNaW5pbzEUMBIGA1UECwwLRW5naW5lZXJpbmcxETAPBgNVBAMMCG1pbmlv
|
||||
LmlvMB4XDTE4MDUyMDA4NDc0MFoXDTE5MDUyMDA4NDc0MFowajELMAkGA1UEBhMC
|
||||
VVMxCzAJBgNVBAgMAkNBMRUwEwYDVQQHDAxSZWR3b29kIENpdHkxDjAMBgNVBAoM
|
||||
BU1pbmlvMRQwEgYDVQQLDAtFbmdpbmVlcmluZzERMA8GA1UEAwwIbWluaW8uaW8w
|
||||
ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPszxaYwn+mIz6IGuUlmvW
|
||||
wUs/yWTH4MC17qey2N5MqcxlfIWHUugcBsbGhi/e1druFW0s7YGMxp+G+Q1IezxX
|
||||
+VmVaJCN8AgSowbYgpRdpRQ+mhGeQby0JcvO16fyPnUJBz3GGel2bcK8fcQyT0TV
|
||||
apCiD9oURVmdvDSsRXz+EoPlOve8AWciHHgm1ItO5qdPRP5YtcJfLiwKnoYnpda2
|
||||
d9SzmYk+Q2JFArooF7/A1DYz9bXCMo3qp0gQlMpSMDR+MCbxHBzBBr+fQG8QdDrz
|
||||
WQ2slhniBhFDk0LuPCBLlSeIzkp+DoAGDXf3hWYhechlabZ7nfngg5erEz776WCF
|
||||
AgMBAAGjUzBRMB0GA1UdDgQWBBRzC09a+3AlbFDg6BsvELolmO8jYjAfBgNVHSME
|
||||
GDAWgBRzC09a+3AlbFDg6BsvELolmO8jYjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG
|
||||
SIb3DQEBCwUAA4IBAQBl0cx7qbidKjhoZ1Iv4pCD8xHZgtuWEDApPoGuMtVS66jJ
|
||||
+oj0ncD5xCtv9XqXtshE65FIsEWnDOIwa+kyjMnxHbFwxveWBT4W0twtqwbVs7NE
|
||||
I0So6cEmSx4+rB0XorY6mIbD3O9YAStelNhB1jVfQfIMSByYkcGq2Fh+B1LHlOrz
|
||||
06LJdwYMiILzK0c5fvjZvsDq/9EK+Xo66hphKjs5cl1t9WK7wKOCoZDt2lOTZqEq
|
||||
UWYGPWlTAxSWQxO4WnvSKqFdsRi8fOO3KlDq1eNqeDSGGCI0DTGgJxidHIpfOPEF
|
||||
s/zojgc5npE32/1n8og6gLcv7LIKelBfMhUrFTp7
|
||||
-----END CERTIFICATE-----
|
|
@ -1,28 +0,0 @@
|
|||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDPszxaYwn+mIz6
|
||||
IGuUlmvWwUs/yWTH4MC17qey2N5MqcxlfIWHUugcBsbGhi/e1druFW0s7YGMxp+G
|
||||
+Q1IezxX+VmVaJCN8AgSowbYgpRdpRQ+mhGeQby0JcvO16fyPnUJBz3GGel2bcK8
|
||||
fcQyT0TVapCiD9oURVmdvDSsRXz+EoPlOve8AWciHHgm1ItO5qdPRP5YtcJfLiwK
|
||||
noYnpda2d9SzmYk+Q2JFArooF7/A1DYz9bXCMo3qp0gQlMpSMDR+MCbxHBzBBr+f
|
||||
QG8QdDrzWQ2slhniBhFDk0LuPCBLlSeIzkp+DoAGDXf3hWYhechlabZ7nfngg5er
|
||||
Ez776WCFAgMBAAECggEBAJcHRyCWmcLm3MRY5MF0K9BKV9R3NnBdTuQ8OPdE2Ui3
|
||||
w6gcRuBi+eK/TrU3CAIqUXsEW5Hq1mQuXfwAh5cn/XYfG/QXx91eKBCdOTIgqY/6
|
||||
pODsmVkRhg0c2rl6eWYd4m6BNHsjhm8WWx9C+HJ4z528UpV1n2dUElkvbMHD+aKp
|
||||
Ndwd0W+0PCn/BjMn/sdyy01f8sfaK2Zoy7HBw/fGeBDNLFFj3Iz7BqXYeS+OyfLN
|
||||
B4xD5I5fFqt1iJeyqVPzGkOAYSqisijbM1GtZJCeVp37/+IDylCKTO3l8Xd8x73U
|
||||
qTYcYT3heSHyUC2xCM6Va2YkSrOHeqbq91QgHh9LVrUCgYEA9t/wE2S8TE2l1IG9
|
||||
68SXdhyaXTnB2qSL7ggY0uazPzBNLQpNMOxicZ6/4QGEi3hSuCqGxxGo9UEoTsVd
|
||||
pk8oIeDULdPVi4NQxSmkxUyArs/dzOMygUPyosOiEc8z6jWFFKDcQ7mnZnay8dZ4
|
||||
e4j+/hZDONtDrJ+zH2xu98ZrJPcCgYEA12CbSRbCkTiRj/dq8Qvgp6+ceTVcAbnk
|
||||
MWpAhZQaXHrG3XP0L7QTIHG/7a09Mln92zjuAFXDp/Vc5NdxeXcnj9j6oUAxq+0I
|
||||
dq+vibzjROemmvnmQvXGY9tc0ns6u7GjM0+Sicmas+IH4vuum/aRasABfVe2XBwe
|
||||
4fVs0n7yU2MCgYA7KevFGg0uVCV7yiQTzqdlvPEZim/00B5gyzv3vyYR7KdyNdfN
|
||||
87ib9imR6OU0738Td82ZA5h0PktEpXQOGUZK6DCxUuUIbE39Ej/UsMLeIh7LrV87
|
||||
L2eErlG25utQI8di7DIdYO7HVYcJAhcZs/k4N2mgxJtxUUyCKWBmrPycfQKBgAo7
|
||||
0uUUKcaQs4ntra0qbVBKbdrsiCSk2ozmiY5PTTlbtBtNqSqjGc2O2hnHA4Ni90b1
|
||||
W4m0iYlvhSxyeDfXS4/wNWh4DmQm7SIGkwaubPYXM7llamWAHB8eiziNFmtYs3J6
|
||||
s3HMnIczlEBayR8sBhjWaruz8TxLMcR2zubplUYVAoGBAItxeC9IT8BGJoZB++qM
|
||||
f2LXCqJ383x0sDHhwPMFPtwUTzAwc5BJgQe9zFktW5CBxsER+MnUZjlrarT1HQfH
|
||||
1Y1mJQXtwuBKG4pPPZphH0yoVlYcWkBTMw/KmlVlwRclEzRQwV3TPD+i6ieKeZhz
|
||||
9eZwhS3H+Zb/693WbBDyH8L+
|
||||
-----END PRIVATE KEY-----
|
|
@ -1,21 +0,0 @@
|
|||
-----BEGIN CERTIFICATE-----
|
||||
MIIDYDCCAkigAwIBAgIJALIHkFXjtZ2yMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV
|
||||
BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX
|
||||
aWRnaXRzIFB0eSBMdGQwHhcNMTgwNTIwMDg1MzI3WhcNMTkwNTIwMDg1MzI3WjBF
|
||||
MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50
|
||||
ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
|
||||
CgKCAQEA+LZ8+eqDHyoCt7HGQIhZK+ZagDxXzJ67a2V88s/rHB3zhi1d6ha6q5sc
|
||||
ljmCNqj250fjSWpDQ4hssfqyNDmY/IUaphnT9eMBPZX6RXZVFXGtpUUFvGik5hed
|
||||
2g7j5Jhy+luz5QHn9zR6E7rkqTPl3WJZ2fe4LEfij6/bzZ2CMUFrKyt/uqn4laTl
|
||||
m4DO+wjoOUGAHmaHbkpkhYTb/qbWzV0qMh0Zy4gQuFYcBVbATcdAjV4bRNkHd0CL
|
||||
Ekd3A9ae5ZaeOrg2HkPVcinxg1ln5jBe2LBqDFqKkWudzm6jeNw+oE4lKKxDfHH8
|
||||
AD08N8qFbfs1YxZAjL3wKpcYVw2pzQIDAQABo1MwUTAdBgNVHQ4EFgQU2Yywgv8p
|
||||
WfyZxYVx+MnH+VQ5TTUwHwYDVR0jBBgwFoAU2Yywgv8pWfyZxYVx+MnH+VQ5TTUw
|
||||
DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEA2maF7DQ7CMpCho9B
|
||||
9gjGxvt8HqY1pCyuQwcSPb4PTyoKUZ/ZuIDhVOaBX+ox1RzlfGtYs2BUM63/QUDs
|
||||
dP0GO7/IL/XEqJi1flrFvM7LNSs89qAbPJ440m6jJDzsuL2VeyUX/M72IEsBK2uS
|
||||
ajtS1+HFQjPMvt7wR6fDPCP7wHPOrkTN4hcHlgzVJShKUnFaHtb2lOnWaoM/Sk91
|
||||
IsiyAhKRuCM9et7/bnOj7G8448QDVtQNniT8V/HpqQ7ltSuIGvs3QYTLDTege/74
|
||||
Q8Ph1oH7shyRE/PqPfyIuLq3p0N9Sah3oRMHLohYjJL0zAGt0jxSsnhrBSNUUD/v
|
||||
bAd5VQ==
|
||||
-----END CERTIFICATE-----
|
|
@ -1,28 +0,0 @@
|
|||
-----BEGIN PRIVATE KEY-----
|
||||
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4tnz56oMfKgK3
|
||||
scZAiFkr5lqAPFfMnrtrZXzyz+scHfOGLV3qFrqrmxyWOYI2qPbnR+NJakNDiGyx
|
||||
+rI0OZj8hRqmGdP14wE9lfpFdlUVca2lRQW8aKTmF53aDuPkmHL6W7PlAef3NHoT
|
||||
uuSpM+XdYlnZ97gsR+KPr9vNnYIxQWsrK3+6qfiVpOWbgM77COg5QYAeZoduSmSF
|
||||
hNv+ptbNXSoyHRnLiBC4VhwFVsBNx0CNXhtE2Qd3QIsSR3cD1p7llp46uDYeQ9Vy
|
||||
KfGDWWfmMF7YsGoMWoqRa53ObqN43D6gTiUorEN8cfwAPTw3yoVt+zVjFkCMvfAq
|
||||
lxhXDanNAgMBAAECggEBAIGAI5rNbPCxIzEas6uuUx/0lXLn+J9mlxfYhDK56CV/
|
||||
wuk+fgQBSblIzp252/8yAz1xxPrZBaUIR/B0JI3k36+8bp/GGwOQ63hxuxqn/q1n
|
||||
v46qXc44foQAEAUWc7r3Vgbd8NFxKKMjA916Fs2zZCDdsQM5ZQBJfcJrQvvQ45VY
|
||||
//UtXdNeIBQOb5Wg4o9fHJolKzCHWRaD2ExoIHZ5Fa6JpBmk9JBHcUbrHrlbOeep
|
||||
/SkbSa0ma9j3k3jqV970XRoQUCJf+K1Li49jmaYPPGXBUAp6AfU+yiAJ1aups38m
|
||||
BClLAV9g6vgE3xK2xozGPI1+j9lkruYbvGbPNkXexdECgYEA/47XnKITSnxtV+NK
|
||||
nDbWNOgpeaRbxAdjp1P0b4VI0S0SuRvKUOCp1UlPg5BjGL0JLPQpGlPzEfLlGWAa
|
||||
68vhyj0V6HL2+PAJNib1eu6yyRBsSbPdrAD5nydHpbxRcdShhVwb2MHMyBeYH5Al
|
||||
kL+ed5wCF32kXOOGzhoGzJEKNEcCgYEA+SSdcdbuVpQFkAecIoABwdx/qeOAeS19
|
||||
FsvVSTmWlhal8m2Mn8RWZ0IKXT9AoZJ0KQBIKHViPtyV7UQey05uRgLRHZapHpe8
|
||||
dhm6SsGYtU3BhLdHJBP0kI79qm2kzqsHp6ghSzaxT9CkRfMniN+TD+w8p7lrOaxv
|
||||
vV46UHoGX0sCgYB4LlCvVHkF+mXhgv4/YHpz/woiLm0JTwBKXG0DVQbdd/jqHGuU
|
||||
hVLY/tTp5ij0JVH/VgNOYlRZCIU83blLUmIonXmECyyh/SAX21JuMXram2KRdoi0
|
||||
rvC1K9/BzUHv6jLbaGmgEeOf5Zign0VLQRHg5fkF2wxEsqtemVbBNSQ7WQKBgBFk
|
||||
Y/VRervig2zlixnBc93zpZnXft12tnfD7PS6p298z0LYMOvqSdnVe2G9C6b70U4X
|
||||
bfIdF6mpvnGcwsWQiRQsGCsHnHC9SPO5og6b6ywk7HB2VuoG1pjM0pp2Iv4mZFdo
|
||||
3kIg5EndF8qmSck9SkffRvCyefDBv98pV8rMaet3AoGBALjlN2hLoNE5Cs5vTYH8
|
||||
W0AN4lEOaTlBRKG8a1h7Fm2vPgzGGkiwU6bVzsh0oTfytc8v8MW9lNQZpE3dBKne
|
||||
ms3FrNsnBbTczX+xJmndRnVRocdyON6u476VxAuz/dHSFFnZGXX+2lJse9xnWHUz
|
||||
OpSHUPq3TrUzhgZClE2ZKpNm
|
||||
-----END PRIVATE KEY-----
|
|
@ -1,177 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// Package cgroup implements parsing for all the cgroup
|
||||
// categories and functionality in a simple way.
|
||||
package cgroup
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// DO NOT EDIT: the following constants are chosen defaults for any kernel
|
||||
// after 3.x, please open a github issue https://github.com/minio/minio/issues
|
||||
// and discuss first if you wish to change this.
|
||||
const (
|
||||
// Default string for looking for kernel memory param.
|
||||
memoryLimitKernelParam = "memory.limit_in_bytes"
|
||||
|
||||
// Points to sys path memory path.
|
||||
cgroupMemSysPath = "/sys/fs/cgroup/memory"
|
||||
|
||||
// Default docker prefix.
|
||||
dockerPrefixName = "/docker/"
|
||||
|
||||
// Proc controller group path.
|
||||
cgroupFileTemplate = "/proc/%d/cgroup"
|
||||
)
|
||||
|
||||
// CGEntries - represents all the entries in a process cgroup file
|
||||
// at /proc/<pid>/cgroup as key value pairs.
|
||||
type CGEntries map[string]string
|
||||
|
||||
// GetEntries reads and parses all the cgroup entries for a given process.
|
||||
func GetEntries(pid int) (CGEntries, error) {
|
||||
r, err := os.Open(fmt.Sprintf(cgroupFileTemplate, pid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer r.Close()
|
||||
return parseProcCGroup(r)
|
||||
}
|
||||
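// Usage sketch (illustrative, not part of the original file): read the cgroup
// entries for the current process and resolve its memory limit.
//
//	entries, err := GetEntries(os.Getpid())
//	if err != nil {
//		// handle error
//	}
//	memPath := entries["memory"] // e.g. "/user.slice"
//	limit, _ := GetMemoryLimit(os.Getpid())
//	_, _ = memPath, limit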
|
||||
// parseProcCGroup - cgroups are always in the following
|
||||
// format. Once enabled, you need to know the pid of the
|
||||
// application you are looking for, so that the
|
||||
// following parsing logic only parses the file located
|
||||
// at /proc/<pid>/cgroup.
|
||||
//
|
||||
// CGROUP entries id, component and path are always in
|
||||
// the following format. ``ID:COMPONENT:PATH``
|
||||
//
|
||||
// Following code block parses this information and
|
||||
// returns CGEntries, which is a parsed list of all
|
||||
// the line by line entries from /proc/<pid>/cgroup.
|
||||
func parseProcCGroup(r io.Reader) (CGEntries, error) {
|
||||
var cgEntries = CGEntries{}
|
||||
|
||||
// Start reading cgroup categories line by line
|
||||
// and process them into procCGroup structure.
|
||||
scanner := bufio.NewScanner(r)
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
|
||||
tokens := strings.SplitN(line, ":", 3)
|
||||
if len(tokens) < 3 {
|
||||
continue
|
||||
}
|
||||
|
||||
name, path := tokens[1], tokens[2]
|
||||
for _, token := range strings.Split(name, ",") {
|
||||
name = strings.TrimPrefix(token, "name=")
|
||||
cgEntries[name] = path
|
||||
}
|
||||
}
|
||||
|
||||
// Return upon any error while reading the cgroup categories.
|
||||
if err := scanner.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cgEntries, nil
|
||||
}
|
||||
|
||||
// Fetch value of the cgroup kernel param from the cgroup manager,
|
||||
// if the cgroup manager is configured we should just rely on the `cgm` cli
|
||||
// to fetch all the values for us.
|
||||
func getManagerKernValue(cname, path, kernParam string) (limit uint64, err error) {
|
||||
|
||||
cmd := exec.Command("cgm", "getvalue", cname, path, kernParam)
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
if err = cmd.Run(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Parse the cgm output.
|
||||
limit, err = strconv.ParseUint(strings.TrimSpace(out.String()), 10, 64)
|
||||
return limit, err
|
||||
}
|
||||
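// For reference, the command built above is equivalent to running something
// like the following on a host where the cgroup manager is installed
// (arguments are illustrative):
//
//	cgm getvalue memory /user.slice memory.limit_in_bytes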
|
||||
// Get cgroup memory limit file path.
|
||||
func getMemoryLimitFilePath(cgPath string) string {
|
||||
path := cgroupMemSysPath
|
||||
|
||||
// Docker generates weird cgroup paths that don't
|
||||
// really exist on the file system.
|
||||
//
|
||||
// For example on regular Linux OS :
|
||||
// `/user.slice/user-1000.slice/session-1.scope`
|
||||
//
|
||||
// But they exist as a bind mount on Docker and
|
||||
// are not accessible : `/docker/<hash>`
|
||||
//
|
||||
// We will just ignore if there is `/docker` in the
|
||||
// path and fall back to :
|
||||
// `/sys/fs/cgroup/memory/memory.limit_in_bytes`
|
||||
if !strings.HasPrefix(cgPath, dockerPrefixName) {
|
||||
path = filepath.Join(path, cgPath)
|
||||
}
|
||||
|
||||
// Final path.
|
||||
return filepath.Join(path, memoryLimitKernelParam)
|
||||
}
|
||||
|
||||
// GetMemoryLimit - fetches the cgroup memory limit, first by querying the
|
||||
// cgroup manager; if that fails, it falls back to reading the limit from
|
||||
// a file path under '/sys/fs/cgroup/memory'.
|
||||
func GetMemoryLimit(pid int) (limit uint64, err error) {
|
||||
var cg CGEntries
|
||||
cg, err = GetEntries(pid)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
path := cg["memory"]
|
||||
|
||||
limit, err = getManagerKernValue("memory", path, memoryLimitKernelParam)
|
||||
if err != nil {
|
||||
|
||||
// Upon any failure returned from `cgm`, on some systems cgm
|
||||
// might not be installed. We fall back to using the sysfs
|
||||
// path instead to lookup memory limits.
|
||||
var b []byte
|
||||
b, err = ioutil.ReadFile(getMemoryLimitFilePath(path))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
limit, err = strconv.ParseUint(strings.TrimSpace(string(b)), 10, 64)
|
||||
}
|
||||
|
||||
return limit, err
|
||||
}
|
|
@ -1,140 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cgroup
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// Testing parsing correctness for various process cgroup files.
|
||||
func TestProcCGroup(t *testing.T) {
|
||||
tmpPath, err := ioutil.TempFile("", "cgroup")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer os.Remove(tmpPath.Name())
|
||||
|
||||
cgroup := `
|
||||
11:memory:/user.slice
|
||||
10:blkio:/user.slice
|
||||
9:hugetlb:/
|
||||
8:net_cls,net_prio:/
|
||||
7:perf_event:/
|
||||
6:pids:/user.slice/user-1000.slice
|
||||
5:devices:/user.slice
|
||||
4:cpuset:/
|
||||
3:cpu,cpuacct:/user.slice
|
||||
2:freezer:/
|
||||
1:name=systemd:/user.slice/user-1000.slice/session-1.scope
|
||||
`
|
||||
_, err = tmpPath.WriteString(cgroup)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Seek back to read from the beginning.
|
||||
tmpPath.Seek(0, 0)
|
||||
|
||||
cg, err := parseProcCGroup(tmpPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path := cg["memory"]
|
||||
if len(path) == 0 {
|
||||
t.Fatal("Path component cannot be empty")
|
||||
}
|
||||
|
||||
if path != "/user.slice" {
|
||||
t.Fatal("Path component cannot be empty")
|
||||
}
|
||||
|
||||
path = cg["systemd"]
|
||||
if path != "/user.slice/user-1000.slice/session-1.scope" {
|
||||
t.Fatal("Path component cannot be empty")
|
||||
}
|
||||
|
||||
// Mixed cgroups with different group names.
|
||||
cgroup = `
|
||||
11:memory:/newtest/newtest
|
||||
10:blkio:/user.slice
|
||||
9:hugetlb:/
|
||||
8:net_cls,net_prio:/
|
||||
7:perf_event:/
|
||||
6:pids:/user.slice/user-1000.slice
|
||||
5:devices:/user.slice
|
||||
4:cpuset:/
|
||||
3:cpu,cpuacct:/newtest/newtest
|
||||
2:freezer:/
|
||||
1:name=systemd:/user.slice/user-1000.slice/session-1.scope
|
||||
`
|
||||
|
||||
// Seek back to read from the beginning.
|
||||
tmpPath.Seek(0, 0)
|
||||
|
||||
_, err = tmpPath.WriteString(cgroup)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// Seek back to read from the beginning.
|
||||
tmpPath.Seek(0, 0)
|
||||
|
||||
cg, err = parseProcCGroup(tmpPath)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
path = cg["memory"]
|
||||
if path != "/newtest/newtest" {
|
||||
t.Fatal("Path component cannot be empty")
|
||||
}
|
||||
|
||||
path = cg["systemd"]
|
||||
if path != "/user.slice/user-1000.slice/session-1.scope" {
|
||||
t.Fatal("Path component cannot be empty")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Tests cgroup memory limit path construction.
|
||||
func TestMemoryLimitPath(t *testing.T) {
|
||||
testCases := []struct {
|
||||
cgroupPath string
|
||||
expectedPath string
|
||||
}{
|
||||
{
|
||||
cgroupPath: "/user.slice",
|
||||
expectedPath: "/sys/fs/cgroup/memory/user.slice/memory.limit_in_bytes",
|
||||
},
|
||||
{
|
||||
cgroupPath: "/docker/testing",
|
||||
expectedPath: "/sys/fs/cgroup/memory/memory.limit_in_bytes",
|
||||
},
|
||||
}
|
||||
|
||||
for i, testCase := range testCases {
|
||||
actualPath := getMemoryLimitFilePath(testCase.cgroupPath)
|
||||
if actualPath != testCase.expectedPath {
|
||||
t.Fatalf("Test: %d: Expected: %s, got %s", i+1, testCase.expectedPath, actualPath)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,19 +0,0 @@
|
|||
// +build !linux
|
||||
|
||||
/*
|
||||
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cgroup
|
|
@ -1,137 +0,0 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package color
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
|
||||
// global colors.
|
||||
var (
|
||||
// Check if stderr and stdout are dumb terminals; we do not apply
|
||||
// ANSI coloring on dumb terminals.
|
||||
IsTerminal = func() bool {
|
||||
return !color.NoColor
|
||||
}
|
||||
|
||||
Bold = func() func(a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.Bold).SprintFunc()
|
||||
}
|
||||
return fmt.Sprint
|
||||
}()
|
||||
|
||||
RedBold = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgRed, color.Bold).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
|
||||
Red = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgRed).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
|
||||
Blue = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgBlue).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
|
||||
Yellow = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgYellow).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
|
||||
Green = func() func(a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgGreen).SprintFunc()
|
||||
}
|
||||
return fmt.Sprint
|
||||
}()
|
||||
|
||||
GreenBold = func() func(a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgGreen, color.Bold).SprintFunc()
|
||||
}
|
||||
return fmt.Sprint
|
||||
}()
|
||||
|
||||
CyanBold = func() func(a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgCyan, color.Bold).SprintFunc()
|
||||
}
|
||||
return fmt.Sprint
|
||||
}()
|
||||
|
||||
YellowBold = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgYellow, color.Bold).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
|
||||
BlueBold = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgBlue, color.Bold).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
|
||||
BgYellow = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.BgYellow).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
|
||||
Black = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgBlack).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
|
||||
FgRed = func() func(a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgRed).SprintFunc()
|
||||
}
|
||||
return fmt.Sprint
|
||||
}()
|
||||
|
||||
BgRed = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.BgRed).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
|
||||
FgWhite = func() func(format string, a ...interface{}) string {
|
||||
if IsTerminal() {
|
||||
return color.New(color.FgWhite).SprintfFunc()
|
||||
}
|
||||
return fmt.Sprintf
|
||||
}()
|
||||
)
|
|
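// Usage sketch (illustrative, not part of the original file): the helpers fall
// back to plain fmt formatting when ANSI colors are unavailable.
//
//	fmt.Println(GreenBold("upload complete"))
//	fmt.Println(Red("retry %d of %d failed", 2, 5))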
@ -1,436 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Package console implements console printing helpers
package console

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/fatih/color"
	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

var (
	// DebugPrint enables/disables console debug printing.
	DebugPrint = false

	// Used by the caller to print multiple lines atomically. Exposed by Lock/Unlock methods.
	publicMutex sync.Mutex

	// Used internally by console.
	privateMutex sync.Mutex

	stderrColoredOutput = colorable.NewColorableStderr()

	// Print prints a message.
	Print = func(data ...interface{}) {
		consolePrint("Print", Theme["Print"], data...)
	}

	// PrintC prints a message with color.
	PrintC = func(data ...interface{}) {
		consolePrint("PrintC", Theme["PrintC"], data...)
	}

	// Printf prints a formatted message.
	Printf = func(format string, data ...interface{}) {
		consolePrintf("Print", Theme["Print"], format, data...)
	}

	// Println prints a message with a newline.
	Println = func(data ...interface{}) {
		consolePrintln("Print", Theme["Print"], data...)
	}

	// Fatal prints an error message and exits.
	Fatal = func(data ...interface{}) {
		consolePrint("Fatal", Theme["Fatal"], data...)
		os.Exit(1)
	}

	// Fatalf prints an error message with the given format and exits.
	Fatalf = func(format string, data ...interface{}) {
		consolePrintf("Fatal", Theme["Fatal"], format, data...)
		os.Exit(1)
	}

	// Fatalln prints an error message with a new line and exits.
	Fatalln = func(data ...interface{}) {
		consolePrintln("Fatal", Theme["Fatal"], data...)
		os.Exit(1)
	}

	// Error prints an error message.
	Error = func(data ...interface{}) {
		consolePrint("Error", Theme["Error"], data...)
	}

	// Errorf prints an error message with the given format.
	Errorf = func(format string, data ...interface{}) {
		consolePrintf("Error", Theme["Error"], format, data...)
	}

	// Errorln prints an error message with a new line.
	Errorln = func(data ...interface{}) {
		consolePrintln("Error", Theme["Error"], data...)
	}

	// Info prints an informational message.
	Info = func(data ...interface{}) {
		consolePrint("Info", Theme["Info"], data...)
	}

	// Infof prints an informational message in a custom format.
	Infof = func(format string, data ...interface{}) {
		consolePrintf("Info", Theme["Info"], format, data...)
	}

	// Infoln prints an informational message with a new line.
	Infoln = func(data ...interface{}) {
		consolePrintln("Info", Theme["Info"], data...)
	}

	// Debug prints a debug message without a new line.
	Debug = func(data ...interface{}) {
		if DebugPrint {
			consolePrint("Debug", Theme["Debug"], data...)
		}
	}

	// Debugf prints a debug message in a custom format.
	Debugf = func(format string, data ...interface{}) {
		if DebugPrint {
			consolePrintf("Debug", Theme["Debug"], format, data...)
		}
	}

	// Debugln prints a debug message with a new line.
	Debugln = func(data ...interface{}) {
		if DebugPrint {
			consolePrintln("Debug", Theme["Debug"], data...)
		}
	}

	// Colorize prints message in a colorized form, dictated by the corresponding tag argument.
	Colorize = func(tag string, data interface{}) string {
		if isatty.IsTerminal(os.Stdout.Fd()) {
			colorized, ok := Theme[tag]
			if ok {
				return colorized.SprintFunc()(data)
			} // else: No theme found. Return as string.
		}
		return fmt.Sprint(data)
	}

	// Eraseline prints a new line and moves the cursor back up so that we don't print over the ongoing progress bar.
	Eraseline = func() {
		consolePrintf("Print", Theme["Print"], "%c[2K\n", 27)
		consolePrintf("Print", Theme["Print"], "%c[A", 27)
	}
)

// wrap around standard fmt functions.
// consolePrint prints a message prefixed with message type and program name.
func consolePrint(tag string, c *color.Color, a ...interface{}) {
	privateMutex.Lock()
	defer privateMutex.Unlock()

	switch tag {
	case "Debug":
		// if no arguments are given do not invoke debug printer.
		if len(a) == 0 {
			return
		}
		output := color.Output
		color.Output = stderrColoredOutput
		if isatty.IsTerminal(os.Stderr.Fd()) {
			c.Print(ProgramName() + ": <DEBUG> ")
			c.Print(a...)
		} else {
			fmt.Fprint(color.Output, ProgramName()+": <DEBUG> ")
			fmt.Fprint(color.Output, a...)
		}
		color.Output = output
	case "Fatal":
		fallthrough
	case "Error":
		// if no arguments are given do not invoke fatal and error printer.
		if len(a) == 0 {
			return
		}
		output := color.Output
		color.Output = stderrColoredOutput
		if isatty.IsTerminal(os.Stderr.Fd()) {
			c.Print(ProgramName() + ": <ERROR> ")
			c.Print(a...)
		} else {
			fmt.Fprint(color.Output, ProgramName()+": <ERROR> ")
			fmt.Fprint(color.Output, a...)
		}
		color.Output = output
	case "Info":
		// if no arguments are given do not invoke info printer.
		if len(a) == 0 {
			return
		}
		if isatty.IsTerminal(os.Stdout.Fd()) {
			c.Print(ProgramName() + ": ")
			c.Print(a...)
		} else {
			fmt.Fprint(color.Output, ProgramName()+": ")
			fmt.Fprint(color.Output, a...)
		}
	default:
		if isatty.IsTerminal(os.Stdout.Fd()) {
			c.Print(a...)
		} else {
			fmt.Fprint(color.Output, a...)
		}
	}
}

// consolePrintf - same as consolePrint, but takes a format specifier.
func consolePrintf(tag string, c *color.Color, format string, a ...interface{}) {
	privateMutex.Lock()
	defer privateMutex.Unlock()

	switch tag {
	case "Debug":
		// if no arguments are given do not invoke debug printer.
		if len(a) == 0 {
			return
		}
		output := color.Output
		color.Output = stderrColoredOutput
		if isatty.IsTerminal(os.Stderr.Fd()) {
			c.Print(ProgramName() + ": <DEBUG> ")
			c.Printf(format, a...)
		} else {
			fmt.Fprint(color.Output, ProgramName()+": <DEBUG> ")
			fmt.Fprintf(color.Output, format, a...)
		}
		color.Output = output
	case "Fatal":
		fallthrough
	case "Error":
		// if no arguments are given do not invoke fatal and error printer.
		if len(a) == 0 {
			return
		}
		output := color.Output
		color.Output = stderrColoredOutput
		if isatty.IsTerminal(os.Stderr.Fd()) {
			c.Print(ProgramName() + ": <ERROR> ")
			c.Printf(format, a...)
		} else {
			fmt.Fprint(color.Output, ProgramName()+": <ERROR> ")
			fmt.Fprintf(color.Output, format, a...)
		}
		color.Output = output
	case "Info":
		// if no arguments are given do not invoke info printer.
		if len(a) == 0 {
			return
		}
		if isatty.IsTerminal(os.Stdout.Fd()) {
			c.Print(ProgramName() + ": ")
			c.Printf(format, a...)
		} else {
			fmt.Fprint(color.Output, ProgramName()+": ")
			fmt.Fprintf(color.Output, format, a...)
		}
	default:
		if isatty.IsTerminal(os.Stdout.Fd()) {
			c.Printf(format, a...)
		} else {
			fmt.Fprintf(color.Output, format, a...)
		}
	}
}

// consolePrintln - same as consolePrint, but appends a new line.
func consolePrintln(tag string, c *color.Color, a ...interface{}) {
	privateMutex.Lock()
	defer privateMutex.Unlock()

	switch tag {
	case "Debug":
		// if no arguments are given do not invoke debug printer.
		if len(a) == 0 {
			return
		}
		output := color.Output
		color.Output = stderrColoredOutput
		if isatty.IsTerminal(os.Stderr.Fd()) {
			c.Print(ProgramName() + ": <DEBUG> ")
			c.Println(a...)
		} else {
			fmt.Fprint(color.Output, ProgramName()+": <DEBUG> ")
			fmt.Fprintln(color.Output, a...)
		}
		color.Output = output
	case "Fatal":
		fallthrough
	case "Error":
		// if no arguments are given do not invoke fatal and error printer.
		if len(a) == 0 {
			return
		}
		output := color.Output
		color.Output = stderrColoredOutput
		if isatty.IsTerminal(os.Stderr.Fd()) {
			c.Print(ProgramName() + ": <ERROR> ")
			c.Println(a...)
		} else {
			fmt.Fprint(color.Output, ProgramName()+": <ERROR> ")
			fmt.Fprintln(color.Output, a...)
		}
		color.Output = output
	case "Info":
		// if no arguments are given do not invoke info printer.
		if len(a) == 0 {
			return
		}
		if isatty.IsTerminal(os.Stdout.Fd()) {
			c.Print(ProgramName() + ": ")
			c.Println(a...)
		} else {
			fmt.Fprint(color.Output, ProgramName()+": ")
			fmt.Fprintln(color.Output, a...)
		}
	default:
		if isatty.IsTerminal(os.Stdout.Fd()) {
			c.Println(a...)
		} else {
			fmt.Fprintln(color.Output, a...)
		}
	}
}

// Lock console.
func Lock() {
	publicMutex.Lock()
}

// Unlock locked console.
func Unlock() {
	publicMutex.Unlock()
}

// ProgramName - return the name of the executable program.
func ProgramName() string {
	_, progName := filepath.Split(os.Args[0])
	return progName
}

// Table - data to print in table format with fixed row widths.
type Table struct {
	// per-row colors
	RowColors []*color.Color

	// per-column align-right flag (aligns left by default)
	AlignRight []bool

	// Left margin width for table
	TableIndentWidth int
}

// NewTable - create a new Table instance. Takes per-row colors and
// per-column right-align flags and table indentation width (i.e. left
// margin width)
func NewTable(rowColors []*color.Color, alignRight []bool, indentWidth int) *Table {
	return &Table{rowColors, alignRight, indentWidth}
}

// DisplayTable - prints the table
func (t *Table) DisplayTable(rows [][]string) error {
	numRows := len(rows)
	numCols := len(rows[0])
	if numRows != len(t.RowColors) {
		return fmt.Errorf("row count and row-colors mismatch")
	}

	// Compute max. column widths
	maxColWidths := make([]int, numCols)
	for _, row := range rows {
		if len(row) != len(t.AlignRight) {
			return fmt.Errorf("col count and align-right mismatch")
		}
		for i, v := range row {
			if len([]rune(v)) > maxColWidths[i] {
				maxColWidths[i] = len([]rune(v))
			}
		}
	}

	// Compute per-cell text with padding and alignment applied.
	paddedText := make([][]string, numRows)
	for r, row := range rows {
		paddedText[r] = make([]string, numCols)
		for c, cell := range row {
			if t.AlignRight[c] {
				fmtStr := fmt.Sprintf("%%%ds", maxColWidths[c])
				paddedText[r][c] = fmt.Sprintf(fmtStr, cell)
			} else {
				extraWidth := maxColWidths[c] - len([]rune(cell))
				fmtStr := fmt.Sprintf("%%s%%%ds", extraWidth)
				paddedText[r][c] = fmt.Sprintf(fmtStr, cell, "")
			}
		}
	}

	// Draw table top border
	segments := make([]string, numCols)
	for i, c := range maxColWidths {
		segments[i] = strings.Repeat("─", c+2)
	}
	indentText := strings.Repeat(" ", t.TableIndentWidth)
	border := fmt.Sprintf("%s┌%s┐", indentText, strings.Join(segments, "┬"))
	fmt.Println(border)

	// Print the table with colors
	for r, row := range paddedText {
		fmt.Print(indentText + "│ ")
		for c, text := range row {
			t.RowColors[r].Print(text)
			if c != numCols-1 {
				fmt.Print(" │ ")
			}
		}
		fmt.Println(" │")
	}

	// Draw table bottom border
	border = fmt.Sprintf("%s└%s┘", indentText, strings.Join(segments, "┴"))
	fmt.Println(border)

	return nil
}

// RewindLines - uses terminal escape symbols to clear and rewind
// upwards on the console for `n` lines.
func RewindLines(n int) {
	for i := 0; i < n; i++ {
		fmt.Printf("\033[1A\033[K")
	}
}
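A usage sketch for Table/DisplayTable above. The import path is illustrative (the fork's real module path is not shown here); RowColors must have one entry per row and AlignRight one flag per column, otherwise DisplayTable returns an error.

package main

import (
	"github.com/fatih/color"

	console "example.com/frostfs-s3-gw/pkg/console" // hypothetical import path
)

func main() {
	t := console.NewTable(
		[]*color.Color{color.New(color.Bold), color.New(), color.New()}, // one color per row
		[]bool{false, true},                                             // right-align the second column
		2,                                                               // left margin width
	)
	rows := [][]string{
		{"BUCKET", "OBJECTS"},
		{"photos", "1042"},
		{"logs", "73"},
	}
	if err := t.DisplayTable(rows); err != nil {
		console.Fatalln(err)
	}
}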
@ -1,37 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package console

import (
	"testing"

	"github.com/fatih/color"
)

func TestSetColor(t *testing.T) {
	SetColor("unknown", color.New(color.FgWhite))
	_, ok := Theme["unknown"]
	if !ok {
		t.Fatal("missing theme")
	}
}

func TestColorLock(t *testing.T) {
	Lock()
	Print("") // Test for deadlocks.
	Unlock()
}
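Beyond the deadlock smoke test above, Lock/Unlock exist so a caller can emit several lines without another goroutine's console output interleaving. A minimal sketch (assumes this console package and nothing else; the function name is illustrative):

// printSummary prints a multi-line block atomically with respect to other
// goroutines that also go through the console helpers.
func printSummary(buckets []string) {
	console.Lock()
	defer console.Unlock()
	console.Println("buckets:")
	for _, b := range buckets {
		console.Println("  " + b)
	}
}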
@ -1,54 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package console

import "github.com/fatih/color"

var (
	// Theme contains default color mapping.
	Theme = map[string]*color.Color{
		"Debug":  color.New(color.FgWhite, color.Faint, color.Italic),
		"Fatal":  color.New(color.FgRed, color.Italic, color.Bold),
		"Error":  color.New(color.FgYellow, color.Italic),
		"Info":   color.New(color.FgGreen, color.Bold),
		"Print":  color.New(),
		"PrintB": color.New(color.FgBlue, color.Bold),
		"PrintC": color.New(color.FgGreen, color.Bold),
	}
)

// SetColorOff disables coloring for the entire session.
func SetColorOff() {
	privateMutex.Lock()
	defer privateMutex.Unlock()
	color.NoColor = true
}

// SetColorOn enables coloring for the entire session.
func SetColorOn() {
	privateMutex.Lock()
	defer privateMutex.Unlock()
	color.NoColor = false
}

// SetColor sets a color for a particular tag.
func SetColor(tag string, cl *color.Color) {
	privateMutex.Lock()
	defer privateMutex.Unlock()
	// add new theme
	Theme[tag] = cl
}
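A short sketch of how the theme hooks above are typically used: register an extra tag for Colorize/PrintC-style output and optionally disable colors for the session. It assumes the console package and github.com/fatih/color are imported; the "Warn" tag and the NO_COLOR check are illustrative, not part of this package.

func configureConsole() {
	// Register a custom tag that Colorize("Warn", ...) can look up later.
	console.SetColor("Warn", color.New(color.FgMagenta, color.Bold))

	// Respect a hypothetical NO_COLOR environment toggle.
	if os.Getenv("NO_COLOR") != "" {
		console.SetColorOff()
	}
}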
@ -1,131 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in https://golang.org/LICENSE

package csv_test

import (
	"encoding/csv"
	"fmt"
	"io"
	"log"
	"os"
	"strings"
)

func ExampleReader() {
	in := `first_name,last_name,username
"Rob","Pike",rob
Ken,Thompson,ken
"Robert","Griesemer","gri"
`
	r := csv.NewReader(strings.NewReader(in))

	for {
		record, err := r.Read()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}

		fmt.Println(record)
	}
	// Output:
	// [first_name last_name username]
	// [Rob Pike rob]
	// [Ken Thompson ken]
	// [Robert Griesemer gri]
}

// This example shows how csv.Reader can be configured to handle other
// types of CSV files.
func ExampleReader_options() {
	in := `first_name;last_name;username
"Rob";"Pike";rob
# lines beginning with a # character are ignored
Ken;Thompson;ken
"Robert";"Griesemer";"gri"
`
	r := csv.NewReader(strings.NewReader(in))
	r.Comma = ';'
	r.Comment = '#'

	records, err := r.ReadAll()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Print(records)
	// Output:
	// [[first_name last_name username] [Rob Pike rob] [Ken Thompson ken] [Robert Griesemer gri]]
}

func ExampleReader_ReadAll() {
	in := `first_name,last_name,username
"Rob","Pike",rob
Ken,Thompson,ken
"Robert","Griesemer","gri"
`
	r := csv.NewReader(strings.NewReader(in))

	records, err := r.ReadAll()
	if err != nil {
		log.Fatal(err)
	}

	fmt.Print(records)
	// Output:
	// [[first_name last_name username] [Rob Pike rob] [Ken Thompson ken] [Robert Griesemer gri]]
}

func ExampleWriter() {
	records := [][]string{
		{"first_name", "last_name", "username"},
		{"Rob", "Pike", "rob"},
		{"Ken", "Thompson", "ken"},
		{"Robert", "Griesemer", "gri"},
	}

	w := csv.NewWriter(os.Stdout)

	for _, record := range records {
		if err := w.Write(record); err != nil {
			log.Fatalln("error writing record to csv:", err)
		}
	}

	// Write any buffered data to the underlying writer (standard output).
	w.Flush()

	if err := w.Error(); err != nil {
		log.Fatal(err)
	}
	// Output:
	// first_name,last_name,username
	// Rob,Pike,rob
	// Ken,Thompson,ken
	// Robert,Griesemer,gri
}

func ExampleWriter_WriteAll() {
	records := [][]string{
		{"first_name", "last_name", "username"},
		{"Rob", "Pike", "rob"},
		{"Ken", "Thompson", "ken"},
		{"Robert", "Griesemer", "gri"},
	}

	w := csv.NewWriter(os.Stdout)
	w.WriteAll(records) // calls Flush internally

	if err := w.Error(); err != nil {
		log.Fatalln("error writing csv:", err)
	}
	// Output:
	// first_name,last_name,username
	// Rob,Pike,rob
	// Ken,Thompson,ken
	// Robert,Griesemer,gri
}
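In the same spirit as the examples above, the Writer side can also be reconfigured before the first Write; the sketch below (not in the original file) emits semicolon-separated output by setting Comma on the Writer.

func ExampleWriter_options() {
	w := csv.NewWriter(os.Stdout)
	w.Comma = ';'

	if err := w.WriteAll([][]string{
		{"first_name", "last_name"},
		{"Rob", "Pike"},
	}); err != nil {
		log.Fatal(err)
	}
	// Output:
	// first_name;last_name
	// Rob;Pike
}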
@ -1,70 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in https://golang.org/LICENSE

// +build gofuzz

package csv

import (
	"bytes"
	"fmt"
	"reflect"
)

func Fuzz(data []byte) int {
	score := 0
	buf := new(bytes.Buffer)

	for _, tt := range []Reader{
		{},
		{Comma: ';'},
		{Comma: '\t'},
		{LazyQuotes: true},
		{TrimLeadingSpace: true},
		{Comment: '#'},
		{Comment: ';'},
	} {
		r := NewReader(bytes.NewReader(data))
		r.Comma = tt.Comma
		r.Comment = tt.Comment
		r.LazyQuotes = tt.LazyQuotes
		r.TrimLeadingSpace = tt.TrimLeadingSpace

		records, err := r.ReadAll()
		if err != nil {
			continue
		}
		score = 1

		buf.Reset()
		w := NewWriter(buf)
		w.Comma = tt.Comma
		err = w.WriteAll(records)
		if err != nil {
			fmt.Printf("writer = %#v\n", w)
			fmt.Printf("records = %v\n", records)
			panic(err)
		}

		r = NewReader(buf)
		r.Comma = tt.Comma
		r.Comment = tt.Comment
		r.LazyQuotes = tt.LazyQuotes
		r.TrimLeadingSpace = tt.TrimLeadingSpace
		result, err := r.ReadAll()
		if err != nil {
			fmt.Printf("reader = %#v\n", r)
			fmt.Printf("records = %v\n", records)
			panic(err)
		}

		if !reflect.DeepEqual(records, result) {
			fmt.Println("records = \n", records)
			fmt.Println("result = \n", result)
			panic("not equal")
		}
	}

	return score
}
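The same read→write→re-read round-trip property that Fuzz checks can be exercised as an ordinary test. The sketch below is not part of the original file and additionally assumes "strings", "testing" and "reflect" are available in the test build; it stays inside package csv so it can use NewReader/NewWriter directly.

// TestRoundTripSketch re-parses what the Writer produced and expects the
// records to come back unchanged.
func TestRoundTripSketch(t *testing.T) {
	const input = "a,b\n\"multi\nline\",d\n"

	records, err := NewReader(strings.NewReader(input)).ReadAll()
	if err != nil {
		t.Fatal(err)
	}

	var buf bytes.Buffer
	if err := NewWriter(&buf).WriteAll(records); err != nil {
		t.Fatal(err)
	}

	again, err := NewReader(&buf).ReadAll()
	if err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(records, again) {
		t.Fatalf("round trip mismatch: %v vs %v", records, again)
	}
}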
@ -1,445 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in https://golang.org/LICENSE

// Package csv reads and writes comma-separated values (CSV) files.
// There are many kinds of CSV files; this package supports the format
// described in RFC 4180.
//
// A csv file contains zero or more records of one or more fields per record.
// Each record is separated by the newline character. The final record may
// optionally be followed by a newline character.
//
//	field1,field2,field3
//
// White space is considered part of a field.
//
// Carriage returns before newline characters are silently removed.
//
// Blank lines are ignored. A line with only whitespace characters (excluding
// the ending newline character) is not considered a blank line.
//
// Fields which start and stop with the quote character " are called
// quoted-fields. The beginning and ending quote are not part of the
// field.
//
// The source:
//
//	normal string,"quoted-field"
//
// results in the fields
//
//	{`normal string`, `quoted-field`}
//
// Within a quoted-field a quote character followed by a second quote
// character is considered a single quote.
//
//	"the ""word"" is true","a ""quoted-field"""
//
// results in
//
//	{`the "word" is true`, `a "quoted-field"`}
//
// Newlines and commas may be included in a quoted-field
//
//	"Multi-line
//	field","comma is ,"
//
// results in
//
//	{`Multi-line
//	field`, `comma is ,`}
package csv

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"unicode"
	"unicode/utf8"
)

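Unlike the standard library, this fork's Reader (declared further down in this file) exposes Quote and QuoteEscape fields, so the quoting characters can be changed before the first Read. A minimal sketch, assuming "strings" is also imported; the helper name is illustrative.

// parseSingleQuoted reads CSV where fields are wrapped in single quotes and a
// backslash escapes the quote character, e.g. 'it\'s fine',second.
func parseSingleQuoted(data string) ([][]string, error) {
	r := NewReader(strings.NewReader(data))
	r.Quote = []rune(`'`)
	r.QuoteEscape = '\\'
	return r.ReadAll()
}
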
// A ParseError is returned for parsing errors.
|
||||
// Line numbers are 1-indexed and columns are 0-indexed.
|
||||
type ParseError struct {
|
||||
StartLine int // Line where the record starts
|
||||
Line int // Line where the error occurred
|
||||
Column int // Column (rune index) where the error occurred
|
||||
Err error // The actual error
|
||||
}
|
||||
|
||||
func (e *ParseError) Error() string {
|
||||
if e.Err == ErrFieldCount {
|
||||
return fmt.Sprintf("record on line %d: %v", e.Line, e.Err)
|
||||
}
|
||||
if e.StartLine != e.Line {
|
||||
return fmt.Sprintf("record on line %d; parse error on line %d, column %d: %v", e.StartLine, e.Line, e.Column, e.Err)
|
||||
}
|
||||
return fmt.Sprintf("parse error on line %d, column %d: %v", e.Line, e.Column, e.Err)
|
||||
}
|
||||
|
||||
// Unwrap returns the underlying error
|
||||
func (e *ParseError) Unwrap() error { return e.Err }
|
||||
|
||||
// These are the errors that can be returned in ParseError.Err.
|
||||
var (
|
||||
ErrTrailingComma = errors.New("extra delimiter at end of line") // Deprecated: No longer used.
|
||||
ErrBareQuote = errors.New("bare \" in non-quoted-field")
|
||||
ErrQuote = errors.New("extraneous or missing \" in quoted-field")
|
||||
ErrFieldCount = errors.New("wrong number of fields")
|
||||
)
|
||||
|
||||
var errInvalidDelim = errors.New("csv: invalid field or comment delimiter")
|
||||
|
||||
func validDelim(r rune) bool {
|
||||
return r != 0 && r != '"' && r != '\r' && r != '\n' && utf8.ValidRune(r) && r != utf8.RuneError
|
||||
}
|
||||
|
||||
// A Reader reads records from a CSV-encoded file.
|
||||
//
|
||||
// As returned by NewReader, a Reader expects input conforming to RFC 4180.
|
||||
// The exported fields can be changed to customize the details before the
|
||||
// first call to Read or ReadAll.
|
||||
//
|
||||
// The Reader converts all \r\n sequences in its input to plain \n,
|
||||
// including in multiline field values, so that the returned data does
|
||||
// not depend on which line-ending convention an input file uses.
|
||||
type Reader struct {
|
||||
// Comma is the field delimiter.
|
||||
// It is set to comma (',') by NewReader.
|
||||
// Comma must be a valid rune and must not be \r, \n,
|
||||
// or the Unicode replacement character (0xFFFD).
|
||||
Comma rune
|
||||
|
||||
// Quote is a single rune used for marking fields limits
|
||||
Quote []rune
|
||||
|
||||
// QuoteEscape is a single rune to escape the quote character
|
||||
QuoteEscape rune
|
||||
|
||||
// Comment, if not 0, is the comment character. Lines beginning with the
|
||||
// Comment character without preceding whitespace are ignored.
|
||||
// With leading whitespace the Comment character becomes part of the
|
||||
// field, even if TrimLeadingSpace is true.
|
||||
// Comment must be a valid rune and must not be \r, \n,
|
||||
// or the Unicode replacement character (0xFFFD).
|
||||
// It must also not be equal to Comma.
|
||||
Comment rune
|
||||
|
||||
// FieldsPerRecord is the number of expected fields per record.
|
||||
// If FieldsPerRecord is positive, Read requires each record to
|
||||
// have the given number of fields. If FieldsPerRecord is 0, Read sets it to
|
||||
// the number of fields in the first record, so that future records must
|
||||
// have the same field count. If FieldsPerRecord is negative, no check is
|
||||
// made and records may have a variable number of fields.
|
||||
FieldsPerRecord int
|
||||
|
||||
// If LazyQuotes is true, a quote may appear in an unquoted field and a
|
||||
// non-doubled quote may appear in a quoted field.
|
||||
LazyQuotes bool
|
||||
|
||||
// If TrimLeadingSpace is true, leading white space in a field is ignored.
|
||||
// This is done even if the field delimiter, Comma, is white space.
|
||||
TrimLeadingSpace bool
|
||||
|
||||
// ReuseRecord controls whether calls to Read may return a slice sharing
|
||||
// the backing array of the previous call's returned slice for performance.
|
||||
// By default, each call to Read returns newly allocated memory owned by the caller.
|
||||
ReuseRecord bool
|
||||
|
||||
TrailingComma bool // Deprecated: No longer used.
|
||||
|
||||
r *bufio.Reader
|
||||
|
||||
// numLine is the current line being read in the CSV file.
|
||||
numLine int
|
||||
|
||||
// rawBuffer is a line buffer only used by the readLine method.
|
||||
rawBuffer []byte
|
||||
|
||||
// recordBuffer holds the unescaped fields, one after another.
|
||||
// The fields can be accessed by using the indexes in fieldIndexes.
|
||||
// E.g., For the row `a,"b","c""d",e`, recordBuffer will contain `abc"de`
|
||||
// and fieldIndexes will contain the indexes [1, 2, 5, 6].
|
||||
recordBuffer []byte
|
||||
|
||||
// fieldIndexes is an index of fields inside recordBuffer.
|
||||
// The i'th field ends at offset fieldIndexes[i] in recordBuffer.
|
||||
fieldIndexes []int
|
||||
|
||||
// lastRecord is a record cache and only used when ReuseRecord == true.
|
||||
lastRecord []string
|
||||
|
||||
// Caching some values between Read() calls for performance gain
|
||||
cached bool
|
||||
cachedQuoteEscapeLen int
|
||||
cachedQuoteLen int
|
||||
cachedEncodedQuote []byte
|
||||
cachedCommaLen int
|
||||
cachedQuotes string
|
||||
}
|
||||
|
||||
// NewReader returns a new Reader that reads from r.
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{
|
||||
Comma: ',',
|
||||
Quote: []rune(`"`),
|
||||
QuoteEscape: '"',
|
||||
r: bufio.NewReader(r),
|
||||
}
|
||||
}
|
||||
|
||||
// Read reads one record (a slice of fields) from r.
|
||||
// If the record has an unexpected number of fields,
|
||||
// Read returns the record along with the error ErrFieldCount.
|
||||
// Except for that case, Read always returns either a non-nil
|
||||
// record or a non-nil error, but not both.
|
||||
// If there is no data left to be read, Read returns nil, io.EOF.
|
||||
// If ReuseRecord is true, the returned slice may be shared
|
||||
// between multiple calls to Read.
|
||||
func (r *Reader) Read() (record []string, err error) {
|
||||
if r.ReuseRecord {
|
||||
record, err = r.readRecord(r.lastRecord)
|
||||
r.lastRecord = record
|
||||
} else {
|
||||
record, err = r.readRecord(nil)
|
||||
}
|
||||
return record, err
|
||||
}
|
||||
|
||||
// ReadAll reads all the remaining records from r.
|
||||
// Each record is a slice of fields.
|
||||
// A successful call returns err == nil, not err == io.EOF. Because ReadAll is
|
||||
// defined to read until EOF, it does not treat end of file as an error to be
|
||||
// reported.
|
||||
func (r *Reader) ReadAll() (records [][]string, err error) {
|
||||
for {
|
||||
record, err := r.readRecord(nil)
|
||||
if err == io.EOF {
|
||||
return records, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
records = append(records, record)
|
||||
}
|
||||
}
|
||||
|
||||
// readLine reads the next line (with the trailing endline).
|
||||
// If EOF is hit without a trailing endline, it will be omitted.
|
||||
// If some bytes were read, then the error is never io.EOF.
|
||||
// The result is only valid until the next call to readLine.
|
||||
func (r *Reader) readLine() ([]byte, error) {
|
||||
line, err := r.r.ReadSlice('\n')
|
||||
if err == bufio.ErrBufferFull {
|
||||
r.rawBuffer = append(r.rawBuffer[:0], line...)
|
||||
for err == bufio.ErrBufferFull {
|
||||
line, err = r.r.ReadSlice('\n')
|
||||
r.rawBuffer = append(r.rawBuffer, line...)
|
||||
}
|
||||
line = r.rawBuffer
|
||||
}
|
||||
if len(line) > 0 && err == io.EOF {
|
||||
err = nil
|
||||
// For backwards compatibility, drop trailing \r before EOF.
|
||||
if line[len(line)-1] == '\r' {
|
||||
line = line[:len(line)-1]
|
||||
}
|
||||
}
|
||||
r.numLine++
|
||||
// Normalize \r\n to \n on all input lines.
|
||||
if n := len(line); n >= 2 && line[n-2] == '\r' && line[n-1] == '\n' {
|
||||
line[n-2] = '\n'
|
||||
line = line[:n-1]
|
||||
}
|
||||
return line, err
|
||||
}
|
||||
|
||||
// lengthNL reports the number of bytes for the trailing \n.
|
||||
func lengthNL(b []byte) int {
|
||||
if len(b) > 0 && b[len(b)-1] == '\n' {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// nextRune returns the next rune in b or utf8.RuneError.
|
||||
func nextRune(b []byte) rune {
|
||||
r, _ := utf8.DecodeRune(b)
|
||||
return r
|
||||
}
|
||||
|
||||
func encodeRune(r rune) []byte {
|
||||
rlen := utf8.RuneLen(r)
|
||||
p := make([]byte, rlen)
|
||||
_ = utf8.EncodeRune(p, r)
|
||||
return p
|
||||
}
|
||||
|
||||
func (r *Reader) readRecord(dst []string) ([]string, error) {
|
||||
if r.Comma == r.Comment || !validDelim(r.Comma) || (r.Comment != 0 && !validDelim(r.Comment)) {
|
||||
return nil, errInvalidDelim
|
||||
}
|
||||
|
||||
// Read line (automatically skipping past empty lines and any comments).
|
||||
var line, fullLine []byte
|
||||
var errRead error
|
||||
for errRead == nil {
|
||||
line, errRead = r.readLine()
|
||||
if r.Comment != 0 && nextRune(line) == r.Comment {
|
||||
line = nil
|
||||
continue // Skip comment lines
|
||||
}
|
||||
if errRead == nil && len(line) == lengthNL(line) {
|
||||
line = nil
|
||||
continue // Skip empty lines
|
||||
}
|
||||
fullLine = line
|
||||
break
|
||||
}
|
||||
if errRead == io.EOF {
|
||||
return nil, errRead
|
||||
}
|
||||
|
||||
if !r.cached {
|
||||
r.cachedQuoteEscapeLen = utf8.RuneLen(r.QuoteEscape)
|
||||
if len(r.Quote) > 0 {
|
||||
r.cachedQuoteLen = utf8.RuneLen(r.Quote[0])
|
||||
r.cachedEncodedQuote = encodeRune(r.Quote[0])
|
||||
r.cachedQuotes += string(r.Quote[0])
|
||||
}
|
||||
r.cachedCommaLen = utf8.RuneLen(r.Comma)
|
||||
r.cachedQuotes += string(r.QuoteEscape)
|
||||
r.cached = true
|
||||
}
|
||||
|
||||
// Parse each field in the record.
|
||||
var err error
|
||||
recLine := r.numLine // Starting line for record
|
||||
r.recordBuffer = r.recordBuffer[:0]
|
||||
r.fieldIndexes = r.fieldIndexes[:0]
|
||||
parseField:
|
||||
for {
|
||||
if r.TrimLeadingSpace {
|
||||
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
|
||||
}
|
||||
if len(line) == 0 || r.cachedQuoteLen == 0 || nextRune(line) != r.Quote[0] {
|
||||
// Non-quoted string field
|
||||
i := bytes.IndexRune(line, r.Comma)
|
||||
field := line
|
||||
if i >= 0 {
|
||||
field = field[:i]
|
||||
} else {
|
||||
field = field[:len(field)-lengthNL(field)]
|
||||
}
|
||||
// Check to make sure a quote does not appear in field.
|
||||
if !r.LazyQuotes {
|
||||
if j := bytes.IndexRune(field, r.Quote[0]); j >= 0 {
|
||||
col := utf8.RuneCount(fullLine[:len(fullLine)-len(line[j:])])
|
||||
err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrBareQuote}
|
||||
break parseField
|
||||
}
|
||||
}
|
||||
r.recordBuffer = append(r.recordBuffer, field...)
|
||||
r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
|
||||
if i >= 0 {
|
||||
line = line[i+r.cachedCommaLen:]
|
||||
continue parseField
|
||||
}
|
||||
break parseField
|
||||
} else {
|
||||
// Quoted string field
|
||||
line = line[r.cachedQuoteLen:]
|
||||
for {
|
||||
i := bytes.IndexAny(line, r.cachedQuotes)
|
||||
if i >= 0 {
|
||||
// Hit next quote or escape quote
|
||||
r.recordBuffer = append(r.recordBuffer, line[:i]...)
|
||||
|
||||
escape := nextRune(line[i:]) == r.QuoteEscape
|
||||
if escape {
|
||||
line = line[i+r.cachedQuoteEscapeLen:]
|
||||
} else {
|
||||
line = line[i+r.cachedQuoteLen:]
|
||||
}
|
||||
|
||||
switch rn := nextRune(line); {
|
||||
case escape && r.QuoteEscape != r.Quote[0]:
|
||||
r.recordBuffer = append(r.recordBuffer, encodeRune(rn)...)
|
||||
line = line[utf8.RuneLen(rn):]
|
||||
case rn == r.Quote[0]:
|
||||
// `""` sequence (append quote).
|
||||
r.recordBuffer = append(r.recordBuffer, r.cachedEncodedQuote...)
|
||||
line = line[r.cachedQuoteLen:]
|
||||
case rn == r.Comma:
|
||||
// `",` sequence (end of field).
|
||||
line = line[r.cachedCommaLen:]
|
||||
r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
|
||||
continue parseField
|
||||
case lengthNL(line) == len(line):
|
||||
// `"\n` sequence (end of line).
|
||||
r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
|
||||
break parseField
|
||||
case r.LazyQuotes:
|
||||
// `"` sequence (bare quote).
|
||||
r.recordBuffer = append(r.recordBuffer, r.cachedEncodedQuote...)
|
||||
default:
|
||||
// `"*` sequence (invalid non-escaped quote).
|
||||
col := utf8.RuneCount(fullLine[:len(fullLine)-len(line)-r.cachedQuoteLen])
|
||||
err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrQuote}
|
||||
break parseField
|
||||
}
|
||||
} else if len(line) > 0 {
|
||||
// Hit end of line (copy all data so far).
|
||||
r.recordBuffer = append(r.recordBuffer, line...)
|
||||
if errRead != nil {
|
||||
break parseField
|
||||
}
|
||||
line, errRead = r.readLine()
|
||||
if errRead == io.EOF {
|
||||
errRead = nil
|
||||
}
|
||||
fullLine = line
|
||||
} else {
|
||||
// Abrupt end of file (EOF or error).
|
||||
if !r.LazyQuotes && errRead == nil {
|
||||
col := utf8.RuneCount(fullLine)
|
||||
err = &ParseError{StartLine: recLine, Line: r.numLine, Column: col, Err: ErrQuote}
|
||||
break parseField
|
||||
}
|
||||
r.fieldIndexes = append(r.fieldIndexes, len(r.recordBuffer))
|
||||
break parseField
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
err = errRead
|
||||
}
|
||||
|
||||
// Create a single string and create slices out of it.
|
||||
// This pins the memory of the fields together, but allocates once.
|
||||
str := string(r.recordBuffer) // Convert to string once to batch allocations
|
||||
dst = dst[:0]
|
||||
if cap(dst) < len(r.fieldIndexes) {
|
||||
dst = make([]string, len(r.fieldIndexes))
|
||||
}
|
||||
dst = dst[:len(r.fieldIndexes)]
|
||||
var preIdx int
|
||||
for i, idx := range r.fieldIndexes {
|
||||
dst[i] = str[preIdx:idx]
|
||||
preIdx = idx
|
||||
}
|
||||
|
||||
// Check or update the expected fields per record.
|
||||
if r.FieldsPerRecord > 0 {
|
||||
if len(dst) != r.FieldsPerRecord && err == nil {
|
||||
err = &ParseError{StartLine: recLine, Line: recLine, Err: ErrFieldCount}
|
||||
}
|
||||
} else if r.FieldsPerRecord == 0 {
|
||||
r.FieldsPerRecord = len(dst)
|
||||
}
|
||||
return dst, err
|
||||
}
|
|
@ -1,509 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in https://golang.org/LICENSE

package csv

import (
	"io"
	"reflect"
	"strings"
	"testing"
	"unicode/utf8"
)

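Many of the error cases below compare against *ParseError values, whose line numbers are 1-indexed and columns 0-indexed. A caller-side sketch of unwrapping such an error (not part of the original file; assumes "errors" and "fmt" are imported):

// describeCSVError turns a Reader error into a human-readable location report.
func describeCSVError(err error) string {
	var pe *ParseError
	if errors.As(err, &pe) {
		return fmt.Sprintf("bad CSV near line %d, column %d: %v", pe.Line, pe.Column, pe.Err)
	}
	return err.Error()
}
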
func TestRead(t *testing.T) {
|
||||
tests := []struct {
|
||||
Name string
|
||||
Input string
|
||||
Output [][]string
|
||||
Error error
|
||||
|
||||
// These fields are copied into the Reader
|
||||
Comma rune
|
||||
Comment rune
|
||||
UseFieldsPerRecord bool // false (default) means FieldsPerRecord is -1
|
||||
FieldsPerRecord int
|
||||
LazyQuotes bool
|
||||
TrimLeadingSpace bool
|
||||
ReuseRecord bool
|
||||
}{{
|
||||
Name: "Simple",
|
||||
Input: "a,b,c\n",
|
||||
Output: [][]string{{"a", "b", "c"}},
|
||||
}, {
|
||||
Name: "CRLF",
|
||||
Input: "a,b\r\nc,d\r\n",
|
||||
Output: [][]string{{"a", "b"}, {"c", "d"}},
|
||||
}, {
|
||||
Name: "BareCR",
|
||||
Input: "a,b\rc,d\r\n",
|
||||
Output: [][]string{{"a", "b\rc", "d"}},
|
||||
}, {
|
||||
Name: "RFC4180test",
|
||||
Input: `#field1,field2,field3
|
||||
"aaa","bb
|
||||
b","ccc"
|
||||
"a,a","b""bb","ccc"
|
||||
zzz,yyy,xxx
|
||||
`,
|
||||
Output: [][]string{
|
||||
{"#field1", "field2", "field3"},
|
||||
{"aaa", "bb\nb", "ccc"},
|
||||
{"a,a", `b"bb`, "ccc"},
|
||||
{"zzz", "yyy", "xxx"},
|
||||
},
|
||||
UseFieldsPerRecord: true,
|
||||
FieldsPerRecord: 0,
|
||||
}, {
|
||||
Name: "NoEOLTest",
|
||||
Input: "a,b,c",
|
||||
Output: [][]string{{"a", "b", "c"}},
|
||||
}, {
|
||||
Name: "Semicolon",
|
||||
Input: "a;b;c\n",
|
||||
Output: [][]string{{"a", "b", "c"}},
|
||||
Comma: ';',
|
||||
}, {
|
||||
Name: "MultiLine",
|
||||
Input: `"two
|
||||
line","one line","three
|
||||
line
|
||||
field"`,
|
||||
Output: [][]string{{"two\nline", "one line", "three\nline\nfield"}},
|
||||
}, {
|
||||
Name: "BlankLine",
|
||||
Input: "a,b,c\n\nd,e,f\n\n",
|
||||
Output: [][]string{
|
||||
{"a", "b", "c"},
|
||||
{"d", "e", "f"},
|
||||
},
|
||||
}, {
|
||||
Name: "BlankLineFieldCount",
|
||||
Input: "a,b,c\n\nd,e,f\n\n",
|
||||
Output: [][]string{
|
||||
{"a", "b", "c"},
|
||||
{"d", "e", "f"},
|
||||
},
|
||||
UseFieldsPerRecord: true,
|
||||
FieldsPerRecord: 0,
|
||||
}, {
|
||||
Name: "TrimSpace",
|
||||
Input: " a, b, c\n",
|
||||
Output: [][]string{{"a", "b", "c"}},
|
||||
TrimLeadingSpace: true,
|
||||
}, {
|
||||
Name: "LeadingSpace",
|
||||
Input: " a, b, c\n",
|
||||
Output: [][]string{{" a", " b", " c"}},
|
||||
}, {
|
||||
Name: "Comment",
|
||||
Input: "#1,2,3\na,b,c\n#comment",
|
||||
Output: [][]string{{"a", "b", "c"}},
|
||||
Comment: '#',
|
||||
}, {
|
||||
Name: "NoComment",
|
||||
Input: "#1,2,3\na,b,c",
|
||||
Output: [][]string{{"#1", "2", "3"}, {"a", "b", "c"}},
|
||||
}, {
|
||||
Name: "LazyQuotes",
|
||||
Input: `a "word","1"2",a","b`,
|
||||
Output: [][]string{{`a "word"`, `1"2`, `a"`, `b`}},
|
||||
LazyQuotes: true,
|
||||
}, {
|
||||
Name: "BareQuotes",
|
||||
Input: `a "word","1"2",a"`,
|
||||
Output: [][]string{{`a "word"`, `1"2`, `a"`}},
|
||||
LazyQuotes: true,
|
||||
}, {
|
||||
Name: "BareDoubleQuotes",
|
||||
Input: `a""b,c`,
|
||||
Output: [][]string{{`a""b`, `c`}},
|
||||
LazyQuotes: true,
|
||||
}, {
|
||||
Name: "BadDoubleQuotes",
|
||||
Input: `a""b,c`,
|
||||
Error: &ParseError{StartLine: 1, Line: 1, Column: 1, Err: ErrBareQuote},
|
||||
}, {
|
||||
Name: "TrimQuote",
|
||||
Input: ` "a"," b",c`,
|
||||
Output: [][]string{{"a", " b", "c"}},
|
||||
TrimLeadingSpace: true,
|
||||
}, {
|
||||
Name: "BadBareQuote",
|
||||
Input: `a "word","b"`,
|
||||
Error: &ParseError{StartLine: 1, Line: 1, Column: 2, Err: ErrBareQuote},
|
||||
}, {
|
||||
Name: "BadTrailingQuote",
|
||||
Input: `"a word",b"`,
|
||||
Error: &ParseError{StartLine: 1, Line: 1, Column: 10, Err: ErrBareQuote},
|
||||
}, {
|
||||
Name: "ExtraneousQuote",
|
||||
Input: `"a "word","b"`,
|
||||
Error: &ParseError{StartLine: 1, Line: 1, Column: 3, Err: ErrQuote},
|
||||
}, {
|
||||
Name: "BadFieldCount",
|
||||
Input: "a,b,c\nd,e",
|
||||
Error: &ParseError{StartLine: 2, Line: 2, Err: ErrFieldCount},
|
||||
UseFieldsPerRecord: true,
|
||||
FieldsPerRecord: 0,
|
||||
}, {
|
||||
Name: "BadFieldCount1",
|
||||
Input: `a,b,c`,
|
||||
Error: &ParseError{StartLine: 1, Line: 1, Err: ErrFieldCount},
|
||||
UseFieldsPerRecord: true,
|
||||
FieldsPerRecord: 2,
|
||||
}, {
|
||||
Name: "FieldCount",
|
||||
Input: "a,b,c\nd,e",
|
||||
Output: [][]string{{"a", "b", "c"}, {"d", "e"}},
|
||||
}, {
|
||||
Name: "TrailingCommaEOF",
|
||||
Input: "a,b,c,",
|
||||
Output: [][]string{{"a", "b", "c", ""}},
|
||||
}, {
|
||||
Name: "TrailingCommaEOL",
|
||||
Input: "a,b,c,\n",
|
||||
Output: [][]string{{"a", "b", "c", ""}},
|
||||
}, {
|
||||
Name: "TrailingCommaSpaceEOF",
|
||||
Input: "a,b,c, ",
|
||||
Output: [][]string{{"a", "b", "c", ""}},
|
||||
TrimLeadingSpace: true,
|
||||
}, {
|
||||
Name: "TrailingCommaSpaceEOL",
|
||||
Input: "a,b,c, \n",
|
||||
Output: [][]string{{"a", "b", "c", ""}},
|
||||
TrimLeadingSpace: true,
|
||||
}, {
|
||||
Name: "TrailingCommaLine3",
|
||||
Input: "a,b,c\nd,e,f\ng,hi,",
|
||||
Output: [][]string{{"a", "b", "c"}, {"d", "e", "f"}, {"g", "hi", ""}},
|
||||
TrimLeadingSpace: true,
|
||||
}, {
|
||||
Name: "NotTrailingComma3",
|
||||
Input: "a,b,c, \n",
|
||||
Output: [][]string{{"a", "b", "c", " "}},
|
||||
}, {
|
||||
Name: "CommaFieldTest",
|
||||
Input: `x,y,z,w
|
||||
x,y,z,
|
||||
x,y,,
|
||||
x,,,
|
||||
,,,
|
||||
"x","y","z","w"
|
||||
"x","y","z",""
|
||||
"x","y","",""
|
||||
"x","","",""
|
||||
"","","",""
|
||||
`,
|
||||
Output: [][]string{
|
||||
{"x", "y", "z", "w"},
|
||||
{"x", "y", "z", ""},
|
||||
{"x", "y", "", ""},
|
||||
{"x", "", "", ""},
|
||||
{"", "", "", ""},
|
||||
{"x", "y", "z", "w"},
|
||||
{"x", "y", "z", ""},
|
||||
{"x", "y", "", ""},
|
||||
{"x", "", "", ""},
|
||||
{"", "", "", ""},
|
||||
},
|
||||
}, {
|
||||
Name: "TrailingCommaIneffective1",
|
||||
Input: "a,b,\nc,d,e",
|
||||
Output: [][]string{
|
||||
{"a", "b", ""},
|
||||
{"c", "d", "e"},
|
||||
},
|
||||
TrimLeadingSpace: true,
|
||||
}, {
|
||||
Name: "ReadAllReuseRecord",
|
||||
Input: "a,b\nc,d",
|
||||
Output: [][]string{
|
||||
{"a", "b"},
|
||||
{"c", "d"},
|
||||
},
|
||||
ReuseRecord: true,
|
||||
}, {
|
||||
Name: "StartLine1", // Issue 19019
|
||||
Input: "a,\"b\nc\"d,e",
|
||||
Error: &ParseError{StartLine: 1, Line: 2, Column: 1, Err: ErrQuote},
|
||||
}, {
|
||||
Name: "StartLine2",
|
||||
Input: "a,b\n\"d\n\n,e",
|
||||
Error: &ParseError{StartLine: 2, Line: 5, Column: 0, Err: ErrQuote},
|
||||
}, {
|
||||
Name: "CRLFInQuotedField", // Issue 21201
|
||||
Input: "A,\"Hello\r\nHi\",B\r\n",
|
||||
Output: [][]string{
|
||||
{"A", "Hello\nHi", "B"},
|
||||
},
|
||||
}, {
|
||||
Name: "BinaryBlobField", // Issue 19410
|
||||
Input: "x09\x41\xb4\x1c,aktau",
|
||||
Output: [][]string{{"x09A\xb4\x1c", "aktau"}},
|
||||
}, {
|
||||
Name: "TrailingCR",
|
||||
Input: "field1,field2\r",
|
||||
Output: [][]string{{"field1", "field2"}},
|
||||
}, {
|
||||
Name: "QuotedTrailingCR",
|
||||
Input: "\"field\"\r",
|
||||
Output: [][]string{{"field"}},
|
||||
}, {
|
||||
Name: "QuotedTrailingCRCR",
|
||||
Input: "\"field\"\r\r",
|
||||
Error: &ParseError{StartLine: 1, Line: 1, Column: 6, Err: ErrQuote},
|
||||
}, {
|
||||
Name: "FieldCR",
|
||||
Input: "field\rfield\r",
|
||||
Output: [][]string{{"field\rfield"}},
|
||||
}, {
|
||||
Name: "FieldCRCR",
|
||||
Input: "field\r\rfield\r\r",
|
||||
Output: [][]string{{"field\r\rfield\r"}},
|
||||
}, {
|
||||
Name: "FieldCRCRLF",
|
||||
Input: "field\r\r\nfield\r\r\n",
|
||||
Output: [][]string{{"field\r"}, {"field\r"}},
|
||||
}, {
|
||||
Name: "FieldCRCRLFCR",
|
||||
Input: "field\r\r\n\rfield\r\r\n\r",
|
||||
Output: [][]string{{"field\r"}, {"\rfield\r"}},
|
||||
}, {
|
||||
Name: "FieldCRCRLFCRCR",
|
||||
Input: "field\r\r\n\r\rfield\r\r\n\r\r",
|
||||
Output: [][]string{{"field\r"}, {"\r\rfield\r"}, {"\r"}},
|
||||
}, {
|
||||
Name: "MultiFieldCRCRLFCRCR",
|
||||
Input: "field1,field2\r\r\n\r\rfield1,field2\r\r\n\r\r,",
|
||||
Output: [][]string{
|
||||
{"field1", "field2\r"},
|
||||
{"\r\rfield1", "field2\r"},
|
||||
{"\r\r", ""},
|
||||
},
|
||||
}, {
|
||||
Name: "NonASCIICommaAndComment",
|
||||
Input: "a£b,c£ \td,e\n€ comment\n",
|
||||
Output: [][]string{{"a", "b,c", "d,e"}},
|
||||
TrimLeadingSpace: true,
|
||||
Comma: '£',
|
||||
Comment: '€',
|
||||
}, {
|
||||
Name: "NonASCIICommaAndCommentWithQuotes",
|
||||
Input: "a€\" b,\"€ c\nλ comment\n",
|
||||
Output: [][]string{{"a", " b,", " c"}},
|
||||
Comma: '€',
|
||||
Comment: 'λ',
|
||||
}, {
|
||||
// λ and θ start with the same byte.
|
||||
// This tests that the parser doesn't confuse such characters.
|
||||
Name: "NonASCIICommaConfusion",
|
||||
Input: "\"abθcd\"λefθgh",
|
||||
Output: [][]string{{"abθcd", "efθgh"}},
|
||||
Comma: 'λ',
|
||||
Comment: '€',
|
||||
}, {
|
||||
Name: "NonASCIICommentConfusion",
|
||||
Input: "λ\nλ\nθ\nλ\n",
|
||||
Output: [][]string{{"λ"}, {"λ"}, {"λ"}},
|
||||
Comment: 'θ',
|
||||
}, {
|
||||
Name: "QuotedFieldMultipleLF",
|
||||
Input: "\"\n\n\n\n\"",
|
||||
Output: [][]string{{"\n\n\n\n"}},
|
||||
}, {
|
||||
Name: "MultipleCRLF",
|
||||
Input: "\r\n\r\n\r\n\r\n",
|
||||
}, {
|
||||
// The implementation may read each line in several chunks if it doesn't fit entirely
|
||||
// in the read buffer, so we should test the code to handle that condition.
|
||||
Name: "HugeLines",
|
||||
Input: strings.Repeat("#ignore\n", 10000) + strings.Repeat("@", 5000) + "," + strings.Repeat("*", 5000),
|
||||
Output: [][]string{{strings.Repeat("@", 5000), strings.Repeat("*", 5000)}},
|
||||
Comment: '#',
|
||||
}, {
|
||||
Name: "QuoteWithTrailingCRLF",
|
||||
Input: "\"foo\"bar\"\r\n",
|
||||
Error: &ParseError{StartLine: 1, Line: 1, Column: 4, Err: ErrQuote},
|
||||
}, {
|
||||
Name: "LazyQuoteWithTrailingCRLF",
|
||||
Input: "\"foo\"bar\"\r\n",
|
||||
Output: [][]string{{`foo"bar`}},
|
||||
LazyQuotes: true,
|
||||
}, {
|
||||
Name: "DoubleQuoteWithTrailingCRLF",
|
||||
Input: "\"foo\"\"bar\"\r\n",
|
||||
Output: [][]string{{`foo"bar`}},
|
||||
}, {
|
||||
Name: "EvenQuotes",
|
||||
Input: `""""""""`,
|
||||
Output: [][]string{{`"""`}},
|
||||
}, {
|
||||
Name: "OddQuotes",
|
||||
Input: `"""""""`,
|
||||
Error: &ParseError{StartLine: 1, Line: 1, Column: 7, Err: ErrQuote},
|
||||
}, {
|
||||
Name: "LazyOddQuotes",
|
||||
Input: `"""""""`,
|
||||
Output: [][]string{{`"""`}},
|
||||
LazyQuotes: true,
|
||||
}, {
|
||||
Name: "BadComma1",
|
||||
Comma: '\n',
|
||||
Error: errInvalidDelim,
|
||||
}, {
|
||||
Name: "BadComma2",
|
||||
Comma: '\r',
|
||||
Error: errInvalidDelim,
|
||||
}, {
|
||||
Name: "BadComma3",
|
||||
Comma: '"',
|
||||
Error: errInvalidDelim,
|
||||
}, {
|
||||
Name: "BadComma4",
|
||||
Comma: utf8.RuneError,
|
||||
Error: errInvalidDelim,
|
||||
}, {
|
||||
Name: "BadComment1",
|
||||
Comment: '\n',
|
||||
Error: errInvalidDelim,
|
||||
}, {
|
||||
Name: "BadComment2",
|
||||
Comment: '\r',
|
||||
Error: errInvalidDelim,
|
||||
}, {
|
||||
Name: "BadComment3",
|
||||
Comment: utf8.RuneError,
|
||||
Error: errInvalidDelim,
|
||||
}, {
|
||||
Name: "BadCommaComment",
|
||||
Comma: 'X',
|
||||
Comment: 'X',
|
||||
Error: errInvalidDelim,
|
||||
}}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.Name, func(t *testing.T) {
|
||||
r := NewReader(strings.NewReader(tt.Input))
|
||||
|
||||
if tt.Comma != 0 {
|
||||
r.Comma = tt.Comma
|
||||
}
|
||||
r.Comment = tt.Comment
|
||||
if tt.UseFieldsPerRecord {
|
||||
r.FieldsPerRecord = tt.FieldsPerRecord
|
||||
} else {
|
||||
r.FieldsPerRecord = -1
|
||||
}
|
||||
r.LazyQuotes = tt.LazyQuotes
|
||||
r.TrimLeadingSpace = tt.TrimLeadingSpace
|
||||
r.ReuseRecord = tt.ReuseRecord
|
||||
|
||||
out, err := r.ReadAll()
|
||||
if !reflect.DeepEqual(err, tt.Error) {
|
||||
t.Errorf("ReadAll() error:\ngot %v\nwant %v", err, tt.Error)
|
||||
} else if !reflect.DeepEqual(out, tt.Output) {
|
||||
t.Errorf("ReadAll() output:\ngot %q\nwant %q", out, tt.Output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// nTimes is an io.Reader which yields the string s n times.
|
||||
type nTimes struct {
|
||||
s string
|
||||
n int
|
||||
off int
|
||||
}
|
||||
|
||||
func (r *nTimes) Read(p []byte) (n int, err error) {
|
||||
for {
|
||||
if r.n <= 0 || r.s == "" {
|
||||
return n, io.EOF
|
||||
}
|
||||
n0 := copy(p, r.s[r.off:])
|
||||
p = p[n0:]
|
||||
n += n0
|
||||
r.off += n0
|
||||
if r.off == len(r.s) {
|
||||
r.off = 0
|
||||
r.n--
|
||||
}
|
||||
if len(p) == 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// benchmarkRead measures reading the provided CSV rows data.
|
||||
// initReader, if non-nil, modifies the Reader before it's used.
|
||||
func benchmarkRead(b *testing.B, initReader func(*Reader), rows string) {
|
||||
b.ReportAllocs()
|
||||
r := NewReader(&nTimes{s: rows, n: b.N})
|
||||
if initReader != nil {
|
||||
initReader(r)
|
||||
}
|
||||
for {
|
||||
_, err := r.Read()
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const benchmarkCSVData = `x,y,z,w
|
||||
x,y,z,
|
||||
x,y,,
|
||||
x,,,
|
||||
,,,
|
||||
"x","y","z","w"
|
||||
"x","y","z",""
|
||||
"x","y","",""
|
||||
"x","","",""
|
||||
"","","",""
|
||||
`
|
||||
|
||||
func BenchmarkRead(b *testing.B) {
|
||||
benchmarkRead(b, nil, benchmarkCSVData)
|
||||
}
|
||||
|
||||
func BenchmarkReadWithFieldsPerRecord(b *testing.B) {
|
||||
benchmarkRead(b, func(r *Reader) { r.FieldsPerRecord = 4 }, benchmarkCSVData)
|
||||
}
|
||||
|
||||
func BenchmarkReadWithoutFieldsPerRecord(b *testing.B) {
|
||||
benchmarkRead(b, func(r *Reader) { r.FieldsPerRecord = -1 }, benchmarkCSVData)
|
||||
}
|
||||
|
||||
func BenchmarkReadLargeFields(b *testing.B) {
|
||||
benchmarkRead(b, nil, strings.Repeat(`xxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
|
||||
xxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvv
|
||||
,,zzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
|
||||
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
|
||||
`, 3))
|
||||
}
|
||||
|
||||
func BenchmarkReadReuseRecord(b *testing.B) {
|
||||
benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true }, benchmarkCSVData)
|
||||
}
|
||||
|
||||
func BenchmarkReadReuseRecordWithFieldsPerRecord(b *testing.B) {
|
||||
benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true; r.FieldsPerRecord = 4 }, benchmarkCSVData)
|
||||
}
|
||||
|
||||
func BenchmarkReadReuseRecordWithoutFieldsPerRecord(b *testing.B) {
|
||||
benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true; r.FieldsPerRecord = -1 }, benchmarkCSVData)
|
||||
}
|
||||
|
||||
func BenchmarkReadReuseRecordLargeFields(b *testing.B) {
|
||||
benchmarkRead(b, func(r *Reader) { r.ReuseRecord = true }, strings.Repeat(`xxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
|
||||
xxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvv
|
||||
,,zzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
|
||||
xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx,yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy,zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz,wwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwwww,vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
|
||||
`, 3))
|
||||
}
|
|
@ -1,179 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in https://golang.org/LICENSE

package csv

import (
	"bufio"
	"io"
	"strings"
	"unicode"
	"unicode/utf8"
)

// A Writer writes records using CSV encoding.
//
// As returned by NewWriter, a Writer writes records terminated by a
// newline and uses ',' as the field delimiter. The exported fields can be
// changed to customize the details before the first call to Write or WriteAll.
//
// Comma is the field delimiter.
//
// If UseCRLF is true, the Writer ends each output line with \r\n instead of \n.
//
// The writes of individual records are buffered.
// After all data has been written, the client should call the
// Flush method to guarantee all data has been forwarded to
// the underlying io.Writer. Any errors that occurred should
// be checked by calling the Error method.
type Writer struct {
	Comma       rune // Field delimiter (set to ',' by NewWriter)
	Quote       rune // Fields quote character
	QuoteEscape rune
	AlwaysQuote bool // True to quote all fields
	UseCRLF     bool // True to use \r\n as the line terminator
	w           *bufio.Writer
}

// NewWriter returns a new Writer that writes to w.
func NewWriter(w io.Writer) *Writer {
	return &Writer{
		Comma:       ',',
		Quote:       '"',
		QuoteEscape: '"',
		w:           bufio.NewWriter(w),
	}
}

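The fork-specific AlwaysQuote field, together with UseCRLF, changes the output shape without touching the records themselves. A minimal sketch, same package; the helper name is illustrative.

// writeQuotedCRLF writes every field quoted and terminates records with \r\n,
// which some downstream CSV consumers require.
func writeQuotedCRLF(dst io.Writer, records [][]string) error {
	w := NewWriter(dst)
	w.AlwaysQuote = true
	w.UseCRLF = true
	return w.WriteAll(records) // WriteAll flushes and returns any write error
}
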
// Write writes a single CSV record to w along with any necessary quoting.
|
||||
// A record is a slice of strings with each string being one field.
|
||||
// Writes are buffered, so Flush must eventually be called to ensure
|
||||
// that the record is written to the underlying io.Writer.
|
||||
func (w *Writer) Write(record []string) error {
|
||||
if !validDelim(w.Comma) {
|
||||
return errInvalidDelim
|
||||
}
|
||||
|
||||
for n, field := range record {
|
||||
if n > 0 {
|
||||
if _, err := w.w.WriteRune(w.Comma); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// If we don't have to have a quoted field then just
|
||||
// write out the field and continue to the next field.
|
||||
if !w.AlwaysQuote && !w.fieldNeedsQuotes(field) {
|
||||
if _, err := w.w.WriteString(field); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if _, err := w.w.WriteRune(w.Quote); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
specialChars := "\r\n" + string(w.Quote)
|
||||
|
||||
for len(field) > 0 {
|
||||
// Search for special characters.
|
||||
i := strings.IndexAny(field, specialChars)
|
||||
if i < 0 {
|
||||
i = len(field)
|
||||
}
|
||||
|
||||
// Copy verbatim everything before the special character.
|
||||
if _, err := w.w.WriteString(field[:i]); err != nil {
|
||||
return err
|
||||
}
|
||||
field = field[i:]
|
||||
|
||||
// Encode the special character.
|
||||
if len(field) > 0 {
|
||||
var err error
|
||||
switch nextRune([]byte(field)) {
|
||||
case w.Quote:
|
||||
_, err = w.w.WriteRune(w.QuoteEscape)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
_, err = w.w.WriteRune(w.Quote)
|
||||
case '\r':
|
||||
if !w.UseCRLF {
|
||||
err = w.w.WriteByte('\r')
|
||||
}
|
||||
case '\n':
|
||||
if w.UseCRLF {
|
||||
_, err = w.w.WriteString("\r\n")
|
||||
} else {
|
||||
err = w.w.WriteByte('\n')
|
||||
}
|
||||
}
|
||||
field = field[1:]
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, err := w.w.WriteRune(w.Quote); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var err error
|
||||
if w.UseCRLF {
|
||||
_, err = w.w.WriteString("\r\n")
|
||||
} else {
|
||||
err = w.w.WriteByte('\n')
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Flush writes any buffered data to the underlying io.Writer.
|
||||
// To check if an error occurred during the Flush, call Error.
|
||||
func (w *Writer) Flush() {
|
||||
w.w.Flush()
|
||||
}
|
||||
|
||||
// Error reports any error that has occurred during a previous Write or Flush.
|
||||
func (w *Writer) Error() error {
|
||||
_, err := w.w.Write(nil)
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteAll writes multiple CSV records to w using Write and then calls Flush,
|
||||
// returning any error from the Flush.
|
||||
func (w *Writer) WriteAll(records [][]string) error {
|
||||
for _, record := range records {
|
||||
err := w.Write(record)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return w.w.Flush()
|
||||
}
|
||||
|
||||
// fieldNeedsQuotes reports whether our field must be enclosed in quotes.
|
||||
// Fields with a Comma, fields with a quote or newline, and
|
||||
// fields which start with a space must be enclosed in quotes.
|
||||
// We used to quote empty strings, but we do not anymore (as of Go 1.4).
|
||||
// The two representations should be equivalent, but Postgres distinguishes
|
||||
// quoted vs non-quoted empty string during database imports, and it has
|
||||
// an option to force the quoted behavior for non-quoted CSV but it has
|
||||
// no option to force the non-quoted behavior for quoted CSV, making
|
||||
// CSV with quoted empty strings strictly less useful.
|
||||
// Not quoting the empty string also makes this package match the behavior
|
||||
// of Microsoft Excel and Google Drive.
|
||||
// For Postgres, quote the data terminating string `\.`.
|
||||
func (w *Writer) fieldNeedsQuotes(field string) bool {
|
||||
if field == "" {
|
||||
return false
|
||||
}
|
||||
if field == `\.` || strings.ContainsAny(field, "\r\n"+string(w.Quote)+string(w.Comma)) {
|
||||
return true
|
||||
}
|
||||
|
||||
r1, _ := utf8.DecodeRuneInString(field)
|
||||
return unicode.IsSpace(r1)
|
||||
}
|
|
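The doc comments above describe the intended workflow: configure the exported fields before the first Write, write records, then Flush and check Error (or use WriteAll, which flushes for you). A minimal usage sketch under those assumptions; the import path is hypothetical and would need to match wherever this forked csv package actually lives:

package main

import (
    "os"

    // Hypothetical import path for the forked csv package shown above.
    csv "example.com/forked/csv"
)

func main() {
    w := csv.NewWriter(os.Stdout)
    w.Comma = ';'        // customize the delimiter before the first Write
    w.AlwaysQuote = true // quote every field, not only those that need it

    records := [][]string{
        {"first", "second"},
        {"contains;delimiter", `contains "quote"`},
    }
    if err := w.WriteAll(records); err != nil { // WriteAll also flushes
        panic(err)
    }
    if err := w.Error(); err != nil { // surface any buffered write error
        panic(err)
    }
}
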
@@ -1,102 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in https://golang.org/LICENSE

package csv

import (
    "bytes"
    "errors"
    "testing"
)

var writeTests = []struct {
    Input       [][]string
    Output      string
    Error       error
    UseCRLF     bool
    Comma       rune
    Quote       rune
    AlwaysQuote bool
}{
    {Input: [][]string{{"abc"}}, Output: "abc\n"},
    {Input: [][]string{{"abc"}}, Output: "abc\r\n", UseCRLF: true},
    {Input: [][]string{{`"abc"`}}, Output: `"""abc"""` + "\n"},
    {Input: [][]string{{`a"b`}}, Output: `"a""b"` + "\n"},
    {Input: [][]string{{`"a"b"`}}, Output: `"""a""b"""` + "\n"},
    {Input: [][]string{{" abc"}}, Output: `" abc"` + "\n"},
    {Input: [][]string{{"abc,def"}}, Output: `"abc,def"` + "\n"},
    {Input: [][]string{{"abc", "def"}}, Output: "abc,def\n"},
    {Input: [][]string{{"abc"}, {"def"}}, Output: "abc\ndef\n"},
    {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\ndef\"\n"},
    {Input: [][]string{{"abc\ndef"}}, Output: "\"abc\r\ndef\"\r\n", UseCRLF: true},
    {Input: [][]string{{"abc\rdef"}}, Output: "\"abcdef\"\r\n", UseCRLF: true},
    {Input: [][]string{{"abc\rdef"}}, Output: "\"abc\rdef\"\n", UseCRLF: false},
    {Input: [][]string{{""}}, Output: "\n"},
    {Input: [][]string{{"", ""}}, Output: ",\n"},
    {Input: [][]string{{"", "", ""}}, Output: ",,\n"},
    {Input: [][]string{{"", "", "a"}}, Output: ",,a\n"},
    {Input: [][]string{{"", "a", ""}}, Output: ",a,\n"},
    {Input: [][]string{{"", "a", "a"}}, Output: ",a,a\n"},
    {Input: [][]string{{"a", "", ""}}, Output: "a,,\n"},
    {Input: [][]string{{"a", "", "a"}}, Output: "a,,a\n"},
    {Input: [][]string{{"a", "a", ""}}, Output: "a,a,\n"},
    {Input: [][]string{{"a", "a", "a"}}, Output: "a,a,a\n"},
    {Input: [][]string{{`\.`}}, Output: "\"\\.\"\n"},
    {Input: [][]string{{"x09\x41\xb4\x1c", "aktau"}}, Output: "x09\x41\xb4\x1c,aktau\n"},
    {Input: [][]string{{",x09\x41\xb4\x1c", "aktau"}}, Output: "\",x09\x41\xb4\x1c\",aktau\n"},
    {Input: [][]string{{"a", "a", ""}}, Output: "a|a|\n", Comma: '|'},
    {Input: [][]string{{",", ",", ""}}, Output: ",|,|\n", Comma: '|'},
    {Input: [][]string{{"foo"}}, Comma: '"', Error: errInvalidDelim},
    {Input: [][]string{{"a", "a", ""}}, Quote: '"', AlwaysQuote: true, Output: "\"a\"|\"a\"|\"\"\n", Comma: '|'},
}

func TestWrite(t *testing.T) {
    for n, tt := range writeTests {
        b := &bytes.Buffer{}
        f := NewWriter(b)
        f.UseCRLF = tt.UseCRLF
        if tt.Comma != 0 {
            f.Comma = tt.Comma
        }
        if tt.Quote != 0 {
            f.Quote = tt.Quote
        }
        f.AlwaysQuote = tt.AlwaysQuote
        err := f.WriteAll(tt.Input)
        if err != tt.Error {
            t.Errorf("Unexpected error:\ngot %v\nwant %v", err, tt.Error)
        }
        out := b.String()
        if out != tt.Output {
            t.Errorf("#%d: out=%q want %q", n, out, tt.Output)
        }
    }
}

type errorWriter struct{}

func (e errorWriter) Write(b []byte) (int, error) {
    return 0, errors.New("Test")
}

func TestError(t *testing.T) {
    b := &bytes.Buffer{}
    f := NewWriter(b)
    f.Write([]string{"abc"})
    f.Flush()
    err := f.Error()

    if err != nil {
        t.Errorf("Unexpected error: %s\n", err)
    }

    f = NewWriter(errorWriter{})
    f.Write([]string{"abc"})
    f.Flush()
    err = f.Error()

    if err == nil {
        t.Error("Error should not be nil")
    }
}

@@ -1,41 +0,0 @@
/*
 * Minio Cloud Storage, (C) 2019-2020 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package disk

import (
    "os"

    "github.com/ncw/directio"
    "golang.org/x/sys/unix"
)

// OpenFileDirectIO - bypass kernel cache.
func OpenFileDirectIO(filePath string, flag int, perm os.FileMode) (*os.File, error) {
    return directio.OpenFile(filePath, flag, perm)
}

// DisableDirectIO - disables directio mode.
func DisableDirectIO(f *os.File) error {
    fd := f.Fd()
    _, err := unix.FcntlInt(fd, unix.F_NOCACHE, 0)
    return err
}

// AlignedBlock - pass through to directio implementation.
func AlignedBlock(BlockSize int) []byte {
    return directio.AlignedBlock(BlockSize)
}

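These helpers wrap github.com/ncw/directio so callers can open a file with the kernel page cache bypassed, obtain a suitably aligned buffer, and fall back to cached I/O when alignment can no longer be guaranteed. A rough usage sketch, assuming a hypothetical import path for this disk package and an illustrative file path; directio.BlockSize is the alignment constant exported by the directio library:

package main

import (
    "os"

    "github.com/ncw/directio"

    // Hypothetical import path for the disk package shown above.
    "example.com/pkg/disk"
)

func main() {
    f, err := disk.OpenFileDirectIO("/tmp/data.bin", os.O_CREATE|os.O_WRONLY, 0o644)
    if err != nil {
        panic(err)
    }
    defer f.Close()

    // Direct I/O generally requires aligned buffers and sizes;
    // AlignedBlock returns a buffer that satisfies the alignment.
    buf := disk.AlignedBlock(directio.BlockSize)
    if _, err := f.Write(buf); err != nil {
        panic(err)
    }

    // Switch back to normal (cached) I/O for any trailing unaligned writes.
    if err := disk.DisableDirectIO(f); err != nil {
        panic(err)
    }
}
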
@@ -1,49 +0,0 @@
// +build linux netbsd freebsd

/*
 * Minio Cloud Storage, (C) 2019-2020 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package disk

import (
    "os"
    "syscall"

    "github.com/ncw/directio"
    "golang.org/x/sys/unix"
)

// OpenFileDirectIO - bypass kernel cache.
func OpenFileDirectIO(filePath string, flag int, perm os.FileMode) (*os.File, error) {
    return directio.OpenFile(filePath, flag, perm)
}

// DisableDirectIO - disables directio mode.
func DisableDirectIO(f *os.File) error {
    fd := f.Fd()
    flag, err := unix.FcntlInt(fd, unix.F_GETFL, 0)
    if err != nil {
        return err
    }
    flag = flag & ^(syscall.O_DIRECT)
    _, err = unix.FcntlInt(fd, unix.F_SETFL, flag)
    return err
}

// AlignedBlock - pass through to directio implementation.
func AlignedBlock(BlockSize int) []byte {
    return directio.AlignedBlock(BlockSize)
}

@@ -1,69 +0,0 @@
// +build !linux,!netbsd,!freebsd,!darwin,!openbsd

/*
 * Minio Cloud Storage, (C) 2019-2020 Minio, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package disk

import (
    "os"
)

// OpenBSD, Windows, and illumos do not support O_DIRECT.
// On Windows there is no documentation on disabling O_DIRECT.
// For these systems we do not attempt to build the 'directio' dependency since
// the O_DIRECT symbol may not be exposed, resulting in a failed build.
//
// On illumos an explicit O_DIRECT flag is not necessary for two primary
// reasons. Note that ZFS is effectively the default filesystem on illumos
// systems.
//
// One benefit of using DirectIO on Linux is that the page cache will not be
// polluted with single-access data. The ZFS read cache (ARC) is scan-resistant,
// so there is no risk of polluting the entire cache with data accessed once.
// Another goal of DirectIO is to minimize the mutation of data by the kernel
// before issuing IO to underlying devices. ZFS users often enable features like
// compression and checksumming which currently necessitate mutating data in
// the kernel.
//
// DirectIO semantics for a filesystem like ZFS would be quite different than
// the semantics on filesystems like XFS, and these semantics are not
// implemented at this time.
// For more information on why typical DirectIO semantics do not apply to ZFS,
// see this ZFS-on-Linux commit message:
// https://github.com/openzfs/zfs/commit/a584ef26053065f486d46a7335bea222cb03eeea

// OpenFileDirectIO - wrapper around os.OpenFile; nothing special on these systems.
func OpenFileDirectIO(filePath string, flag int, perm os.FileMode) (*os.File, error) {
    return os.OpenFile(filePath, flag, perm)
}

// DisableDirectIO is a no-op.
func DisableDirectIO(f *os.File) error {
    return nil
}

// AlignedBlock simply returns an unaligned buffer
// for systems that do not support DirectIO.
func AlignedBlock(BlockSize int) []byte {
    return make([]byte, BlockSize)
}

// Fdatasync is a no-op.
func Fdatasync(f *os.File) error {
    return nil
}

@@ -1,34 +0,0 @@
/*
 * MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package disk

// Info is a container for filesystem (statfs-style) statistics. It holds:
// Total - total size of the volume / disk
// Free - free size of the volume / disk
// Files - total inodes available
// Ffree - free inodes available
// FSType - file system type
type Info struct {
    Total  uint64
    Free   uint64
    Files  uint64
    Ffree  uint64
    FSType string

    // Usage is calculated per tenant.
    Usage uint64
}

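Since Info only carries raw counters, callers derive usage figures from it themselves. A minimal sketch of such derived helpers, written as if they lived in the same disk package; they are illustrative only and are not part of the code shown in this diff:

// Hypothetical helpers built on the Info struct above.
func usedBytes(di Info) uint64 {
    if di.Free > di.Total {
        return 0 // defensive: inconsistent statfs values
    }
    return di.Total - di.Free
}

func usedPercent(di Info) float64 {
    if di.Total == 0 {
        return 0
    }
    return float64(usedBytes(di)) * 100 / float64(di.Total)
}
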