Remove legacy subpackage

Evgeniy Kulikov 2020-08-06 13:47:44 +03:00
parent f055e7d269
commit c49d2824a1
389 changed files with 0 additions and 132787 deletions

@@ -1,288 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"encoding/xml"
"io"
"net/http"
"net/url"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/legacy/http"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/bucket/policy"
)
// Data types used for returning dummy access control
// policy XML. These types shouldn't be used elsewhere;
// they are defined only for use in this file.
type grantee struct {
XMLNS string `xml:"xmlns:xsi,attr"`
XMLXSI string `xml:"xsi:type,attr"`
Type string `xml:"Type"`
ID string `xml:"ID,omitempty"`
DisplayName string `xml:"DisplayName,omitempty"`
URI string `xml:"URI,omitempty"`
}
type grant struct {
Grantee grantee `xml:"Grantee"`
Permission string `xml:"Permission"`
}
type accessControlPolicy struct {
XMLName xml.Name `xml:"AccessControlPolicy"`
Owner Owner `xml:"Owner"`
AccessControlList struct {
Grants []grant `xml:"Grant"`
} `xml:"AccessControlList"`
}
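// exampleDummyACL is a hypothetical sketch (not part of the original
// file) showing what the GET handlers below encode: a single canned
// FULL_CONTROL grant. Only encoding/xml behavior is assumed.
func exampleDummyACL() ([]byte, error) {
	acl := &accessControlPolicy{}
	acl.AccessControlList.Grants = append(acl.AccessControlList.Grants, grant{
		Grantee: grantee{
			XMLNS:  "http://www.w3.org/2001/XMLSchema-instance",
			XMLXSI: "CanonicalUser",
			Type:   "CanonicalUser",
		},
		Permission: "FULL_CONTROL",
	})
	// Marshals to roughly:
	//   <AccessControlPolicy><Owner>...</Owner><AccessControlList>
	//     <Grant><Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
	//       xsi:type="CanonicalUser"><Type>CanonicalUser</Type></Grantee>
	//     <Permission>FULL_CONTROL</Permission></Grant>
	//   </AccessControlList></AccessControlPolicy>
	return xml.Marshal(acl)
}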
// PutBucketACLHandler - PUT Bucket ACL
// -----------------
// This operation uses the ACL subresource
// to set the ACL for a bucket. This is a dummy call that
// only responds with success if the ACL is private.
func (api objectAPIHandlers) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketACL")
defer logger.AuditLog(w, r, "PutBucketACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow putBucketACL if the policy action is set; since this is a dummy
// call, we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding, validate that the bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
aclHeader := r.Header.Get(xhttp.AmzACL)
if aclHeader == "" {
acl := &accessControlPolicy{}
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
if err == io.EOF {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingSecurityHeader),
r.URL, guessIsBrowserReq(r))
return
}
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}
// GetBucketACLHandler - GET Bucket ACL
// -----------------
// This operation uses the ACL
// subresource to return the ACL of a specified bucket.
func (api objectAPIHandlers) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketACL")
defer logger.AuditLog(w, r, "GetBucketACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow getBucketACL if the policy action is set; since this is a dummy
// call, we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding, validate that the bucket exists.
_, err := objAPI.GetBucketInfo(ctx, bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
acl := &accessControlPolicy{}
acl.AccessControlList.Grants = append(acl.AccessControlList.Grants, grant{
Grantee: grantee{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: "CanonicalUser",
Type: "CanonicalUser",
},
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}
// PutObjectACLHandler - PUT Object ACL
// -----------------
// This operation uses the ACL subresource
// to set the ACL for an object. This is a dummy call that
// only responds with success if the ACL is private.
func (api objectAPIHandlers) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutObjectACL")
defer logger.AuditLog(w, r, "PutObjectACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := url.PathUnescape(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow putObjectACL if the policy action is set; since this is a dummy
// call, we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding, validate that the object exists.
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
aclHeader := r.Header.Get(xhttp.AmzACL)
if aclHeader == "" {
acl := &accessControlPolicy{}
if err = xmlDecoder(r.Body, acl, r.ContentLength); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if len(acl.AccessControlList.Grants) == 0 {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
if acl.AccessControlList.Grants[0].Permission != "FULL_CONTROL" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
}
if aclHeader != "" && aclHeader != "private" {
writeErrorResponse(ctx, w, toAPIError(ctx, NotImplemented{}), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}
// GetObjectACLHandler - GET Object ACL
// -----------------
// This operation uses the ACL
// subresource to return the ACL of a specified object.
func (api objectAPIHandlers) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetObjectACL")
defer logger.AuditLog(w, r, "GetObjectACL", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
object, err := url.PathUnescape(vars["object"])
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
// Allow getObjectACL if the policy action is set; since this is a dummy
// call, we are simply re-purposing the bucketPolicyAction.
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Before proceeding, validate that the object exists.
_, err = objAPI.GetObjectInfo(ctx, bucket, object, ObjectOptions{})
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
acl := &accessControlPolicy{}
acl.AccessControlList.Grants = append(acl.AccessControlList.Grants, grant{
Grantee: grantee{
XMLNS: "http://www.w3.org/2001/XMLSchema-instance",
XMLXSI: "CanonicalUser",
Type: "CanonicalUser",
},
Permission: "FULL_CONTROL",
})
if err := xml.NewEncoder(w).Encode(acl); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
w.(http.Flusher).Flush()
}
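A hypothetical client-side sketch of the one request shape these dummy ACL handlers accept: the canned "private" ACL sent via the x-amz-acl header. SigV4 request signing is omitted, and the endpoint and bucket are placeholders, so this is illustrative only.

func putPrivateACL() error {
	req, err := http.NewRequest(http.MethodPut, "http://localhost:9000/mybucket?acl", nil)
	if err != nil {
		return err
	}
	// Any other value makes the handlers above respond with NotImplemented.
	req.Header.Set("x-amz-acl", "private")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}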

@@ -1,483 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/legacy/config/cache"
"github.com/minio/minio/legacy/config/etcd"
xldap "github.com/minio/minio/legacy/config/identity/ldap"
"github.com/minio/minio/legacy/config/identity/openid"
"github.com/minio/minio/legacy/config/policy/opa"
"github.com/minio/minio/legacy/config/storageclass"
"github.com/minio/minio/legacy/crypto"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *http.Request) (auth.Credentials, ObjectLayer) {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return auth.Credentials{}, nil
}
// Validate request signature.
cred, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return cred, nil
}
return cred, objectAPI
}
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteConfigKV")
defer logger.AuditLog(w, r, "DeleteConfigKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxEConfigJSONSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
cfg, err := readServerConfig(ctx, objectAPI)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = cfg.DelFrom(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
// SetConfigKVHandler - PUT /minio/admin/v3/set-config-kv
func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigKV")
defer logger.AuditLog(w, r, "SetConfigKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxEConfigJSONSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
cfg, err := readServerConfig(ctx, objectAPI)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
// Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write the config input KV to history.
if err = saveServerConfigHistory(ctx, objectAPI, kvBytes); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Make sure the config written to the backend is encrypted
if globalConfigEncrypted {
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
writeSuccessResponseHeadersOnly(w)
}
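// sketchConfigKVRoundTrip is a hypothetical sketch (not part of the
// original file) of the encryption scheme used by the handlers above:
// request bodies are encrypted client-side with the caller's secret key
// via madmin.EncryptData and decrypted here with madmin.DecryptData.
// The "region name=us-east-1" payload is only an illustration.
func sketchConfigKVRoundTrip(secretKey string) error {
	// Client side: encrypt a config KV line with the admin secret key.
	enc, err := madmin.EncryptData(secretKey, []byte("region name=us-east-1"))
	if err != nil {
		return err
	}
	// Server side (as in the handlers above): decrypt before parsing.
	plain, err := madmin.DecryptData(secretKey, bytes.NewReader(enc))
	if err != nil {
		return err
	}
	_ = plain // the same bytes the handlers feed to cfg.ReadFrom / cfg.DelFrom
	return nil
}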
// GetConfigKVHandler - GET /minio/admin/v3/get-config-kv?key={key}
func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigKV")
defer logger.AuditLog(w, r, "GetConfigKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
cfg := globalServerConfig
if newObjectLayerFn() == nil {
var err error
cfg, err = getValidConfig(objectAPI)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
vars := mux.Vars(r)
var buf = &bytes.Buffer{}
cw := config.NewConfigWriteTo(cfg, vars["key"])
if _, err := cw.WriteTo(buf); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, buf.Bytes())
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
func (a adminAPIHandlers) ClearConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClearConfigHistoryKV")
defer logger.AuditLog(w, r, "ClearConfigHistoryKV", mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
restoreID := vars["restoreId"]
if restoreID == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if restoreID == "all" {
chEntries, err := listServerConfigHistory(ctx, objectAPI, false, -1)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, chEntry := range chEntries {
if err = delServerConfigHistory(ctx, objectAPI, chEntry.RestoreID); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
} else {
if err := delServerConfigHistory(ctx, objectAPI, restoreID); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
}
// RestoreConfigHistoryKVHandler - restores a config with KV settings for the given KV id.
func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RestoreConfigHistoryKV")
defer logger.AuditLog(w, r, "RestoreConfigHistoryKV", mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
restoreID := vars["restoreId"]
if restoreID == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
kvBytes, err := readServerConfigHistory(ctx, objectAPI, restoreID)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
cfg, err := readServerConfig(ctx, objectAPI)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
delServerConfigHistory(ctx, objectAPI, restoreID)
}
// ListConfigHistoryKVHandler - lists all the KV ids.
func (a adminAPIHandlers) ListConfigHistoryKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListConfigHistoryKV")
defer logger.AuditLog(w, r, "ListConfigHistoryKV", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
count, err := strconv.Atoi(vars["count"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
chEntries, err := listServerConfigHistory(ctx, objectAPI, true, count)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(chEntries)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// HelpConfigKVHandler - GET /minio/admin/v3/help-config-kv?subSys={subSys}&key={key}
func (a adminAPIHandlers) HelpConfigKVHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "HelpConfigKV")
defer logger.AuditLog(w, r, "HelpHistoryKV", mustGetClaimsFromToken(r))
_, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
subSys := vars["subSys"]
key := vars["key"]
_, envOnly := r.URL.Query()["env"]
rd, err := GetHelp(subSys, key, envOnly)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
json.NewEncoder(w).Encode(rd)
w.(http.Flusher).Flush()
}
// SetConfigHandler - PUT /minio/admin/v3/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfig")
defer logger.AuditLog(w, r, "SetConfig", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxEConfigJSONSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
cfg := newServerConfig()
if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if err = validateConfig(cfg); err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
return
}
// Update the actual server config on disk.
if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write the config input KV to history.
if err = saveServerConfigHistory(ctx, objectAPI, kvBytes); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Make sure the config written to the backend is encrypted
if globalConfigEncrypted {
saveConfig(GlobalContext, objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
writeSuccessResponseHeadersOnly(w)
}
// GetConfigHandler - GET /minio/admin/v3/config
// Get config.json of this MinIO setup.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfig")
defer logger.AuditLog(w, r, "GetConfig", mustGetClaimsFromToken(r))
cred, objectAPI := validateAdminReqConfigKV(ctx, w, r)
if objectAPI == nil {
return
}
cfg, err := readServerConfig(ctx, objectAPI)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var s strings.Builder
hkvs := config.HelpSubSysMap[""]
var count int
for _, hkv := range hkvs {
count += len(cfg[hkv.Key])
}
for _, hkv := range hkvs {
v := cfg[hkv.Key]
for target, kv := range v {
off := kv.Get(config.Enable) == config.EnableOff
switch hkv.Key {
case config.EtcdSubSys:
off = !etcd.Enabled(kv)
case config.CacheSubSys:
off = !cache.Enabled(kv)
case config.StorageClassSubSys:
off = !storageclass.Enabled(kv)
case config.KmsVaultSubSys:
off = !crypto.EnabledVault(kv)
case config.KmsKesSubSys:
off = !crypto.EnabledKes(kv)
case config.PolicyOPASubSys:
off = !opa.Enabled(kv)
case config.IdentityOpenIDSubSys:
off = !openid.Enabled(kv)
case config.IdentityLDAPSubSys:
off = !xldap.Enabled(kv)
}
if off {
s.WriteString(config.KvComment)
s.WriteString(config.KvSpaceSeparator)
}
s.WriteString(hkv.Key)
if target != config.Default {
s.WriteString(config.SubSystemSeparator)
s.WriteString(target)
}
s.WriteString(config.KvSpaceSeparator)
s.WriteString(kv.String())
count--
if count > 0 {
s.WriteString(config.KvNewline)
}
}
}
password := cred.SecretKey
econfigData, err := madmin.EncryptData(password, []byte(s.String()))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
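After madmin.DecryptData on the client, GetConfigHandler's response is the newline-separated KV text assembled above, with disabled targets prefixed by the comment marker. An illustrative (not verbatim) sample, with made-up values:

    region name=us-east-1
    cache:images enable=on
    # etcd endpoints=http://localhost:2379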

@@ -1,917 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
var cred auth.Credentials
var adminAPIErr APIErrorCode
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return nil, cred
}
// Validate request signature.
cred, adminAPIErr = checkAdminRequestAuthType(ctx, r, action, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
return nil, cred
}
return objectAPI, cred
}
// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveUser")
defer logger.AuditLog(w, r, "RemoveUser", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
ok, err := globalIAMSys.IsTempUser(accessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if ok {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}
if err := globalIAMSys.DeleteUser(accessKey); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to delete user.
for _, nerr := range globalNotificationSys.DeleteUser(accessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// ListUsers - GET /minio/admin/v3/list-users
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListUsers")
defer logger.AuditLog(w, r, "ListUsers", mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
if objectAPI == nil {
return
}
password := cred.SecretKey
allCredentials, err := globalIAMSys.ListUsers()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(allCredentials)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
econfigData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
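// sketchDecodeListUsers is a hypothetical client-side counterpart to
// ListUsers above: the response body is the JSON user map encrypted
// with the requester's secret key. Only madmin.DecryptData and
// encoding/json are assumed; the map type mirrors what ListUsers
// marshals.
func sketchDecodeListUsers(secretKey string, body io.Reader) (map[string]madmin.UserInfo, error) {
	raw, err := madmin.DecryptData(secretKey, body)
	if err != nil {
		return nil, err
	}
	users := make(map[string]madmin.UserInfo)
	if err := json.Unmarshal(raw, &users); err != nil {
		return nil, err
	}
	return users, nil
}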
// GetUserInfo - GET /minio/admin/v3/user-info
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetUserInfo")
defer logger.AuditLog(w, r, "GetUserInfo", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
name := vars["accessKey"]
userInfo, err := globalIAMSys.GetUserInfo(name)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(userInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, data)
}
// UpdateGroupMembers - PUT /minio/admin/v3/update-group-members
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")
defer logger.AuditLog(w, r, "UpdateGroupMembers", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
if objectAPI == nil {
return
}
defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
var updReq madmin.GroupAddRemove
err = json.Unmarshal(data, &updReq)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if updReq.IsRemove {
err = globalIAMSys.RemoveUsersFromGroup(updReq.Group, updReq.Members)
} else {
err = globalIAMSys.AddUsersToGroup(updReq.Group, updReq.Members)
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to load group.
for _, nerr := range globalNotificationSys.LoadGroup(updReq.Group) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// GetGroup - GET /minio/admin/v3/group?group=mygroup1
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")
defer logger.AuditLog(w, r, "GetGroup", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
group := vars["group"]
gdesc, err := globalIAMSys.GetGroupDescription(group)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(gdesc)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// ListGroups - GET /minio/admin/v3/groups
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")
defer logger.AuditLog(w, r, "ListGroups", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
if objectAPI == nil {
return
}
groups, err := globalIAMSys.ListGroups()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
body, err := json.Marshal(groups)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, body)
}
// SetGroupStatus - PUT /minio/admin/v3/set-group-status?group=mygroup1&status=enabled
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")
defer logger.AuditLog(w, r, "SetGroupStatus", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
group := vars["group"]
status := vars["status"]
var err error
if status == statusEnabled {
err = globalIAMSys.SetGroupStatus(group, true)
} else if status == statusDisabled {
err = globalIAMSys.SetGroupStatus(group, false)
} else {
err = errInvalidArgument
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload user.
for _, nerr := range globalNotificationSys.LoadGroup(group) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// SetUserStatus - PUT /minio/admin/v3/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")
defer logger.AuditLog(w, r, "SetUserStatus", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
status := vars["status"]
// Not allowed to change the status of the root credential.
if accessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if err := globalIAMSys.SetUserStatus(accessKey, madmin.AccountStatus(status)); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload user.
for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// AddUser - PUT /minio/admin/v3/add-user?accessKey=<access_key>
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddUser")
defer logger.AuditLog(w, r, "AddUser", mustGetClaimsFromToken(r))
objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
accessKey := vars["accessKey"]
// Not allowed to add a user with the same access key as the root credential.
if accessKey == globalActiveCred.AccessKey {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserInvalidArgument), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxEConfigJSONSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
return
}
password := cred.SecretKey
configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
var uinfo madmin.UserInfo
if err = json.Unmarshal(configBytes, &uinfo); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
if err = globalIAMSys.SetUser(accessKey, uinfo); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload the user
for _, nerr := range globalNotificationSys.LoadUser(accessKey, false) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// AddServiceAccount - PUT /minio/admin/v3/add-service-account
func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddServiceAccount")
defer logger.AuditLog(w, r, "AddServiceAccount", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
var createReq madmin.AddServiceAccountReq
if err = json.Unmarshal(reqBytes, &createReq); err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err), r.URL)
return
}
// Disallow creating service accounts by root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
newCred, err := globalIAMSys.NewServiceAccount(ctx, parentUser, createReq.Policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to load the new service account
for _, nerr := range globalNotificationSys.LoadServiceAccount(newCred.AccessKey) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
var createResp = madmin.AddServiceAccountResp{
Credentials: auth.Credentials{
AccessKey: newCred.AccessKey,
SecretKey: newCred.SecretKey,
},
}
data, err := json.Marshal(createResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(password, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// ListServiceAccounts - GET /minio/admin/v3/list-service-accounts
func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListServiceAccounts")
defer logger.AuditLog(w, r, "ListServiceAccounts", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow listing service accounts for the root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, parentUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var listResp = madmin.ListServiceAccountsResp{
Accounts: serviceAccounts,
}
data, err := json.Marshal(listResp)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
// DeleteServiceAccount - DELETE /minio/admin/v3/delete-service-account
func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteServiceAccount")
defer logger.AuditLog(w, r, "DeleteServiceAccount", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, _, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Disallow deleting service accounts by the root user.
if owner {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminAccountNotEligible), r.URL)
return
}
serviceAccount := mux.Vars(r)["accessKey"]
if serviceAccount == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminInvalidArgument), r.URL)
return
}
user, err := globalIAMSys.GetServiceAccountParent(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
parentUser := cred.AccessKey
if cred.ParentUser != "" {
parentUser = cred.ParentUser
}
if parentUser != user || user == "" {
// The service account belongs to another user or does not
// exist; return a not-found error to mitigate brute-force
// attacks.
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServiceAccountNotFound), r.URL)
return
}
err = globalIAMSys.DeleteServiceAccount(ctx, serviceAccount)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessNoContent(w)
}
// AccountUsageInfoHandler returns usage information for the requesting account
func (a adminAPIHandlers) AccountUsageInfoHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AccountUsageInfo")
defer logger.AuditLog(w, r, "AccountUsageInfo", mustGetClaimsFromToken(r))
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
isAllowedAccess := func(bucketName string) (rd, wr bool) {
// Use the following trick to filter in place
// https://github.com/golang/go/wiki/SliceTricks#filter-in-place
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.ListBucketAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
rd = true
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
ObjectName: "",
Claims: claims,
}) {
wr = true
}
return rd, wr
}
buckets, err := objectAPI.ListBuckets(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Load the latest calculated data usage
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
// log the error, continue with the accounting response
logger.LogIf(ctx, err)
}
accountName := cred.AccessKey
if cred.ParentUser != "" {
accountName = cred.ParentUser
}
acctInfo := madmin.AccountUsageInfo{
AccountName: accountName,
}
for _, bucket := range buckets {
rd, wr := isAllowedAccess(bucket.Name)
if rd || wr {
var size uint64
// Fetch the data usage of the current bucket
if !dataUsageInfo.LastUpdate.IsZero() {
size = dataUsageInfo.BucketsUsage[bucket.Name].Size
}
acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketUsageInfo{
Name: bucket.Name,
Created: bucket.Created,
Size: size,
Access: madmin.AccountAccess{
Read: rd,
Write: wr,
},
})
}
}
usageInfoJSON, err := json.Marshal(acctInfo)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, usageInfoJSON)
}
// InfoCannedPolicyV2 - GET /minio/admin/v2/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicyV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicyV2")
defer logger.AuditLog(w, r, "InfoCannedPolicyV2", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
data, err := json.Marshal(policy)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.Write(data)
w.(http.Flusher).Flush()
}
// InfoCannedPolicy - GET /minio/admin/v3/info-canned-policy?name={policyName}
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "InfoCannedPolicy")
defer logger.AuditLog(w, r, "InfoCannedPolicy", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
if objectAPI == nil {
return
}
policy, err := globalIAMSys.InfoPolicy(mux.Vars(r)["name"])
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
json.NewEncoder(w).Encode(policy)
w.(http.Flusher).Flush()
}
// ListCannedPoliciesV2 - GET /minio/admin/v2/list-canned-policies
func (a adminAPIHandlers) ListCannedPoliciesV2(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPoliciesV2")
defer logger.AuditLog(w, r, "ListCannedPoliciesV2", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
policies, err := globalIAMSys.ListPolicies()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
policyMap := make(map[string][]byte, len(policies))
for k, p := range policies {
var err error
policyMap[k], err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
}
if err = json.NewEncoder(w).Encode(policyMap); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// ListCannedPolicies - GET /minio/admin/v3/list-canned-policies
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListCannedPolicies")
defer logger.AuditLog(w, r, "ListCannedPolicies", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
if objectAPI == nil {
return
}
policies, err := globalIAMSys.ListPolicies()
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
var newPolicies = make(map[string]iampolicy.Policy)
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
continue
}
newPolicies[name] = p
}
if err = json.NewEncoder(w).Encode(newPolicies); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
w.(http.Flusher).Flush()
}
// RemoveCannedPolicy - DELETE /minio/admin/v3/remove-canned-policy?name=<policy_name>
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "RemoveCannedPolicy")
defer logger.AuditLog(w, r, "RemoveCannedPolicy", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
policyName := vars["name"]
if err := globalIAMSys.DeletePolicy(policyName); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to delete policy
for _, nerr := range globalNotificationSys.DeletePolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
// AddCannedPolicy - PUT /minio/admin/v3/add-canned-policy?name=<policy_name>
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "AddCannedPolicy")
defer logger.AuditLog(w, r, "AddCannedPolicy", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
policyName := vars["name"]
// Error out if Content-Length is missing.
if r.ContentLength <= 0 {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL)
return
}
// Error out if Content-Length is beyond allowed size.
if r.ContentLength > maxBucketPolicySize {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrEntityTooLarge), r.URL)
return
}
iamPolicy, err := iampolicy.ParseConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Version in policy must not be empty
if iamPolicy.Version == "" {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL)
return
}
if err = globalIAMSys.SetPolicy(policyName, *iamPolicy); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload policy
for _, nerr := range globalNotificationSys.LoadPolicy(policyName) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}
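// Note (illustrative, not from the original commit): AddCannedPolicy
// above parses a standard IAM policy document via iampolicy.ParseConfig
// and rejects an empty Version. A minimal policy that passes that check
// (bucket name is a placeholder):
//
//	{
//	  "Version": "2012-10-17",
//	  "Statement": [{
//	    "Effect": "Allow",
//	    "Action": ["s3:GetObject"],
//	    "Resource": ["arn:aws:s3:::mybucket/*"]
//	  }]
//	}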
// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetPolicyForUserOrGroup")
defer logger.AuditLog(w, r, "SetPolicyForUserOrGroup", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
if objectAPI == nil {
return
}
vars := mux.Vars(r)
policyName := vars["policyName"]
entityName := vars["userOrGroup"]
isGroup := vars["isGroup"] == "true"
if !isGroup {
ok, err := globalIAMSys.IsTempUser(entityName)
if err != nil && err != errNoSuchUser {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if ok {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}
}
if err := globalIAMSys.PolicyDBSet(entityName, policyName, isGroup); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Notify all other MinIO peers to reload policy
for _, nerr := range globalNotificationSys.LoadPolicyMapping(entityName, isGroup) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}

File diff suppressed because it is too large

@@ -1,380 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"sync"
"testing"
"github.com/gorilla/mux"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/madmin"
)
// adminXLTestBed - encapsulates subsystems that need to be set up for
// admin-handler unit tests.
type adminXLTestBed struct {
xlDirs []string
objLayer ObjectLayer
router *mux.Router
}
// prepareAdminXLTestBed - helper function that sets up a single-node
// XL backend for admin-handler tests.
func prepareAdminXLTestBed(ctx context.Context) (*adminXLTestBed, error) {
// reset global variables to start afresh.
resetTestGlobals()
// Set globalIsXL to indicate that the setup uses an erasure
// code backend.
globalIsXL = true
// Initializing objectLayer for HealFormatHandler.
objLayer, xlDirs, xlErr := initTestXLObjLayer(ctx)
if xlErr != nil {
return nil, xlErr
}
// Initialize minio server config.
if err := newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
return nil, err
}
// Initialize boot time
globalBootTime = UTCNow()
globalEndpoints = mustGetZoneEndpoints(xlDirs...)
newAllSubsystems()
initAllSubsystems(ctx, objLayer)
// Setup admin mgmt REST API handlers.
adminRouter := mux.NewRouter()
registerAdminRouter(adminRouter, true, true)
return &adminXLTestBed{
xlDirs: xlDirs,
objLayer: objLayer,
router: adminRouter,
}, nil
}
// TearDown - method that resets the test bed for subsequent unit
// tests to start afresh.
func (atb *adminXLTestBed) TearDown() {
removeRoots(atb.xlDirs)
resetTestGlobals()
}
// initTestXLObjLayer - Helper function to initialize an XL-based object
// layer and set globalObjectAPI.
func initTestXLObjLayer(ctx context.Context) (ObjectLayer, []string, error) {
xlDirs, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
}
endpoints := mustGetNewEndpoints(xlDirs...)
storageDisks, format, err := waitForFormatXL(true, endpoints, 1, 1, 16, "")
if err != nil {
removeRoots(xlDirs)
return nil, nil, err
}
globalPolicySys = NewPolicySys()
objLayer, err := newXLSets(ctx, endpoints, storageDisks, format)
if err != nil {
return nil, nil, err
}
// Make objLayer available to all internal services via globalObjectAPI.
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
return objLayer, xlDirs, nil
}
// cmdType - Represents different service subcommands like status, stop
// and restart.
type cmdType int
const (
restartCmd cmdType = iota
stopCmd
)
// toServiceSignal - Helper function that translates a given cmdType
// value to its corresponding serviceSignal value.
func (c cmdType) toServiceSignal() serviceSignal {
switch c {
case restartCmd:
return serviceRestart
case stopCmd:
return serviceStop
}
return serviceRestart
}
func (c cmdType) toServiceAction() madmin.ServiceAction {
switch c {
case restartCmd:
return madmin.ServiceActionRestart
case stopCmd:
return madmin.ServiceActionStop
}
return madmin.ServiceActionRestart
}
// testServiceSignalReceiver - Helper function that simulates a
// goroutine waiting on a service signal.
func testServiceSignalReceiver(cmd cmdType, t *testing.T) {
expectedCmd := cmd.toServiceSignal()
serviceCmd := <-globalServiceSignalCh
if serviceCmd != expectedCmd {
t.Errorf("Expected service command %v but received %v", expectedCmd, serviceCmd)
}
}
// getServiceCmdRequest - Constructs a management REST API request for service
// subcommands for a given cmdType value.
func getServiceCmdRequest(cmd cmdType, cred auth.Credentials) (*http.Request, error) {
queryVal := url.Values{}
queryVal.Set("action", string(cmd.toServiceAction()))
resource := adminPathPrefix + adminAPIVersionPrefix + "/service?" + queryVal.Encode()
req, err := newTestRequest(http.MethodPost, resource, 0, nil)
if err != nil {
return nil, err
}
// management REST API uses signature V4 for authentication.
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
return nil, err
}
return req, nil
}
// testServicesCmdHandler - parametrizes service subcommand tests on
// cmdType value.
func testServicesCmdHandler(cmd cmdType, t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls. Note: In a
// single node setup, this degenerates to a simple function
// call under the hood.
globalMinioAddr = "127.0.0.1:9000"
var wg sync.WaitGroup
// Set up a goroutine to simulate ServerRouter's
// handleServiceSignals for stop and restart commands.
if cmd == restartCmd {
wg.Add(1)
go func() {
defer wg.Done()
testServiceSignalReceiver(cmd, t)
}()
}
credentials := globalActiveCred
req, err := getServiceCmdRequest(cmd, credentials)
if err != nil {
t.Fatalf("Failed to build service status request %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
resp, _ := ioutil.ReadAll(rec.Body)
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}
// Wait until testServiceSignalReceiver() called in a goroutine quits.
wg.Wait()
}
// Test for service restart management REST API.
func TestServiceRestartHandler(t *testing.T) {
testServicesCmdHandler(restartCmd, t)
}
// buildAdminRequest - helper function to build an admin API request.
func buildAdminRequest(queryVal url.Values, method, path string,
contentLength int64, bodySeeker io.ReadSeeker) (*http.Request, error) {
req, err := newTestRequest(method,
adminPathPrefix+adminAPIVersionPrefix+path+"?"+queryVal.Encode(),
contentLength, bodySeeker)
if err != nil {
return nil, err
}
cred := globalActiveCred
err = signRequestV4(req, cred.AccessKey, cred.SecretKey)
if err != nil {
return nil, err
}
return req, nil
}
func TestAdminServerInfo(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
adminTestBed, err := prepareAdminXLTestBed(ctx)
if err != nil {
t.Fatal("Failed to initialize a single node XL backend for admin handler tests.")
}
defer adminTestBed.TearDown()
// Initialize admin peers to make admin RPC calls.
globalMinioAddr = "127.0.0.1:9000"
// Prepare query params for set-config mgmt REST API.
queryVal := url.Values{}
queryVal.Set("info", "")
req, err := buildAdminRequest(queryVal, http.MethodGet, "/info", 0, nil)
if err != nil {
t.Fatalf("Failed to construct get-config object request - %v", err)
}
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Errorf("Expected to succeed but failed with %d", rec.Code)
}
results := madmin.InfoMessage{}
err = json.NewDecoder(rec.Body).Decode(&results)
if err != nil {
t.Fatalf("Failed to decode set config result json %v", err)
}
if results.Region != globalMinioDefaultRegion {
t.Errorf("Expected %s, got %s", globalMinioDefaultRegion, results.Region)
}
}
// TestToAdminAPIErrCode - test for toAdminAPIErrCode helper function.
func TestToAdminAPIErrCode(t *testing.T) {
testCases := []struct {
err error
expectedAPIErr APIErrorCode
}{
// 1. Server not in quorum.
{
err: errXLWriteQuorum,
expectedAPIErr: ErrAdminConfigNoQuorum,
},
// 2. No error.
{
err: nil,
expectedAPIErr: ErrNone,
},
// 3. Non-admin API specific error.
{
err: errDiskNotFound,
expectedAPIErr: toAPIErrorCode(GlobalContext, errDiskNotFound),
},
}
for i, test := range testCases {
actualErr := toAdminAPIErrCode(GlobalContext, test.err)
if actualErr != test.expectedAPIErr {
t.Errorf("Test %d: Expected %v but received %v",
i+1, test.expectedAPIErr, actualErr)
}
}
}
func TestExtractHealInitParams(t *testing.T) {
mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
v := url.Values{}
if clientToken != "" {
v.Add(string(mgmtClientToken), clientToken)
}
if forceStart {
v.Add(string(mgmtForceStart), "")
}
if forceStop {
v.Add(string(mgmtForceStop), "")
}
return v
}
qParmsArr := []url.Values{
// Invalid cases
mkParams("", true, true),
mkParams("111", true, true),
mkParams("111", true, false),
mkParams("111", false, true),
// Valid cases follow
mkParams("", true, false),
mkParams("", false, true),
mkParams("", false, false),
mkParams("111", false, false),
}
varsArr := []map[string]string{
// Invalid cases
{string(mgmtPrefix): "objprefix"},
// Valid cases
{},
{string(mgmtBucket): "bucket"},
{string(mgmtBucket): "bucket", string(mgmtPrefix): "objprefix"},
}
// Body is always valid - we do not test JSON decoding.
body := `{"recursive": false, "dryRun": true, "remove": false, "scanMode": 0}`
// Test all combinations!
for pIdx, parms := range qParmsArr {
for vIdx, vars := range varsArr {
_, err := extractHealInitParams(vars, parms, bytes.NewBuffer([]byte(body)))
isErrCase := false
if pIdx < 4 || vIdx < 1 {
isErrCase = true
}
if err != ErrNone && !isErrCase {
t.Errorf("Got unexpected error: %v %v %v", pIdx, vIdx, err)
} else if err == ErrNone && isErrCase {
t.Errorf("Got no error but expected one: %v %v", pIdx, vIdx)
}
}
}
}

View file

@ -1,873 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"encoding/json"
"fmt"
"net/http"
"strings"
"sync"
"time"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/madmin"
)
// healStatusSummary - overall short summary of a healing sequence
type healStatusSummary string
// healStatusSummary constants
const (
	healNotStartedStatus healStatusSummary = "not started"
	healRunningStatus    healStatusSummary = "running"
	healStoppedStatus    healStatusSummary = "stopped"
	healFinishedStatus   healStatusSummary = "finished"
)
const (
// a heal sequence with this many un-consumed heal result
// items blocks until heal-status consumption resumes or is
// aborted due to timeout.
maxUnconsumedHealResultItems = 1000
// if no heal-results are consumed (via the heal-status API)
// for this timeout duration, the heal sequence is aborted.
healUnconsumedTimeout = 24 * time.Hour
// time-duration to keep heal sequence state after it
// completes.
keepHealSeqStateDuration = time.Minute * 10
// nopHeal is a no-op healing action used to
// wait for the current healing operation to finish
nopHeal = ""
)
var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAPIError(ctx, err)
return fmt.Errorf("Heal internal error: %s: %s",
apiErr.Code, apiErr.Description)
}
)
// healSequenceStatus - accumulated status of the heal sequence
type healSequenceStatus struct {
// summary and detail for failures
Summary healStatusSummary `json:"Summary"`
FailureDetail string `json:"Detail,omitempty"`
StartTime time.Time `json:"StartTime"`
// settings for the heal sequence
HealSettings madmin.HealOpts `json:"Settings"`
// slice of available heal result records
Items []madmin.HealResultItem `json:"Items"`
}
// structure to hold state of all heal sequences in server memory
type allHealState struct {
sync.Mutex
// map of heal path to heal sequence
healSeqMap map[string]*healSequence
}
// newHealState - initialize global heal state management
func newHealState() *allHealState {
healState := &allHealState{
healSeqMap: make(map[string]*healSequence),
}
go healState.periodicHealSeqsClean(GlobalContext)
return healState
}
func (ahs *allHealState) periodicHealSeqsClean(ctx context.Context) {
// Launch clean-up routine to remove this heal sequence (after
// it ends) from the global state after timeout has elapsed.
for {
select {
case <-time.After(time.Minute * 5):
now := UTCNow()
ahs.Lock()
for path, h := range ahs.healSeqMap {
if h.hasEnded() && h.endTime.Add(keepHealSeqStateDuration).Before(now) {
delete(ahs.healSeqMap, path)
}
}
ahs.Unlock()
case <-ctx.Done():
// server could be restarting - need
// to exit immediately
return
}
}
}
// getHealSequenceByToken - Retrieve a heal sequence by token. The second
// return value reports whether such a heal sequence exists.
func (ahs *allHealState) getHealSequenceByToken(token string) (h *healSequence, exists bool) {
ahs.Lock()
defer ahs.Unlock()
for _, healSeq := range ahs.healSeqMap {
if healSeq.clientToken == token {
return healSeq, true
}
}
return nil, false
}
// getHealSequence - Retrieve a heal sequence by path. The second
// return value reports whether a heal sequence exists at that path.
func (ahs *allHealState) getHealSequence(path string) (h *healSequence, exists bool) {
ahs.Lock()
defer ahs.Unlock()
h, exists = ahs.healSeqMap[path]
return h, exists
}
func (ahs *allHealState) stopHealSequence(path string) ([]byte, APIError) {
var hsp madmin.HealStopSuccess
he, exists := ahs.getHealSequence(path)
if !exists {
hsp = madmin.HealStopSuccess{
ClientToken: "invalid",
StartTime: UTCNow(),
}
} else {
hsp = madmin.HealStopSuccess{
ClientToken: he.clientToken,
ClientAddress: he.clientAddress,
StartTime: he.startTime,
}
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
ahs.Lock()
defer ahs.Unlock()
// Heal sequence explicitly stopped, remove it.
delete(ahs.healSeqMap, path)
}
b, err := json.Marshal(&hsp)
return b, toAdminAPIErr(GlobalContext, err)
}
// LaunchNewHealSequence - launches a background routine that performs
// healing according to the healSequence argument. For each heal
// sequence, state is stored in the `globalAllHealState`, which is a
// map of the heal path to `healSequence` which holds state about the
// heal sequence.
//
// Heal results are persisted in server memory for
// `keepHealSeqStateDuration`. This function also launches a
// background routine to clean up heal results after the
// aforementioned duration.
func (ahs *allHealState) LaunchNewHealSequence(h *healSequence) (
respBytes []byte, apiErr APIError, errMsg string) {
existsAndLive := false
he, exists := ahs.getHealSequence(h.path)
if exists {
existsAndLive = !he.hasEnded()
}
if existsAndLive {
// A heal sequence exists on the given path.
if h.forceStarted {
// stop the running heal sequence - wait for it to finish.
he.stop()
for !he.hasEnded() {
time.Sleep(1 * time.Second)
}
} else {
errMsg = "Heal is already running on the given path " +
"(use force-start option to stop and start afresh). " +
fmt.Sprintf("The heal was started by IP %s at %s, token is %s",
h.clientAddress, h.startTime.Format(http.TimeFormat), h.clientToken)
return nil, errorCodes.ToAPIErr(ErrHealAlreadyRunning), errMsg
}
}
ahs.Lock()
defer ahs.Unlock()
// Check if new heal sequence to be started overlaps with any
// existing, running sequence
for k, hSeq := range ahs.healSeqMap {
if !hSeq.hasEnded() && (HasPrefix(k, h.path) || HasPrefix(h.path, k)) {
errMsg = "The provided heal sequence path overlaps with an existing " +
fmt.Sprintf("heal path: %s", k)
return nil, errorCodes.ToAPIErr(ErrHealOverlappingPaths), errMsg
}
}
// Add heal state and start sequence
ahs.healSeqMap[h.path] = h
// Launch top-level background heal go-routine
go h.healSequenceStart()
b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: h.clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
logger.LogIf(h.ctx, err)
return nil, toAPIError(h.ctx, err), ""
}
return b, noError, ""
}
// PopHealStatusJSON - Called by heal-status API. It fetches the heal
// status results from global state and returns its JSON
// representation. The clientToken helps ensure there aren't
// conflicting clients fetching status.
func (ahs *allHealState) PopHealStatusJSON(path string,
clientToken string) ([]byte, APIErrorCode) {
// fetch heal state for given path
h, exists := ahs.getHealSequence(path)
if !exists {
// If there is no such heal sequence, return error.
return nil, ErrHealNoSuchProcess
}
// Check if client-token is valid
if clientToken != h.clientToken {
return nil, ErrHealInvalidClientToken
}
// Take lock to access and update the heal-sequence
h.mutex.Lock()
defer h.mutex.Unlock()
numItems := len(h.currentStatus.Items)
// calculate index of most recently available heal result
// record.
lastResultIndex := h.lastSentResultIndex
if numItems > 0 {
lastResultIndex = h.currentStatus.Items[numItems-1].ResultIndex
}
h.lastSentResultIndex = lastResultIndex
jbytes, err := json.Marshal(h.currentStatus)
if err != nil {
h.currentStatus.Items = nil
logger.LogIf(h.ctx, err)
return nil, ErrInternalError
}
h.currentStatus.Items = nil
return jbytes, ErrNone
}
// healSource denotes single entity and heal option.
type healSource struct {
path string // entity path (format, buckets, objects) to heal
opts *madmin.HealOpts // optional heal option overrides default setting
}
// healSequence - state for each heal sequence initiated on the
// server.
type healSequence struct {
// bucket and prefix on which the heal sequence was initiated
bucket, objPrefix string
// path is just pathJoin(bucket, objPrefix)
path string
// A channel of entities (format, buckets, objects) to heal
sourceCh chan healSource
// A channel of entities with heal result
respCh chan healResult
// Report healing progress
reportProgress bool
// time at which heal sequence was started
startTime time.Time
// time at which heal sequence has ended
endTime time.Time
// Heal client info
clientToken, clientAddress string
// was this heal sequence force started?
forceStarted bool
// heal settings applied to this heal sequence
settings madmin.HealOpts
// current accumulated status of the heal sequence
currentStatus healSequenceStatus
// channel signaled by background routine when traversal has
// completed
traverseAndHealDoneCh chan error
// canceler to cancel heal sequence.
cancelCtx context.CancelFunc
// the last result index sent to client
lastSentResultIndex int64
// Number of total items scanned against item type
scannedItemsMap map[madmin.HealItemType]int64
// Number of total items healed against item type
healedItemsMap map[madmin.HealItemType]int64
// Number of total items where healing failed against endpoint and drive state
healFailedItemsMap map[string]int64
// The time of the last scan/heal activity
lastHealActivity time.Time
// Holds the request-info for logging
ctx context.Context
// used to lock this structure as it is concurrently accessed
mutex sync.RWMutex
}
// newHealSequence - creates a new healSequence; assumes bucket and
// objPrefix are already validated.
func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
hs madmin.HealOpts, forceStart bool) *healSequence {
reqInfo := &logger.ReqInfo{RemoteHost: clientAddr, API: "Heal", BucketName: bucket}
reqInfo.AppendTags("prefix", objPrefix)
ctx, cancel := context.WithCancel(logger.SetReqInfo(ctx, reqInfo))
return &healSequence{
respCh: make(chan healResult),
bucket: bucket,
objPrefix: objPrefix,
path: pathJoin(bucket, objPrefix),
reportProgress: true,
startTime: UTCNow(),
clientToken: mustGetUUID(),
clientAddress: clientAddr,
forceStarted: forceStart,
settings: hs,
currentStatus: healSequenceStatus{
Summary: healNotStartedStatus,
HealSettings: hs,
},
traverseAndHealDoneCh: make(chan error),
cancelCtx: cancel,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[string]int64),
}
}
// resetHealStatusCounters - reset the healSequence status counters between
// each monthly background heal scanning activity.
// This is used only in the background healing scenario, where a
// single long-running healSequence reactively heals objects
// passed to its sourceCh.
func (h *healSequence) resetHealStatusCounters() {
h.mutex.Lock()
defer h.mutex.Unlock()
h.currentStatus.Items = []madmin.HealResultItem{}
h.lastSentResultIndex = 0
h.scannedItemsMap = make(map[madmin.HealItemType]int64)
h.healedItemsMap = make(map[madmin.HealItemType]int64)
h.healFailedItemsMap = make(map[string]int64)
}
// getScannedItemsCount - returns a count of all scanned items
func (h *healSequence) getScannedItemsCount() int64 {
var count int64
h.mutex.RLock()
defer h.mutex.RUnlock()
for _, v := range h.scannedItemsMap {
count = count + v
}
return count
}
// getScannedItemsMap - returns map of all scanned items against type
func (h *healSequence) getScannedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.scannedItemsMap))
for k, v := range h.scannedItemsMap {
retMap[k] = v
}
return retMap
}
// getHealedItemsMap - returns the map of all healed items against type
func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[madmin.HealItemType]int64, len(h.healedItemsMap))
for k, v := range h.healedItemsMap {
retMap[k] = v
}
return retMap
}
// gethealFailedItemsMap - returns a map of all items for which heal
// failed, keyed by drive endpoint and state
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
return retMap
}
// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
select {
case <-h.ctx.Done():
return true
default:
return false
}
}
// check if the heal sequence has ended
func (h *healSequence) hasEnded() bool {
h.mutex.RLock()
ended := len(h.currentStatus.Items) == 0 || h.currentStatus.Summary == healStoppedStatus || h.currentStatus.Summary == healFinishedStatus
h.mutex.RUnlock()
return ended
}
// stops the heal sequence - safe to call multiple times.
func (h *healSequence) stop() {
h.cancelCtx()
}
// pushHealResultItem - pushes a heal result item for consumption in
// the heal-status API. It blocks if there are
// maxUnconsumedHealResultItems. When it blocks, the heal sequence
// routine is effectively paused - this happens when the server has
// accumulated the maximum number of heal records per heal
// sequence. When the client consumes further records, the heal
// sequence automatically resumes. The return value indicates if the
// operation succeeded.
func (h *healSequence) pushHealResultItem(r madmin.HealResultItem) error {
// start a timer to impose an upper time limit on finding an
// empty slot to add the given heal result - if no slot is found,
// the server is holding the maximum number of heal results in
// memory and the client has not consumed them for too long.
unconsumedTimer := time.NewTimer(healUnconsumedTimeout)
defer func() {
// stop the timeout timer so it is garbage collected.
if !unconsumedTimer.Stop() {
<-unconsumedTimer.C
}
}()
var itemsLen int
for {
h.mutex.Lock()
itemsLen = len(h.currentStatus.Items)
if itemsLen == maxUnconsumedHealResultItems {
// wait for a second, or quit if an external
// stop signal is received or the
// unconsumedTimer fires.
select {
// Check after a second
case <-time.After(time.Second):
h.mutex.Unlock()
continue
case <-h.ctx.Done():
h.mutex.Unlock()
// discard result and return.
return errHealStopSignalled
// Timeout if no results consumed for too long.
case <-unconsumedTimer.C:
h.mutex.Unlock()
return errHealIdleTimeout
}
}
break
}
// Set the correct result index for the new result item
if itemsLen > 0 {
r.ResultIndex = 1 + h.currentStatus.Items[itemsLen-1].ResultIndex
} else {
r.ResultIndex = 1 + h.lastSentResultIndex
}
// append to results
h.currentStatus.Items = append(h.currentStatus.Items, r)
// release lock
h.mutex.Unlock()
return nil
}
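The deferred timer cleanup above follows the standard stop-and-drain idiom from the time package. Below is a minimal self-contained sketch of the same pattern, using a non-blocking drain, a defensive variant that also works when the channel may already have been received from.

package main

import (
	"fmt"
	"time"
)

func main() {
	timer := time.NewTimer(time.Hour)
	defer func() {
		// Stop reports false if the timer already fired or was stopped;
		// drain the channel in that case so the timer can be collected.
		if !timer.Stop() {
			select {
			case <-timer.C:
			default:
			}
		}
	}()

	select {
	case <-timer.C:
		fmt.Println("timed out")
	case <-time.After(10 * time.Millisecond):
		fmt.Println("work finished before the timeout")
	}
}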
// healSequenceStart - this is the top-level background heal
// routine. It launches another go-routine that actually traverses
// on-disk data, checks and heals according to the selected
// settings. This go-routine itself, (1) monitors the traversal
// routine for completion, and (2) listens for external stop
// signals. When either event happens, it sets the finish status for
// the heal-sequence.
func (h *healSequence) healSequenceStart() {
// Set status as running
h.mutex.Lock()
h.currentStatus.Summary = healRunningStatus
h.currentStatus.StartTime = UTCNow()
h.mutex.Unlock()
if h.sourceCh == nil {
go h.traverseAndHeal()
} else {
go h.healFromSourceCh()
}
select {
case err, ok := <-h.traverseAndHealDoneCh:
if !ok {
return
}
h.mutex.Lock()
h.endTime = UTCNow()
// Heal traversal is complete.
if err == nil {
// heal traversal succeeded.
h.currentStatus.Summary = healFinishedStatus
} else {
// heal traversal had an error.
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = err.Error()
}
h.mutex.Unlock()
case <-h.ctx.Done():
h.mutex.Lock()
h.endTime = UTCNow()
h.currentStatus.Summary = healStoppedStatus
h.currentStatus.FailureDetail = errHealStopSignalled.Error()
h.mutex.Unlock()
// drain traverse channel so the traversal
// go-routine does not leak.
go func() {
// Eventually the traversal go-routine closes
// the channel and returns, so this go-routine
// itself will not leak.
<-h.traverseAndHealDoneCh
}()
}
}
func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItemType) error {
// Send heal request
task := healTask{
path: source.path,
opts: h.settings,
responseCh: h.respCh,
}
if source.opts != nil {
task.opts = *source.opts
}
globalBackgroundHealRoutine.queueHealTask(task)
select {
case res := <-h.respCh:
if !h.reportProgress {
// Object might have been deleted by the time heal
// was attempted; ignore this object and
// return success.
if isErrObjectNotFound(res.err) {
return nil
}
h.mutex.Lock()
defer h.mutex.Unlock()
// Progress is not reported in case of background heal processing.
// Instead we increment relevant counter based on the heal result
// for prometheus reporting.
if res.err != nil {
for _, d := range res.result.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
}
} else {
// Only object type reported for successful healing
h.healedItemsMap[res.result.Type]++
}
// Report caller of any failure
return res.err
}
res.result.Type = healType
if res.err != nil {
// Object might have been deleted by the time heal
// was attempted; ignore this object and return success.
if isErrObjectNotFound(res.err) {
return nil
}
// Only report object error
if healType != madmin.HealItemObject {
return res.err
}
res.result.Detail = res.err.Error()
}
return h.pushHealResultItem(res.result)
case <-h.ctx.Done():
return nil
}
}
func (h *healSequence) healItemsFromSourceCh() error {
for {
select {
case source, ok := <-h.sourceCh:
if !ok {
return nil
}
var itemType madmin.HealItemType
switch {
case source.path == nopHeal:
continue
case source.path == SlashSeparator:
itemType = madmin.HealItemMetadata
case !strings.Contains(source.path, SlashSeparator):
itemType = madmin.HealItemBucket
default:
itemType = madmin.HealItemObject
}
if err := h.queueHealTask(source, itemType); err != nil {
logger.LogIf(h.ctx, err)
}
h.scannedItemsMap[itemType]++
h.lastHealActivity = UTCNow()
case <-h.ctx.Done():
return nil
}
}
}
func (h *healSequence) healFromSourceCh() {
h.healItemsFromSourceCh()
}
func (h *healSequence) healDiskMeta() error {
// Start with format healing
if err := h.healDiskFormat(); err != nil {
return err
}
// Start healing the config prefix.
if err := h.healMinioSysMeta(minioConfigPrefix)(); err != nil {
return err
}
// Start healing the bucket config prefix.
return h.healMinioSysMeta(bucketConfigPrefix)()
}
func (h *healSequence) healItems(bucketsOnly bool) error {
if err := h.healDiskMeta(); err != nil {
return err
}
// Heal buckets and objects
return h.healBuckets(bucketsOnly)
}
// traverseAndHeal - traverses on-disk data and performs healing
// according to settings. At each "safe" point it also checks if an
// external quit signal has been received and quits if so. Since the
// healing traversal may be mutating on-disk data when an external
// quit signal is received, this routine cannot quit immediately and
// has to wait until a safe point is reached, such as between scanning
// two objects.
func (h *healSequence) traverseAndHeal() {
bucketsOnly := false // Heals buckets and objects also.
h.traverseAndHealDoneCh <- h.healItems(bucketsOnly)
close(h.traverseAndHealDoneCh)
}
// healMinioSysMeta - heals all files under a given meta prefix; returns a function
// which in turn heals the respective meta directory path and any files in it.
func (h *healSequence) healMinioSysMeta(metaPrefix string) func() error {
return func() error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
// NOTE: Healing on meta is run regardless
// of any bucket being selected; this is to ensure that
// metadata is always up to date and correct.
return objectAPI.HealObjects(h.ctx, minioMetaBucket, metaPrefix, h.settings, func(bucket string, object string) error {
if h.isQuitting() {
return errHealStopSignalled
}
herr := h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemBucketMetadata)
// Object might have been deleted by the time heal
// was attempted; ignore this object and move on.
if isErrObjectNotFound(herr) {
return nil
}
return herr
})
}
}
// healDiskFormat - heals format.json; the return value indicates
// whether a failure occurred.
func (h *healSequence) healDiskFormat() error {
if h.isQuitting() {
return errHealStopSignalled
}
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
return h.queueHealTask(healSource{path: SlashSeparator}, madmin.HealItemMetadata)
}
// healBuckets - heals all buckets, or only the bucket specified on the heal sequence.
func (h *healSequence) healBuckets(bucketsOnly bool) error {
if h.isQuitting() {
return errHealStopSignalled
}
// 1. If a bucket was specified, heal only the bucket.
if h.bucket != "" {
return h.healBucket(h.bucket, bucketsOnly)
}
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
buckets, err := objectAPI.ListBucketsHeal(h.ctx)
if err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
for _, bucket := range buckets {
if err = h.healBucket(bucket.Name, bucketsOnly); err != nil {
return err
}
}
return nil
}
// healBucket - traverses and heals given bucket
func (h *healSequence) healBucket(bucket string, bucketsOnly bool) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
if err := h.queueHealTask(healSource{path: bucket}, madmin.HealItemBucket); err != nil {
return err
}
if bucketsOnly {
return nil
}
if !h.settings.Recursive {
if h.objPrefix != "" {
// Check if an object named as the objPrefix exists,
// and if so heal it.
_, err := objectAPI.GetObjectInfo(h.ctx, bucket, h.objPrefix, ObjectOptions{})
if err == nil {
if err = h.healObject(bucket, h.objPrefix); err != nil {
return err
}
}
}
return nil
}
if err := objectAPI.HealObjects(h.ctx, bucket, h.objPrefix, h.settings, h.healObject); err != nil {
return errFnHealFromAPIErr(h.ctx, err)
}
return nil
}
// healObject - heal the given object and record result
func (h *healSequence) healObject(bucket, object string) error {
// Get current object layer instance.
objectAPI := newObjectLayerWithoutSafeModeFn()
if objectAPI == nil {
return errServerNotInitialized
}
if h.isQuitting() {
return errHealStopSignalled
}
return h.queueHealTask(healSource{path: pathJoin(bucket, object)}, madmin.HealItemObject)
}

View file

@ -1,118 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"encoding/json"
"io/ioutil"
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
const (
bucketQuotaConfigFile = "quota.json"
)
// PutBucketQuotaConfigHandler - PUT Bucket quota configuration.
// ----------
// Places a quota configuration on the specified bucket. The quota
// specified in the configuration is enforced as the total quota
// for the bucket.
func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketQuotaConfig")
defer logger.AuditLog(w, r, "PutBucketQuotaConfig", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
// Turn off quota commands if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
return
}
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
if _, err = parseBucketQuota(bucket, data); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketQuotaConfigFile, data); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseHeadersOnly(w)
}
// GetBucketQuotaConfigHandler - gets bucket quota configuration
func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketQuotaConfig")
defer logger.AuditLog(w, r, "GetBucketQuotaConfig", mustGetClaimsFromToken(r))
objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetBucketQuotaAdminAction)
if objectAPI == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config, err := globalBucketMetadataSys.GetQuotaConfig(bucket)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
configData, err := json.Marshal(config)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Write success response.
writeSuccessResponseJSON(w, configData)
}
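For illustration only, a quota.json payload handled above might look like the sketch below; the struct here is a hypothetical stand-in, since the real type is defined in the madmin package.

package main

import (
	"encoding/json"
	"fmt"
)

// bucketQuota mirrors the rough shape of a bucket quota config for this
// example only; it is not the madmin definition.
type bucketQuota struct {
	Quota uint64 `json:"quota"`
	Type  string `json:"quotatype,omitempty"`
}

func main() {
	data := []byte(`{"quota": 1073741824, "quotatype": "hard"}`)
	var q bucketQuota
	if err := json.Unmarshal(data, &q); err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes, %s quota\n", q.Quota, q.Type)
}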

View file

@ -1,222 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016, 2017, 2018, 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)
const (
adminPathPrefix = minioReservedBucketPath + "/admin"
adminAPIVersionV2 = madmin.AdminAPIVersionV2
adminAPIVersion = madmin.AdminAPIVersion
adminAPIVersionPrefix = SlashSeparator + adminAPIVersion
adminAPIVersionV2Prefix = SlashSeparator + adminAPIVersionV2
)
// adminAPIHandlers provides HTTP handlers for MinIO admin API.
type adminAPIHandlers struct{}
// registerAdminRouter - Add handler functions for each service REST API routes.
func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) {
adminAPI := adminAPIHandlers{}
// Admin router
adminRouter := router.PathPrefix(adminPathPrefix).Subrouter()
/// Service operations
adminVersions := []string{
adminAPIVersionPrefix,
adminAPIVersionV2Prefix,
}
for _, adminVersion := range adminVersions {
// Restart and stop MinIO service.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/service").HandlerFunc(httpTraceAll(adminAPI.ServiceHandler)).Queries("action", "{action:.*}")
// Update MinIO servers.
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/update").HandlerFunc(httpTraceAll(adminAPI.ServerUpdateHandler)).Queries("updateURL", "{updateURL:.*}")
// Info operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/info").HandlerFunc(httpTraceAll(adminAPI.ServerInfoHandler))
// StorageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/storageinfo").HandlerFunc(httpTraceAll(adminAPI.StorageInfoHandler))
// DataUsageInfo operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/datausageinfo").HandlerFunc(httpTraceAll(adminAPI.DataUsageInfoHandler))
if globalIsDistXL || globalIsXL {
/// Heal operations
// Heal processing endpoint.
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/heal/{bucket}/{prefix:.*}").HandlerFunc(httpTraceAll(adminAPI.HealHandler))
adminRouter.Methods(http.MethodPost).Path(adminVersion + "/background-heal/status").HandlerFunc(httpTraceAll(adminAPI.BackgroundHealStatusHandler))
/// Health operations
}
// Profiling operations
adminRouter.Methods(http.MethodPost).Path(adminVersion+"/profiling/start").HandlerFunc(httpTraceAll(adminAPI.StartProfilingHandler)).
Queries("profilerType", "{profilerType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/profiling/download").HandlerFunc(httpTraceAll(adminAPI.DownloadProfilingHandler))
// Config KV operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigKVHandler)).Queries("key", "{key:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/set-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigKVHandler))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/del-config-kv").HandlerFunc(httpTraceHdrs(adminAPI.DelConfigKVHandler))
}
// Enable config help in all modes.
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/help-config-kv").HandlerFunc(httpTraceAll(adminAPI.HelpConfigKVHandler)).Queries("subSys", "{subSys:.*}", "key", "{key:.*}")
// Config KV history operations.
if enableConfigOps {
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-config-history-kv").HandlerFunc(httpTraceAll(adminAPI.ListConfigHistoryKVHandler)).Queries("count", "{count:[0-9]+}")
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/clear-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.ClearConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/restore-config-history-kv").HandlerFunc(httpTraceHdrs(adminAPI.RestoreConfigHistoryKVHandler)).Queries("restoreId", "{restoreId:.*}")
}
/// Config import/export bulk operations
if enableConfigOps {
// Get config
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.GetConfigHandler))
// Set config
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/config").HandlerFunc(httpTraceHdrs(adminAPI.SetConfigHandler))
}
if enableIAMOps {
// -- IAM APIs --
// Add policy IAM
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.AddCannedPolicy)).Queries("name", "{name:.*}")
// Add user IAM
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/accountusageinfo").HandlerFunc(httpTraceAll(adminAPI.AccountUsageInfoHandler))
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/add-user").HandlerFunc(httpTraceHdrs(adminAPI.AddUser)).Queries("accessKey", "{accessKey:.*}")
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-status").HandlerFunc(httpTraceHdrs(adminAPI.SetUserStatus)).Queries("accessKey", "{accessKey:.*}").Queries("status", "{status:.*}")
// Service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/add-service-account").HandlerFunc(httpTraceHdrs(adminAPI.AddServiceAccount))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-service-accounts").HandlerFunc(httpTraceHdrs(adminAPI.ListServiceAccounts))
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/delete-service-account").HandlerFunc(httpTraceHdrs(adminAPI.DeleteServiceAccount)).Queries("accessKey", "{accessKey:.*}")
if adminVersion == adminAPIVersionV2Prefix {
// Info policy IAM v2
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicyV2)).Queries("name", "{name:.*}")
// List policies v2
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPoliciesV2))
} else {
// Info policy IAM latest
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/info-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.InfoCannedPolicy)).Queries("name", "{name:.*}")
// List policies latest
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
}
// Remove policy IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-canned-policy").HandlerFunc(httpTraceHdrs(adminAPI.RemoveCannedPolicy)).Queries("name", "{name:.*}")
// Set user or group policy
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-user-or-group-policy").
HandlerFunc(httpTraceHdrs(adminAPI.SetPolicyForUserOrGroup)).
Queries("policyName", "{policyName:.*}", "userOrGroup", "{userOrGroup:.*}", "isGroup", "{isGroup:true|false}")
// Remove user IAM
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-user").HandlerFunc(httpTraceHdrs(adminAPI.RemoveUser)).Queries("accessKey", "{accessKey:.*}")
// List users
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))
// User info
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/user-info").HandlerFunc(httpTraceHdrs(adminAPI.GetUserInfo)).Queries("accessKey", "{accessKey:.*}")
// Add/Remove members from group
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))
// Get Group
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")
// List Groups
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))
// Set Group Status
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")
}
// Quota operations
if globalIsXL || globalIsDistXL {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
}
}
// -- Top APIs --
// Top locks
if globalIsDistXL {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/top/locks").HandlerFunc(httpTraceHdrs(adminAPI.TopLocksHandler))
}
// HTTP Trace
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/trace").HandlerFunc(adminAPI.TraceHandler)
// Console Logs
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/log").HandlerFunc(httpTraceAll(adminAPI.ConsoleLogHandler))
// -- KMS APIs --
//
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(httpTraceAll(adminAPI.KMSKeyStatusHandler))
if !globalIsGateway {
// -- OBD API --
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/obdinfo").
HandlerFunc(httpTraceHdrs(adminAPI.OBDInfoHandler)).
Queries("perfdrive", "{perfdrive:true|false}",
"perfnet", "{perfnet:true|false}",
"minioinfo", "{minioinfo:true|false}",
"minioconfig", "{minioconfig:true|false}",
"syscpu", "{syscpu:true|false}",
"sysdiskhw", "{sysdiskhw:true|false}",
"sysosinfo", "{sysosinfo:true|false}",
"sysmem", "{sysmem:true|false}",
"sysprocess", "{sysprocess:true|false}",
)
}
}
// If none of the routes match add default error handler routes
adminRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
adminRouter.MethodNotAllowedHandler = http.HandlerFunc(httpTraceAll(errorResponseHandler))
}
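A minimal, self-contained sketch of how the gorilla/mux Queries matcher used throughout this router behaves; the route path here is illustrative, not the registered admin route.

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// The route only matches when the "bucket" query parameter is present,
	// and mux.Vars exposes it alongside path variables.
	r.Methods(http.MethodGet).Path("/get-bucket-quota").
		Queries("bucket", "{bucket:.*}").
		HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			fmt.Fprintf(w, "bucket=%s", mux.Vars(req)["bucket"])
		})

	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/get-bucket-quota?bucket=photos", nil)
	r.ServeHTTP(rec, req)
	fmt.Println(rec.Body.String()) // bucket=photos
}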

View file

@ -1,86 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"net/http"
"os"
"github.com/minio/minio/pkg/disk"
"github.com/minio/minio/pkg/madmin"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
// local endpoints in the given list of endpoint zones
func getLocalServerProperty(endpointZones EndpointZones, r *http.Request) madmin.ServerProperties {
var disks []madmin.Disk
addr := r.Host
if globalIsDistXL {
addr = GetLocalPeer(endpointZones)
}
network := make(map[string]string)
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
nodeName := endpoint.Host
if nodeName == "" {
nodeName = r.Host
}
if endpoint.IsLocal {
// Only proceed for local endpoints
network[nodeName] = "online"
var di = madmin.Disk{
DrivePath: endpoint.Path,
}
diInfo, err := disk.GetInfo(endpoint.Path)
if err != nil {
if os.IsNotExist(err) || isSysErrPathNotFound(err) {
di.State = madmin.DriveStateMissing
} else {
di.State = madmin.DriveStateCorrupt
}
} else {
di.State = madmin.DriveStateOk
di.DrivePath = endpoint.Path
di.TotalSpace = diInfo.Total
di.UsedSpace = diInfo.Total - diInfo.Free
// Convert to float64 before dividing to avoid integer truncation.
di.Utilization = float64(diInfo.Total-diInfo.Free) / float64(diInfo.Total) * 100
}
disks = append(disks, di)
} else {
_, present := network[nodeName]
if !present {
err := IsServerResolvable(endpoint)
if err == nil {
network[nodeName] = "online"
} else {
network[nodeName] = "offline"
}
}
}
}
}
return madmin.ServerProperties{
State: "ok",
Endpoint: addr,
Uptime: UTCNow().Unix() - globalBootTime.Unix(),
Version: Version,
CommitID: CommitID,
Network: network,
Disks: disks,
}
}
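The utilization calculation above must convert to float64 before dividing: unsigned integer division truncates, which would otherwise pin the result to 0 or 100. A self-contained demonstration:

package main

import "fmt"

func main() {
	var total, free uint64 = 1000, 400

	// Wrong: the division happens on uint64 values and truncates to 0,
	// so the result is always 0 (or 100 when the disk is completely full).
	wrong := float64((total - free) / total * 100)

	// Right: convert to float64 before dividing.
	right := float64(total-free) / float64(total) * 100

	fmt.Println(wrong, right) // 0 60
}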

View file

@ -1,41 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"encoding/xml"
)
// ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct {
ObjectName string `xml:"Key"`
}
// createBucketLocationConfiguration - container for the bucket configuration request from the client.
// Used for parsing the location from the request body for MakeBucket.
type createBucketLocationConfiguration struct {
XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`
Location string `xml:"LocationConstraint"`
}
// DeleteObjectsRequest - XML carrying the object key names which need to be deleted.
type DeleteObjectsRequest struct {
// Element to enable quiet mode for the request
Quiet bool
// List of objects to be deleted
Objects []ObjectIdentifier `xml:"Object"`
}
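A minimal sketch of decoding a multi-object delete payload into this shape; the local types below mirror the ones above so the example is self-contained.

package main

import (
	"encoding/xml"
	"fmt"
)

type objectIdentifier struct {
	ObjectName string `xml:"Key"`
}

type deleteObjectsRequest struct {
	Quiet   bool
	Objects []objectIdentifier `xml:"Object"`
}

func main() {
	payload := `<Delete>
	<Quiet>true</Quiet>
	<Object><Key>photos/a.jpg</Key></Object>
	<Object><Key>photos/b.jpg</Key></Object>
</Delete>`

	var req deleteObjectsRequest
	if err := xml.Unmarshal([]byte(payload), &req); err != nil {
		panic(err)
	}
	fmt.Println(req.Quiet, len(req.Objects)) // true 2
}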

File diff suppressed because it is too large

View file

@ -1,82 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"errors"
"os"
"path/filepath"
"testing"
"github.com/minio/minio/legacy/crypto"
"github.com/minio/minio/pkg/hash"
)
var toAPIErrorTests = []struct {
err error
errCode APIErrorCode
}{
{err: hash.BadDigest{}, errCode: ErrBadDigest},
{err: hash.SHA256Mismatch{}, errCode: ErrContentSHA256Mismatch},
{err: IncompleteBody{}, errCode: ErrIncompleteBody},
{err: ObjectExistsAsDirectory{}, errCode: ErrObjectExistsAsDirectory},
{err: BucketNameInvalid{}, errCode: ErrInvalidBucketName},
{err: BucketExists{}, errCode: ErrBucketAlreadyOwnedByYou},
{err: ObjectNotFound{}, errCode: ErrNoSuchKey},
{err: ObjectNameInvalid{}, errCode: ErrInvalidObjectName},
{err: InvalidUploadID{}, errCode: ErrNoSuchUpload},
{err: InvalidPart{}, errCode: ErrInvalidPart},
{err: InsufficientReadQuorum{}, errCode: ErrSlowDown},
{err: InsufficientWriteQuorum{}, errCode: ErrSlowDown},
{err: UnsupportedDelimiter{}, errCode: ErrNotImplemented},
{err: InvalidMarkerPrefixCombination{}, errCode: ErrNotImplemented},
{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},
{err: MalformedUploadID{}, errCode: ErrNoSuchUpload},
{err: PartTooSmall{}, errCode: ErrEntityTooSmall},
{err: BucketNotEmpty{}, errCode: ErrBucketNotEmpty},
{err: BucketNotFound{}, errCode: ErrNoSuchBucket},
{err: StorageFull{}, errCode: ErrStorageFull},
{err: NotImplemented{}, errCode: ErrNotImplemented},
{err: errSignatureMismatch, errCode: ErrSignatureDoesNotMatch},
// SSE-C errors
{err: crypto.ErrInvalidCustomerAlgorithm, errCode: ErrInvalidSSECustomerAlgorithm},
{err: crypto.ErrMissingCustomerKey, errCode: ErrMissingSSECustomerKey},
{err: crypto.ErrInvalidCustomerKey, errCode: ErrAccessDenied},
{err: crypto.ErrMissingCustomerKeyMD5, errCode: ErrMissingSSECustomerKeyMD5},
{err: crypto.ErrCustomerKeyMD5Mismatch, errCode: ErrSSECustomerKeyMD5Mismatch},
{err: errObjectTampered, errCode: ErrObjectTampered},
{err: nil, errCode: ErrNone},
{err: errors.New("Custom error"), errCode: ErrInternalError}, // Case where err type is unknown.
}
func TestAPIErrCode(t *testing.T) {
disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
defer os.RemoveAll(disk)
initFSObjects(disk, t)
ctx := context.Background()
for i, testCase := range toAPIErrorTests {
errCode := toAPIErrorCode(ctx, testCase.err)
if errCode != testCase.errCode {
t.Errorf("Test %d: Expected error code %d, got %d", i+1, testCase.errCode, errCode)
}
}
}

View file

@ -1,156 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"net/http"
"net/url"
"strconv"
"strings"
"time"
"github.com/minio/minio/legacy/crypto"
xhttp "github.com/minio/minio/legacy/http"
)
// Returns a hexadecimal representation of time at the
// time response is sent to the client.
func mustGetRequestID(t time.Time) string {
return fmt.Sprintf("%X", t.UnixNano())
}
// Write http common headers
func setCommonHeaders(w http.ResponseWriter) {
w.Header().Set(xhttp.ServerInfo, "MinIO/"+ReleaseTag)
// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalServerRegion; region != "" {
w.Header().Set(xhttp.AmzBucketRegion, region)
}
w.Header().Set(xhttp.AcceptRanges, "bytes")
// Remove sensitive information
crypto.RemoveSensitiveHeaders(w.Header())
}
// Encodes the response into XML format.
func encodeResponse(response interface{}) []byte {
var bytesBuffer bytes.Buffer
bytesBuffer.WriteString(xml.Header)
e := xml.NewEncoder(&bytesBuffer)
e.Encode(response)
return bytesBuffer.Bytes()
}
// Encodes the response into JSON format.
func encodeResponseJSON(response interface{}) []byte {
var bytesBuffer bytes.Buffer
e := json.NewEncoder(&bytesBuffer)
e.Encode(response)
return bytesBuffer.Bytes()
}
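A self-contained sketch of the two encoders above applied to a hypothetical response type; the struct below is illustrative, not one of the package's response types.

package main

import (
	"bytes"
	"encoding/json"
	"encoding/xml"
	"fmt"
)

// locationResponse is a hypothetical response shape for this example.
type locationResponse struct {
	XMLName  xml.Name `xml:"LocationConstraint" json:"-"`
	Location string   `xml:",chardata"`
}

func main() {
	resp := locationResponse{Location: "us-east-1"}

	// XML path: prepend the XML header, then encode, as encodeResponse does.
	var xmlBuf bytes.Buffer
	xmlBuf.WriteString(xml.Header)
	_ = xml.NewEncoder(&xmlBuf).Encode(resp)

	// JSON path mirrors encodeResponseJSON.
	var jsonBuf bytes.Buffer
	_ = json.NewEncoder(&jsonBuf).Encode(resp)

	fmt.Print(xmlBuf.String())
	fmt.Print(jsonBuf.String())
}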
// Write parts count
func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
if strings.Contains(objInfo.ETag, "-") && len(objInfo.Parts) > 0 {
w.Header()[xhttp.AmzMpPartsCount] = []string{strconv.Itoa(len(objInfo.Parts))}
}
}
// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec) (err error) {
// set common headers
setCommonHeaders(w)
// Set last modified time.
lastModified := objInfo.ModTime.UTC().Format(http.TimeFormat)
w.Header().Set(xhttp.LastModified, lastModified)
// Set Etag if available.
if objInfo.ETag != "" {
w.Header()[xhttp.ETag] = []string{"\"" + objInfo.ETag + "\""}
}
if objInfo.ContentType != "" {
w.Header().Set(xhttp.ContentType, objInfo.ContentType)
}
if objInfo.ContentEncoding != "" {
w.Header().Set(xhttp.ContentEncoding, objInfo.ContentEncoding)
}
if !objInfo.Expires.IsZero() {
w.Header().Set(xhttp.Expires, objInfo.Expires.UTC().Format(http.TimeFormat))
}
if globalCacheConfig.Enabled {
w.Header().Set(xhttp.XCache, objInfo.CacheStatus.String())
w.Header().Set(xhttp.XCacheLookup, objInfo.CacheLookupStatus.String())
}
// Set tag count if object has tags
tags, _ := url.ParseQuery(objInfo.UserTags)
tagCount := len(tags)
if tagCount > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tagCount)}
}
// Set all other user defined metadata.
for k, v := range objInfo.UserDefined {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.
continue
}
var isSet bool
for _, userMetadataPrefix := range userMetadataKeyPrefixes {
if !strings.HasPrefix(k, userMetadataPrefix) {
continue
}
w.Header()[strings.ToLower(k)] = []string{v}
isSet = true
break
}
if !isSet {
w.Header().Set(k, v)
}
}
totalObjectSize, err := objInfo.GetActualSize()
if err != nil {
return err
}
// for providing ranged content
start, rangeLen, err := rs.GetOffsetLength(totalObjectSize)
if err != nil {
return err
}
// Set content length.
w.Header().Set(xhttp.ContentLength, strconv.FormatInt(rangeLen, 10))
if rs != nil {
contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
w.Header().Set(xhttp.ContentRange, contentRange)
}
return nil
}
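A minimal sketch of the Content-Range formatting used above, with hypothetical offsets; note that the end offset is inclusive, hence start+rangeLen-1.

package main

import "fmt"

func main() {
	// Hypothetical range request: bytes=100-199 of a 1000-byte object.
	var start, rangeLen, totalObjectSize int64 = 100, 100, 1000

	// Same formatting as setObjectHeaders.
	contentRange := fmt.Sprintf("bytes %d-%d/%d", start, start+rangeLen-1, totalObjectSize)
	fmt.Println(contentRange) // bytes 100-199/1000
}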

View file

@ -1,40 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"testing"
)
func TestNewRequestID(t *testing.T) {
// Ensure that it returns an uppercase hexadecimal result of length 16.
var id = mustGetRequestID(UTCNow())
if len(id) != 16 {
t.Fail()
}
var e rune
for _, char := range id {
e = rune(char)
// Ensure that it is alphanumeric, in this case, between 0-9 and A-Z.
if !(('0' <= e && e <= '9') || ('A' <= e && e <= 'Z')) {
t.Fail()
}
}
}

View file

@ -1,152 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"encoding/base64"
"net/url"
"strconv"
)
// Parse bucket url queries
func getListObjectsV1Args(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-keys") != "" {
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
marker = values.Get("marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
}
func getListBucketObjectVersionsArgs(values url.Values) (prefix, marker, delimiter string, maxkeys int, encodingType, versionIDMarker string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-keys") != "" {
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
marker = values.Get("key-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
versionIDMarker = values.Get("version-id-marker")
return
}
// Parse bucket url queries for ListObjects V2.
func getListObjectsV2Args(values url.Values) (prefix, token, startAfter, delimiter string, fetchOwner bool, maxkeys int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
// The continuation-token cannot be empty.
if val, ok := values["continuation-token"]; ok {
if len(val[0]) == 0 {
errCode = ErrIncorrectContinuationToken
return
}
}
if values.Get("max-keys") != "" {
var err error
if maxkeys, err = strconv.Atoi(values.Get("max-keys")); err != nil {
errCode = ErrInvalidMaxKeys
return
}
} else {
maxkeys = maxObjectList
}
prefix = values.Get("prefix")
startAfter = values.Get("start-after")
delimiter = values.Get("delimiter")
fetchOwner = values.Get("fetch-owner") == "true"
encodingType = values.Get("encoding-type")
if token = values.Get("continuation-token"); token != "" {
decodedToken, err := base64.StdEncoding.DecodeString(token)
if err != nil {
errCode = ErrIncorrectContinuationToken
return
}
token = string(decodedToken)
}
return
}
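A self-contained sketch of the continuation-token handling above: the token arrives base64-encoded, and an undecodable value maps to ErrIncorrectContinuationToken. "dG9rZW4=" is the same sample used in TestListObjectsV2Resources later in this diff.

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	decoded, err := base64.StdEncoding.DecodeString("dG9rZW4=")
	if err != nil {
		// getListObjectsV2Args reports this as ErrIncorrectContinuationToken.
		fmt.Println("incorrect continuation token")
		return
	}
	fmt.Println(string(decoded)) // token
}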
// Parse bucket url queries for ?uploads
func getBucketMultipartResources(values url.Values) (prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int, encodingType string, errCode APIErrorCode) {
errCode = ErrNone
if values.Get("max-uploads") != "" {
var err error
if maxUploads, err = strconv.Atoi(values.Get("max-uploads")); err != nil {
errCode = ErrInvalidMaxUploads
return
}
} else {
maxUploads = maxUploadsList
}
prefix = values.Get("prefix")
keyMarker = values.Get("key-marker")
uploadIDMarker = values.Get("upload-id-marker")
delimiter = values.Get("delimiter")
encodingType = values.Get("encoding-type")
return
}
// Parse object url queries
func getObjectResources(values url.Values) (uploadID string, partNumberMarker, maxParts int, encodingType string, errCode APIErrorCode) {
var err error
errCode = ErrNone
if values.Get("max-parts") != "" {
if maxParts, err = strconv.Atoi(values.Get("max-parts")); err != nil {
errCode = ErrInvalidMaxParts
return
}
} else {
maxParts = maxPartsList
}
if values.Get("part-number-marker") != "" {
if partNumberMarker, err = strconv.Atoi(values.Get("part-number-marker")); err != nil {
errCode = ErrInvalidPartNumberMarker
return
}
}
uploadID = values.Get("uploadId")
encodingType = values.Get("encoding-type")
return
}

View file

@ -1,221 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"net/url"
"testing"
)
// Test list objects resources V2.
func TestListObjectsV2Resources(t *testing.T) {
testCases := []struct {
values url.Values
prefix, token, startAfter, delimiter string
fetchOwner bool
maxKeys int
encodingType string
errCode APIErrorCode
}{
{
values: url.Values{
"prefix": []string{"photos/"},
"continuation-token": []string{"dG9rZW4="},
"start-after": []string{"start-after"},
"delimiter": []string{SlashSeparator},
"fetch-owner": []string{"true"},
"max-keys": []string{"100"},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
token: "token",
startAfter: "start-after",
delimiter: SlashSeparator,
fetchOwner: true,
maxKeys: 100,
encodingType: "gzip",
errCode: ErrNone,
},
{
values: url.Values{
"prefix": []string{"photos/"},
"continuation-token": []string{"dG9rZW4="},
"start-after": []string{"start-after"},
"delimiter": []string{SlashSeparator},
"fetch-owner": []string{"true"},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
token: "token",
startAfter: "start-after",
delimiter: SlashSeparator,
fetchOwner: true,
maxKeys: maxObjectList,
encodingType: "gzip",
errCode: ErrNone,
},
{
values: url.Values{
"prefix": []string{"photos/"},
"continuation-token": []string{""},
"start-after": []string{"start-after"},
"delimiter": []string{SlashSeparator},
"fetch-owner": []string{"true"},
"encoding-type": []string{"gzip"},
},
prefix: "",
token: "",
startAfter: "",
delimiter: "",
fetchOwner: false,
maxKeys: 0,
encodingType: "",
errCode: ErrIncorrectContinuationToken,
},
}
for i, testCase := range testCases {
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(testCase.values)
if errCode != testCase.errCode {
t.Errorf("Test %d: Expected error code:%d, got %d", i+1, testCase.errCode, errCode)
}
if prefix != testCase.prefix {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.prefix, prefix)
}
if token != testCase.token {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.token, token)
}
if startAfter != testCase.startAfter {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.startAfter, startAfter)
}
if delimiter != testCase.delimiter {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.delimiter, delimiter)
}
if fetchOwner != testCase.fetchOwner {
t.Errorf("Test %d: Expected %t, got %t", i+1, testCase.fetchOwner, fetchOwner)
}
if maxKeys != testCase.maxKeys {
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.maxKeys, maxKeys)
}
if encodingType != testCase.encodingType {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.encodingType, encodingType)
}
}
}
// Test list objects resources V1.
func TestListObjectsV1Resources(t *testing.T) {
testCases := []struct {
values url.Values
prefix, marker, delimiter string
maxKeys int
encodingType string
}{
{
values: url.Values{
"prefix": []string{"photos/"},
"marker": []string{"test"},
"delimiter": []string{SlashSeparator},
"max-keys": []string{"100"},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
marker: "test",
delimiter: SlashSeparator,
maxKeys: 100,
encodingType: "gzip",
},
{
values: url.Values{
"prefix": []string{"photos/"},
"marker": []string{"test"},
"delimiter": []string{SlashSeparator},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
marker: "test",
delimiter: SlashSeparator,
maxKeys: maxObjectList,
encodingType: "gzip",
},
}
for i, testCase := range testCases {
prefix, marker, delimiter, maxKeys, encodingType, argsErr := getListObjectsV1Args(testCase.values)
if argsErr != ErrNone {
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
}
if prefix != testCase.prefix {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.prefix, prefix)
}
if marker != testCase.marker {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.marker, marker)
}
if delimiter != testCase.delimiter {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.delimiter, delimiter)
}
if maxKeys != testCase.maxKeys {
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.maxKeys, maxKeys)
}
if encodingType != testCase.encodingType {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.encodingType, encodingType)
}
}
}
// Validates extracting information for object resources.
func TestGetObjectsResources(t *testing.T) {
testCases := []struct {
values url.Values
uploadID string
partNumberMarker, maxParts int
encodingType string
}{
{
values: url.Values{
"uploadId": []string{"11123-11312312311231-12313"},
"part-number-marker": []string{"1"},
"max-parts": []string{"1000"},
"encoding-type": []string{"gzip"},
},
uploadID: "11123-11312312311231-12313",
partNumberMarker: 1,
maxParts: 1000,
encodingType: "gzip",
},
}
for i, testCase := range testCases {
uploadID, partNumberMarker, maxParts, encodingType, argsErr := getObjectResources(testCase.values)
if argsErr != ErrNone {
t.Errorf("Test %d: argument parsing failed, got %v", i+1, argsErr)
}
if uploadID != testCase.uploadID {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.uploadID, uploadID)
}
if partNumberMarker != testCase.partNumberMarker {
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.partNumberMarker, partNumberMarker)
}
if maxParts != testCase.maxParts {
t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.maxParts, maxParts)
}
if encodingType != testCase.encodingType {
t.Errorf("Test %d: Expected %s, got %s", i+1, testCase.encodingType, encodingType)
}
}
}

View file

@ -1,825 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"encoding/base64"
"encoding/xml"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"time"
xhttp "github.com/minio/minio/legacy/http"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/handlers"
)
const (
// RFC3339, a subset of the ISO8601 timestamp format, e.g. 2014-04-29T18:30:38Z.
iso8601TimeFormat = "2006-01-02T15:04:05.000Z" // Reply date format with millisecond precision.
maxObjectList = 50000 // Limit number of objects in a listObjectsResponse/listObjectsVersionsResponse.
maxDeleteList = 10000 // Limit number of objects deleted in a delete call.
maxUploadsList = 10000 // Limit number of uploads in a listUploadsResponse.
maxPartsList = 10000 // Limit number of parts in a listPartsResponse.
)
// LocationResponse - format for location response.
type LocationResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ LocationConstraint" json:"-"`
Location string `xml:",chardata"`
}
// ListVersionsResponse - format for list bucket versions response.
type ListVersionsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListVersionsResult" json:"-"`
Name string
Prefix string
KeyMarker string
// When the response is truncated (the IsTruncated element value in the response
// is true), you can use the key name in this field as the marker in the subsequent
// request to get the next set of objects. The server lists objects in alphabetical
// order. Note: this element is returned only if the delimiter request parameter
// is specified. If the response is truncated and does not include NextKeyMarker,
// you can use the value of the last Key in the response as the marker in the
// subsequent request to get the next set of object keys.
NextKeyMarker string `xml:"NextKeyMarker,omitempty"`
// When the number of responses exceeds the value of MaxKeys,
// NextVersionIdMarker specifies the first object version not
// returned that satisfies the search criteria. Use this value
// for the version-id-marker request parameter in a subsequent request.
NextVersionIDMarker string `xml:"NextVersionIdMarker"`
// Marks the last version of the Key returned in a truncated response.
VersionIDMarker string `xml:"VersionIdMarker"`
MaxKeys int
Delimiter string
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
CommonPrefixes []CommonPrefix
Versions []ObjectVersion
// Encoding type used to encode object keys in the response.
EncodingType string `xml:"EncodingType,omitempty"`
}
// ListObjectsResponse - format for list objects response.
type ListObjectsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`
Name string
Prefix string
Marker string
// When the response is truncated (the IsTruncated element value in the response
// is true), you can use the key name in this field as the marker in the subsequent
// request to get the next set of objects. The server lists objects in alphabetical
// order. Note: this element is returned only if the delimiter request parameter
// is specified. If the response is truncated and does not include NextMarker,
// you can use the value of the last Key in the response as the marker in the
// subsequent request to get the next set of object keys.
NextMarker string `xml:"NextMarker,omitempty"`
MaxKeys int
Delimiter string
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
Contents []Object
CommonPrefixes []CommonPrefix
// Encoding type used to encode object keys in the response.
EncodingType string `xml:"EncodingType,omitempty"`
}
// ListObjectsV2Response - format for list objects response.
type ListObjectsV2Response struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult" json:"-"`
Name string
Prefix string
StartAfter string `xml:"StartAfter,omitempty"`
// When the response is truncated (the IsTruncated element value in the response
// is true), you can use the NextContinuationToken value as the continuation
// token in the subsequent request to get the next set of objects. The server
// lists objects in alphabetical order. Note: this element is returned only if
// the delimiter request parameter is specified.
ContinuationToken string `xml:"ContinuationToken,omitempty"`
NextContinuationToken string `xml:"NextContinuationToken,omitempty"`
KeyCount int
MaxKeys int
Delimiter string
// A flag that indicates whether or not ListObjects returned all of the results
// that satisfied the search criteria.
IsTruncated bool
Contents []Object
CommonPrefixes []CommonPrefix
// Encoding type used to encode object keys in the response.
EncodingType string `xml:"EncodingType,omitempty"`
}
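The struct tags above drive the wire format: the space part of the XMLName tag becomes the xmlns attribute, and omitempty fields drop out when unset. A self-contained sketch using listObjectsV2Lite, a hypothetical trimmed-down copy of ListObjectsV2Response re-declared so the snippet runs on its own:

package main

import (
	"encoding/xml"
	"fmt"
)

// listObjectsV2Lite is a hypothetical subset of ListObjectsV2Response,
// carrying just enough fields to show the marshaled shape.
type listObjectsV2Lite struct {
	XMLName      xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListBucketResult"`
	Name         string
	Prefix       string
	KeyCount     int
	MaxKeys      int
	IsTruncated  bool
	EncodingType string `xml:"EncodingType,omitempty"`
}

func main() {
	out, err := xml.MarshalIndent(listObjectsV2Lite{
		Name:     "testbucket",
		Prefix:   "photos/",
		KeyCount: 2,
		MaxKeys:  1000,
	}, "", "  ")
	if err != nil {
		panic(err)
	}
	// EncodingType is omitted because it is empty; the xmlns attribute
	// on ListBucketResult comes from the XMLName tag's space part.
	fmt.Println(string(out))
}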
// Part container for part metadata.
type Part struct {
PartNumber int
LastModified string
ETag string
Size int64
}
// ListPartsResponse - format for list parts response.
type ListPartsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`
Bucket string
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
// The class of storage used to store the object.
StorageClass string
PartNumberMarker int
NextPartNumberMarker int
MaxParts int
IsTruncated bool
// List of parts.
Parts []Part `xml:"Part"`
}
// ListMultipartUploadsResponse - format for list multipart uploads response.
type ListMultipartUploadsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`
Bucket string
KeyMarker string
UploadIDMarker string `xml:"UploadIdMarker"`
NextKeyMarker string
NextUploadIDMarker string `xml:"NextUploadIdMarker"`
Delimiter string
Prefix string
EncodingType string `xml:"EncodingType,omitempty"`
MaxUploads int
IsTruncated bool
// List of pending uploads.
Uploads []Upload `xml:"Upload"`
// Delimited common prefixes.
CommonPrefixes []CommonPrefix
}
// ListBucketsResponse - format for list buckets response
type ListBucketsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListAllMyBucketsResult" json:"-"`
Owner Owner
// Container for one or more buckets.
Buckets struct {
Buckets []Bucket `xml:"Bucket"`
} // Buckets are nested
}
// Upload container for in progress multipart upload
type Upload struct {
Key string
UploadID string `xml:"UploadId"`
Initiator Initiator
Owner Owner
StorageClass string
Initiated string
}
// CommonPrefix container for prefix response in ListObjectsResponse
type CommonPrefix struct {
Prefix string
}
// Bucket container for bucket metadata
type Bucket struct {
Name string
CreationDate string // time string of format "2006-01-02T15:04:05.000Z"
}
// ObjectVersion container for object version metadata
type ObjectVersion struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ Version" json:"-"`
Object
VersionID string `xml:"VersionId"`
IsLatest bool
}
// StringMap is a map[string]string.
type StringMap map[string]string
// MarshalXML - StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
tokens := []xml.Token{start}
for key, value := range s {
t := xml.StartElement{}
t.Name = xml.Name{
Space: "",
Local: key,
}
tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name})
}
tokens = append(tokens, xml.EndElement{
Name: start.Name,
})
for _, t := range tokens {
if err := e.EncodeToken(t); err != nil {
return err
}
}
// flush to ensure tokens are written
return e.Flush()
}
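A standalone usage sketch of the token-streaming pattern above: copying the same MarshalXML onto a local StringMap and marshaling it shows each map key become its own element. Note that map iteration order is not deterministic, so element order may vary between runs.

package main

import (
	"encoding/xml"
	"fmt"
)

// StringMap duplicates the type above so the snippet is self-contained.
type StringMap map[string]string

func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	tokens := []xml.Token{start}
	for key, value := range s {
		t := xml.StartElement{Name: xml.Name{Local: key}}
		tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name})
	}
	tokens = append(tokens, xml.EndElement{Name: start.Name})
	for _, t := range tokens {
		if err := e.EncodeToken(t); err != nil {
			return err
		}
	}
	// flush to ensure tokens are written
	return e.Flush()
}

func main() {
	v := struct {
		XMLName      xml.Name  `xml:"Object"`
		UserMetadata StringMap `xml:"UserMetadata,omitempty"`
	}{UserMetadata: StringMap{"X-Amz-Meta-Color": "blue"}}
	out, err := xml.MarshalIndent(v, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// <Object>
	//   <UserMetadata>
	//     <X-Amz-Meta-Color>blue</X-Amz-Meta-Color>
	//   </UserMetadata>
	// </Object>
}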
// Object container for object metadata
type Object struct {
Key string
LastModified string // time string of format "2006-01-02T15:04:05.000Z"
ETag string
Size int64
// Owner of the object.
Owner Owner
// The class of storage used to store the object.
StorageClass string
// UserMetadata user-defined metadata
UserMetadata StringMap `xml:"UserMetadata,omitempty"`
}
// CopyObjectResponse container returns ETag and LastModified of the successfully copied object
type CopyObjectResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyObjectResult" json:"-"`
LastModified string // time string of format "2006-01-02T15:04:05.000Z"
ETag string // md5sum of the copied object.
}
// CopyObjectPartResponse container returns ETag and LastModified of the successfully copied object
type CopyObjectPartResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CopyPartResult" json:"-"`
LastModified string // time string of format "2006-01-02T15:04:05.000Z"
ETag string // md5sum of the copied object part.
}
// Initiator inherit from Owner struct, fields are same
type Initiator Owner
// Owner - bucket owner/principal
type Owner struct {
ID string
DisplayName string
}
// InitiateMultipartUploadResponse container for InitiateMultiPartUpload response, provides uploadID to start MultiPart upload
type InitiateMultipartUploadResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult" json:"-"`
Bucket string
Key string
UploadID string `xml:"UploadId"`
}
// CompleteMultipartUploadResponse container for completed multipart upload response
type CompleteMultipartUploadResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"`
Location string
Bucket string
Key string
ETag string
}
// DeleteError structure.
type DeleteError struct {
Code string
Message string
Key string
}
// DeleteObjectsResponse container for multiple object deletes.
type DeleteObjectsResponse struct {
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ DeleteResult" json:"-"`
// Collection of all deleted objects
DeletedObjects []ObjectIdentifier `xml:"Deleted,omitempty"`
// Collection of errors deleting certain objects.
Errors []DeleteError `xml:"Error,omitempty"`
}
// PostResponse container for POST object request when success_action_status is set to 201
type PostResponse struct {
Bucket string
Key string
ETag string
Location string
}
// returns "https" if the tls boolean is true, "http" otherwise.
func getURLScheme(tls bool) string {
if tls {
return httpsScheme
}
return httpScheme
}
// getObjectLocation gets the fully qualified URL of an object.
func getObjectLocation(r *http.Request, domains []string, bucket, object string) string {
// unit tests do not have host set.
if r.Host == "" {
return path.Clean(r.URL.Path)
}
proto := handlers.GetSourceScheme(r)
if proto == "" {
proto = getURLScheme(globalIsSSL)
}
u := &url.URL{
Host: r.Host,
Path: path.Join(SlashSeparator, bucket, object),
Scheme: proto,
}
// If domain is set then we need to use bucket DNS style.
for _, domain := range domains {
if strings.Contains(r.Host, domain) {
u.Host = bucket + "." + r.Host
u.Path = path.Join(SlashSeparator, object)
break
}
}
return u.String()
}
// generates a ListBucketsResponse from an array of BucketInfo, which can be
// serialized to match the XML and JSON API spec output.
func generateListBucketsResponse(buckets []BucketInfo) ListBucketsResponse {
var listbuckets []Bucket
var data = ListBucketsResponse{}
var owner = Owner{}
owner.ID = globalMinioDefaultOwnerID
for _, bucket := range buckets {
var listbucket = Bucket{}
listbucket.Name = bucket.Name
listbucket.CreationDate = bucket.Created.UTC().Format(iso8601TimeFormat)
listbuckets = append(listbuckets, listbucket)
}
data.Owner = owner
data.Buckets.Buckets = listbuckets
return data
}
// generates a ListBucketVersions response for the given bucket with the enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListVersionsResponse {
var versions []ObjectVersion
var prefixes []CommonPrefix
var owner = Owner{}
var data = ListVersionsResponse{}
owner.ID = globalMinioDefaultOwnerID
for _, object := range resp.Objects {
var content = ObjectVersion{}
if object.Name == "" {
continue
}
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
content.VersionID = "null"
content.IsLatest = true
versions = append(versions, content)
}
data.Name = bucket
data.Versions = versions
data.EncodingType = encodingType
data.Prefix = s3EncodeName(prefix, encodingType)
data.KeyMarker = s3EncodeName(marker, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.MaxKeys = maxKeys
data.NextKeyMarker = s3EncodeName(resp.NextMarker, encodingType)
data.IsTruncated = resp.IsTruncated
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
prefixes = append(prefixes, prefixItem)
}
data.CommonPrefixes = prefixes
return data
}
// generates a ListObjectsV1 response for the given bucket with the enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
var contents []Object
var prefixes []CommonPrefix
var owner = Owner{}
var data = ListObjectsResponse{}
owner.ID = globalMinioDefaultOwnerID
for _, object := range resp.Objects {
var content = Object{}
if object.Name == "" {
continue
}
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
contents = append(contents, content)
}
data.Name = bucket
data.Contents = contents
data.EncodingType = encodingType
data.Prefix = s3EncodeName(prefix, encodingType)
data.Marker = s3EncodeName(marker, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.MaxKeys = maxKeys
data.NextMarker = s3EncodeName(resp.NextMarker, encodingType)
data.IsTruncated = resp.IsTruncated
for _, prefix := range resp.Prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
prefixes = append(prefixes, prefixItem)
}
data.CommonPrefixes = prefixes
return data
}
// generates a ListObjectsV2 response for the given bucket with the enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
var contents []Object
var commonPrefixes []CommonPrefix
var owner = Owner{}
var data = ListObjectsV2Response{}
if fetchOwner {
owner.ID = globalMinioDefaultOwnerID
}
for _, object := range objects {
var content = Object{}
if object.Name == "" {
continue
}
content.Key = s3EncodeName(object.Name, encodingType)
content.LastModified = object.ModTime.UTC().Format(iso8601TimeFormat)
if object.ETag != "" {
content.ETag = "\"" + object.ETag + "\""
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
content.Owner = owner
if metadata {
content.UserMetadata = make(StringMap)
for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) {
// Do not need to send any internal metadata
// values to client.
continue
}
content.UserMetadata[k] = v
}
}
contents = append(contents, content)
}
data.Name = bucket
data.Contents = contents
data.EncodingType = encodingType
data.StartAfter = s3EncodeName(startAfter, encodingType)
data.Delimiter = s3EncodeName(delimiter, encodingType)
data.Prefix = s3EncodeName(prefix, encodingType)
data.MaxKeys = maxKeys
data.ContinuationToken = base64.StdEncoding.EncodeToString([]byte(token))
data.NextContinuationToken = base64.StdEncoding.EncodeToString([]byte(nextToken))
data.IsTruncated = isTruncated
for _, prefix := range prefixes {
var prefixItem = CommonPrefix{}
prefixItem.Prefix = s3EncodeName(prefix, encodingType)
commonPrefixes = append(commonPrefixes, prefixItem)
}
data.CommonPrefixes = commonPrefixes
data.KeyCount = len(data.Contents) + len(data.CommonPrefixes)
return data
}
// generates CopyObjectResponse from etag and lastModified time.
func generateCopyObjectResponse(etag string, lastModified time.Time) CopyObjectResponse {
return CopyObjectResponse{
ETag: "\"" + etag + "\"",
LastModified: lastModified.UTC().Format(iso8601TimeFormat),
}
}
// generates CopyObjectPartResponse from etag and lastModified time.
func generateCopyObjectPartResponse(etag string, lastModified time.Time) CopyObjectPartResponse {
return CopyObjectPartResponse{
ETag: "\"" + etag + "\"",
LastModified: lastModified.UTC().Format(iso8601TimeFormat),
}
}
// generates InitiateMultipartUploadResponse for given bucket, key and uploadID.
func generateInitiateMultipartUploadResponse(bucket, key, uploadID string) InitiateMultipartUploadResponse {
return InitiateMultipartUploadResponse{
Bucket: bucket,
Key: key,
UploadID: uploadID,
}
}
// generates CompleteMultipartUploadResponse for given bucket, key, location and ETag.
func generateCompleteMultpartUploadResponse(bucket, key, location, etag string) CompleteMultipartUploadResponse {
return CompleteMultipartUploadResponse{
Location: location,
Bucket: bucket,
Key: key,
// AWS S3 quotes the ETag in XML, make sure we are compatible here.
ETag: "\"" + etag + "\"",
}
}
// generates ListPartsResponse from ListPartsInfo.
func generateListPartsResponse(partsInfo ListPartsInfo, encodingType string) ListPartsResponse {
listPartsResponse := ListPartsResponse{}
listPartsResponse.Bucket = partsInfo.Bucket
listPartsResponse.Key = s3EncodeName(partsInfo.Object, encodingType)
listPartsResponse.UploadID = partsInfo.UploadID
listPartsResponse.StorageClass = globalMinioDefaultStorageClass
listPartsResponse.Initiator.ID = globalMinioDefaultOwnerID
listPartsResponse.Owner.ID = globalMinioDefaultOwnerID
listPartsResponse.MaxParts = partsInfo.MaxParts
listPartsResponse.PartNumberMarker = partsInfo.PartNumberMarker
listPartsResponse.IsTruncated = partsInfo.IsTruncated
listPartsResponse.NextPartNumberMarker = partsInfo.NextPartNumberMarker
listPartsResponse.Parts = make([]Part, len(partsInfo.Parts))
for index, part := range partsInfo.Parts {
newPart := Part{}
newPart.PartNumber = part.PartNumber
newPart.ETag = "\"" + part.ETag + "\""
newPart.Size = part.Size
newPart.LastModified = part.LastModified.UTC().Format(iso8601TimeFormat)
listPartsResponse.Parts[index] = newPart
}
return listPartsResponse
}
// generates ListMultipartUploadsResponse for given bucket and ListMultipartsInfo.
func generateListMultipartUploadsResponse(bucket string, multipartsInfo ListMultipartsInfo, encodingType string) ListMultipartUploadsResponse {
listMultipartUploadsResponse := ListMultipartUploadsResponse{}
listMultipartUploadsResponse.Bucket = bucket
listMultipartUploadsResponse.Delimiter = s3EncodeName(multipartsInfo.Delimiter, encodingType)
listMultipartUploadsResponse.IsTruncated = multipartsInfo.IsTruncated
listMultipartUploadsResponse.EncodingType = encodingType
listMultipartUploadsResponse.Prefix = s3EncodeName(multipartsInfo.Prefix, encodingType)
listMultipartUploadsResponse.KeyMarker = s3EncodeName(multipartsInfo.KeyMarker, encodingType)
listMultipartUploadsResponse.NextKeyMarker = s3EncodeName(multipartsInfo.NextKeyMarker, encodingType)
listMultipartUploadsResponse.MaxUploads = multipartsInfo.MaxUploads
listMultipartUploadsResponse.NextUploadIDMarker = multipartsInfo.NextUploadIDMarker
listMultipartUploadsResponse.UploadIDMarker = multipartsInfo.UploadIDMarker
listMultipartUploadsResponse.CommonPrefixes = make([]CommonPrefix, len(multipartsInfo.CommonPrefixes))
for index, commonPrefix := range multipartsInfo.CommonPrefixes {
listMultipartUploadsResponse.CommonPrefixes[index] = CommonPrefix{
Prefix: s3EncodeName(commonPrefix, encodingType),
}
}
listMultipartUploadsResponse.Uploads = make([]Upload, len(multipartsInfo.Uploads))
for index, upload := range multipartsInfo.Uploads {
newUpload := Upload{}
newUpload.UploadID = upload.UploadID
newUpload.Key = s3EncodeName(upload.Object, encodingType)
newUpload.Initiated = upload.Initiated.UTC().Format(iso8601TimeFormat)
listMultipartUploadsResponse.Uploads[index] = newUpload
}
return listMultipartUploadsResponse
}
// generates a multi-object delete response.
func generateMultiDeleteResponse(quiet bool, deletedObjects []ObjectIdentifier, errs []DeleteError) DeleteObjectsResponse {
deleteResp := DeleteObjectsResponse{}
if !quiet {
deleteResp.DeletedObjects = deletedObjects
}
deleteResp.Errors = errs
return deleteResp
}
func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
setCommonHeaders(w)
if mType != mimeNone {
w.Header().Set(xhttp.ContentType, string(mType))
}
w.Header().Set(xhttp.ContentLength, strconv.Itoa(len(response)))
w.WriteHeader(statusCode)
if response != nil {
w.Write(response)
w.(http.Flusher).Flush()
}
}
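A self-contained sketch of the same write pattern exercised against httptest. Since setCommonHeaders and mimeType belong to the package above, this hypothetical simplification inlines a plain Content-Type string instead:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strconv"
)

// writeResponseLite is a simplified stand-in for writeResponse above:
// headers first, then the status code, then the body, then a flush.
func writeResponseLite(w http.ResponseWriter, statusCode int, response []byte, contentType string) {
	if contentType != "" {
		w.Header().Set("Content-Type", contentType)
	}
	w.Header().Set("Content-Length", strconv.Itoa(len(response)))
	w.WriteHeader(statusCode) // headers are frozen after this call
	if response != nil {
		w.Write(response)
		if f, ok := w.(http.Flusher); ok {
			f.Flush()
		}
	}
}

func main() {
	rec := httptest.NewRecorder()
	writeResponseLite(rec, http.StatusOK, []byte("<ok/>"), "application/xml")
	fmt.Println(rec.Code, rec.Header().Get("Content-Type"), rec.Body.String())
}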
// mimeType represents the various MIME types used in API responses.
type mimeType string
const (
// Means no response type.
mimeNone mimeType = ""
// Means response type is JSON.
mimeJSON mimeType = "application/json"
// Means response type is XML.
mimeXML mimeType = "application/xml"
)
// writeSuccessResponseJSON writes success headers and response if any,
// with content-type set to `application/json`.
func writeSuccessResponseJSON(w http.ResponseWriter, response []byte) {
writeResponse(w, http.StatusOK, response, mimeJSON)
}
// writeSuccessResponseXML writes success headers and response if any,
// with content-type set to `application/xml`.
func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {
writeResponse(w, http.StatusOK, response, mimeXML)
}
// writeSuccessNoContent writes success headers with http status 204
func writeSuccessNoContent(w http.ResponseWriter) {
writeResponse(w, http.StatusNoContent, nil, mimeNone)
}
// writeRedirectSeeOther writes Location header with http status 303
func writeRedirectSeeOther(w http.ResponseWriter, location string) {
w.Header().Set(xhttp.Location, location)
writeResponse(w, http.StatusSeeOther, nil, mimeNone)
}
func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
writeResponse(w, http.StatusOK, nil, mimeNone)
}
// writeErrorResponse writes the error response as XML, setting retry or redirect headers where applicable.
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL, browser bool) {
switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set the Retry-After header to tell user agents to retry the request after 120 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "AccessDenied":
// If the request comes from a browser and the browser
// UI is enabled, redirect to the reserved bucket path.
if browser && globalBrowserEnabled {
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}
// Generate error response.
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path,
w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}
func writeErrorResponseHeadersOnly(w http.ResponseWriter, err APIError) {
writeResponse(w, err.HTTPStatusCode, nil, mimeNone)
}
func writeErrorResponseString(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
// Generate string error response.
writeResponse(w, err.HTTPStatusCode, []byte(err.Description), mimeNone)
}
// writeErrorResponseJSON - writes error response in JSON format;
// useful for admin APIs.
func writeErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
// Generate error response.
errorResponse := getAPIErrorResponse(ctx, err, reqURL.Path, w.Header().Get(xhttp.AmzRequestID), globalDeploymentID)
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
// writeCustomErrorResponseJSON - similar to writeErrorResponseJSON,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseJSON(ctx context.Context, w http.ResponseWriter, err APIError,
errBody string, reqURL *url.URL) {
reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,
Message: errBody,
Resource: reqURL.Path,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
}
encodedErrorResponse := encodeResponseJSON(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeJSON)
}
// writeCustomErrorResponseXML - similar to writeErrorResponse,
// but accepts the error message directly (this allows messages to be
// dynamically generated.)
func writeCustomErrorResponseXML(ctx context.Context, w http.ResponseWriter, err APIError, errBody string, reqURL *url.URL, browser bool) {
switch err.Code {
case "SlowDown", "XMinioServerNotInitialized", "XMinioReadQuorum", "XMinioWriteQuorum":
// Set the Retry-After header to tell user agents to retry the request after 120 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
case "AccessDenied":
// If the request comes from a browser and the browser
// UI is enabled, redirect to the reserved bucket path.
if browser && globalBrowserEnabled {
w.Header().Set(xhttp.Location, minioReservedBucketPath+reqURL.Path)
w.WriteHeader(http.StatusTemporaryRedirect)
return
}
}
reqInfo := logger.GetReqInfo(ctx)
errorResponse := APIErrorResponse{
Code: err.Code,
Message: errBody,
Resource: reqURL.Path,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
RequestID: w.Header().Get(xhttp.AmzRequestID),
HostID: globalDeploymentID,
}
encodedErrorResponse := encodeResponse(errorResponse)
writeResponse(w, err.HTTPStatusCode, encodedErrorResponse, mimeXML)
}

View file

@ -1,121 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"net/http"
"testing"
)
// Tests object location.
func TestObjectLocation(t *testing.T) {
testCases := []struct {
request *http.Request
bucket, object string
domains []string
expectedLocation string
}{
// Server binding to localhost IP with https.
{
request: &http.Request{
Host: "127.0.0.1:9000",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpScheme},
},
},
bucket: "testbucket1",
object: "test/1.txt",
expectedLocation: "http://127.0.0.1:9000/testbucket1/test/1.txt",
},
{
request: &http.Request{
Host: "127.0.0.1:9000",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpsScheme},
},
},
bucket: "testbucket1",
object: "test/1.txt",
expectedLocation: "https://127.0.0.1:9000/testbucket1/test/1.txt",
},
// Server binding to fqdn.
{
request: &http.Request{
Host: "s3.mybucket.org",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpScheme},
},
},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "http://s3.mybucket.org/mybucket/test/1.txt",
},
// Server binding to fqdn.
{
request: &http.Request{
Host: "mys3.mybucket.org",
Header: map[string][]string{},
},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "http://mys3.mybucket.org/mybucket/test/1.txt",
},
// Server with virtual domain name.
{
request: &http.Request{
Host: "mys3.bucket.org",
Header: map[string][]string{},
},
domains: []string{"mys3.bucket.org"},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "http://mybucket.mys3.bucket.org/test/1.txt",
},
{
request: &http.Request{
Host: "mys3.bucket.org",
Header: map[string][]string{
"X-Forwarded-Scheme": {httpsScheme},
},
},
domains: []string{"mys3.bucket.org"},
bucket: "mybucket",
object: "test/1.txt",
expectedLocation: "https://mybucket.mys3.bucket.org/test/1.txt",
},
}
for i, testCase := range testCases {
gotLocation := getObjectLocation(testCase.request, testCase.domains, testCase.bucket, testCase.object)
if testCase.expectedLocation != gotLocation {
t.Errorf("Test %d: expected %s, got %s", i+1, testCase.expectedLocation, gotLocation)
}
}
}
// Tests getURLScheme function behavior.
func TestGetURLScheme(t *testing.T) {
tls := false
gotScheme := getURLScheme(tls)
if gotScheme != httpScheme {
t.Errorf("Expected %s, got %s", httpScheme, gotScheme)
}
tls = true
gotScheme = getURLScheme(tls)
if gotScheme != httpsScheme {
t.Errorf("Expected %s, got %s", httpsScheme, gotScheme)
}
}

View file

@ -1,297 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"net/http"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/legacy/http"
)
func newHTTPServerFn() *xhttp.Server {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
return globalHTTPServer
}
func newObjectLayerWithoutSafeModeFn() ObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
return globalObjectAPI
}
func newObjectLayerFn() ObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
if globalSafeMode {
return nil
}
return globalObjectAPI
}
func newCachedObjectLayerFn() CacheObjectLayer {
globalObjLayerMutex.Lock()
defer globalObjLayerMutex.Unlock()
if globalSafeMode {
return nil
}
return globalCacheObjectAPI
}
// objectAPIHandler implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
CacheAPI func() CacheObjectLayer
// Returns true if handlers should interpret encryption.
EncryptionEnabled func() bool
// Returns true if handlers allow SSE-KMS encryption headers.
AllowSSEKMS func() bool
}
// registerAPIRouter - registers S3 compatible APIs.
func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) {
// Initialize API.
api := objectAPIHandlers{
ObjectAPI: newObjectLayerFn,
CacheAPI: newCachedObjectLayerFn,
EncryptionEnabled: func() bool {
return encryptionEnabled
},
AllowSSEKMS: func() bool {
return allowSSEKMS
},
}
// API Router
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
var routers []*mux.Router
for _, domainName := range globalDomainNames {
r1 := apiRouter.Host("{bucket:.+}." + domainName).Subrouter()
r2 := apiRouter.Host("{bucket:.+}." + domainName + ":{port:.*}").Subrouter()
routers = append(routers, []*mux.Router{r1, r2}...)
}
routers = append(routers, apiRouter.PathPrefix("/{bucket}").Subrouter())
for _, bucket := range routers {
// Object operations
// HeadObject
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("headobject", httpTraceAll(api.HeadObjectHandler))))
// CopyObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobjectpart", httpTraceAll(api.CopyObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// PutObjectPart
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectpart", httpTraceHdrs(api.PutObjectPartHandler)))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
// ListObjectParts
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("listobjectparts", httpTraceAll(api.ListObjectPartsHandler)))).Queries("uploadId", "{uploadId:.*}")
// CompleteMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("completemutipartupload", httpTraceAll(api.CompleteMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// NewMultipartUpload
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("newmultipartupload", httpTraceAll(api.NewMultipartUploadHandler)))).Queries("uploads", "")
// AbortMultipartUpload
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("abortmultipartupload", httpTraceAll(api.AbortMultipartUploadHandler)))).Queries("uploadId", "{uploadId:.*}")
// GetObjectACL - this is a dummy call.
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectacl", httpTraceHdrs(api.GetObjectACLHandler)))).Queries("acl", "")
// PutObjectACL - this is a dummy call.
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectacl", httpTraceHdrs(api.PutObjectACLHandler)))).Queries("acl", "")
// GetObjectTagging
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler)))).Queries("tagging", "")
// PutObjectTagging
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjecttagging", httpTraceHdrs(api.PutObjectTaggingHandler)))).Queries("tagging", "")
// DeleteObjectTagging
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobjecttagging", httpTraceHdrs(api.DeleteObjectTaggingHandler)))).Queries("tagging", "")
// SelectObjectContent
bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler)))).Queries("select", "").Queries("select-type", "2")
// GetObjectRetention
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectretention", httpTraceAll(api.GetObjectRetentionHandler)))).Queries("retention", "")
// GetObjectLegalHold
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobjectlegalhold", httpTraceAll(api.GetObjectLegalHoldHandler)))).Queries("legal-hold", "")
// GetObject
bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler))))
// CopyObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(maxClients(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler))))
// PutObjectRetention
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectretention", httpTraceAll(api.PutObjectRetentionHandler)))).Queries("retention", "")
// PutObjectLegalHold
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobjectlegalhold", httpTraceAll(api.PutObjectLegalHoldHandler)))).Queries("legal-hold", "")
// PutObject
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler))))
// DeleteObject
bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(
maxClients(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler))))
/// Bucket operations
// GetBucketLocation
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler)))).Queries("location", "")
// GetBucketPolicy
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler)))).Queries("policy", "")
// GetBucketLifecycle
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketEncryption
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketencryption", httpTraceAll(api.GetBucketEncryptionHandler)))).Queries("encryption", "")
// Dummy Bucket Calls
// GetBucketACL -- this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketacl", httpTraceAll(api.GetBucketACLHandler)))).Queries("acl", "")
// PutBucketACL -- this is a dummy call.
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketacl", httpTraceAll(api.PutBucketACLHandler)))).Queries("acl", "")
// GetBucketCors - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketcors", httpTraceAll(api.GetBucketCorsHandler)))).Queries("cors", "")
// GetBucketWebsiteHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketwebsite", httpTraceAll(api.GetBucketWebsiteHandler)))).Queries("website", "")
// GetBucketAccelerateHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketaccelerate", httpTraceAll(api.GetBucketAccelerateHandler)))).Queries("accelerate", "")
// GetBucketRequestPaymentHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketrequestpayment", httpTraceAll(api.GetBucketRequestPaymentHandler)))).Queries("requestPayment", "")
// GetBucketLoggingHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlogging", httpTraceAll(api.GetBucketLoggingHandler)))).Queries("logging", "")
// GetBucketLifecycleHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler)))).Queries("lifecycle", "")
// GetBucketReplicationHandler - this is a dummy call.
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketreplication", httpTraceAll(api.GetBucketReplicationHandler)))).Queries("replication", "")
// GetBucketTaggingHandler
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbuckettagging", httpTraceAll(api.GetBucketTaggingHandler)))).Queries("tagging", "")
// DeleteBucketWebsiteHandler
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketwebsite", httpTraceAll(api.DeleteBucketWebsiteHandler)))).Queries("website", "")
// DeleteBucketTaggingHandler
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler)))).Queries("tagging", "")
// GetBucketObjectLockConfig
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// GetBucketVersioning
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler)))).Queries("versioning", "")
// GetBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler)))).Queries("notification", "")
// ListenBucketNotification
bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
// ListMultipartUploads
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler)))).Queries("uploads", "")
// ListObjectsV2M
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler)))).Queries("list-type", "2", "metadata", "true")
// ListObjectsV2
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler)))).Queries("list-type", "2")
// ListBucketVersions
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler)))).Queries("versions", "")
// ListObjectsV1 (Legacy)
bucket.Methods(http.MethodGet).HandlerFunc(
maxClients(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler))))
// PutBucketLifecycle
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler)))).Queries("lifecycle", "")
// PutBucketEncryption
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketencryption", httpTraceAll(api.PutBucketEncryptionHandler)))).Queries("encryption", "")
// PutBucketPolicy
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler)))).Queries("policy", "")
// PutBucketObjectLockConfig
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler)))).Queries("object-lock", "")
// PutBucketTaggingHandler
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbuckettagging", httpTraceAll(api.PutBucketTaggingHandler)))).Queries("tagging", "")
// PutBucketVersioning
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler)))).Queries("versioning", "")
// PutBucketNotification
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler)))).Queries("notification", "")
// PutBucket
bucket.Methods(http.MethodPut).HandlerFunc(
maxClients(collectAPIStats("putbucket", httpTraceAll(api.PutBucketHandler))))
// HeadBucket
bucket.Methods(http.MethodHead).HandlerFunc(
maxClients(collectAPIStats("headbucket", httpTraceAll(api.HeadBucketHandler))))
// PostPolicy
bucket.Methods(http.MethodPost).HeadersRegexp(xhttp.ContentType, "multipart/form-data*").HandlerFunc(
maxClients(collectAPIStats("postpolicybucket", httpTraceHdrs(api.PostPolicyBucketHandler))))
// DeleteMultipleObjects
bucket.Methods(http.MethodPost).HandlerFunc(
maxClients(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler)))).Queries("delete", "")
// DeleteBucketPolicy
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler)))).Queries("policy", "")
// DeleteBucketLifecycle
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler)))).Queries("lifecycle", "")
// DeleteBucketEncryption
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucketencryption", httpTraceAll(api.DeleteBucketEncryptionHandler)))).Queries("encryption", "")
// DeleteBucket
bucket.Methods(http.MethodDelete).HandlerFunc(
maxClients(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler))))
}
/// Root operation
// ListBuckets
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
// S3 browser with signature v4 adds '//' for ListBuckets request, so rather
// than failing with UnknownAPIRequest we simply handle it for now.
apiRouter.Methods(http.MethodGet).Path(SlashSeparator + SlashSeparator).HandlerFunc(
maxClients(collectAPIStats("listbuckets", httpTraceAll(api.ListBucketsHandler))))
// If none of the routes match add default error handler routes
apiRouter.NotFoundHandler = http.HandlerFunc(collectAPIStats("notfound", httpTraceAll(errorResponseHandler)))
apiRouter.MethodNotAllowedHandler = http.HandlerFunc(collectAPIStats("methodnotallowed", httpTraceAll(errorResponseHandler)))
}
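The dispatch trick above is that gorilla/mux matches most S3 operations on the same method and path, disambiguating by query subresource via Queries, so the more specific routes must be registered first (as the ACL, tagging, and retention routes are registered before the plain GetObject above). A minimal standalone sketch of that pattern; the handlers here are hypothetical:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	bucket := router.PathPrefix("/{bucket}").Subrouter()

	// Register the more specific route first: GET ?acl on an object.
	bucket.Methods(http.MethodGet).Path("/{object:.+}").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprint(w, "GetObjectACL")
		}).Queries("acl", "")

	// Fallback: plain GetObject.
	bucket.Methods(http.MethodGet).Path("/{object:.+}").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprint(w, "GetObject")
		})

	for _, target := range []string{"/b/o.txt", "/b/o.txt?acl="} {
		rec := httptest.NewRecorder()
		req := httptest.NewRequest(http.MethodGet, target, nil)
		router.ServeHTTP(rec, req)
		fmt.Println(target, "->", rec.Body.String())
	}
}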

View file

@ -1,107 +0,0 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"strings"
)
func shouldEscape(c byte) bool {
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '_', '.', '/', '*':
return false
}
return true
}
// s3URLEncode is based on Go's url.QueryEscape() code,
// with some S3-specific exceptions:
// - avoid encoding '/' and '*'
// - force encoding of '~'
func s3URLEncode(s string) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
if c == ' ' {
spaceCount++
} else {
hexCount++
}
}
}
if spaceCount == 0 && hexCount == 0 {
return s
}
var buf [64]byte
var t []byte
required := len(s) + 2*hexCount
if required <= len(buf) {
t = buf[:required]
} else {
t = make([]byte, required)
}
if hexCount == 0 {
copy(t, s)
for i := 0; i < len(s); i++ {
if s[i] == ' ' {
t[i] = '+'
}
}
return string(t)
}
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == ' ':
t[j] = '+'
j++
case shouldEscape(c):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
// s3EncodeName encodes string in response when encodingType is specified in AWS S3 requests.
func s3EncodeName(name string, encodingType string) (result string) {
// Quick path to exit
if encodingType == "" {
return name
}
encodingType = strings.ToLower(encodingType)
switch encodingType {
case "url":
return s3URLEncode(name)
}
return name
}
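The two S3 exceptions show up clearly against the standard library: Go's url.QueryEscape keeps '~' literal and escapes '/' and '*', while the S3 variant does the reverse. A standalone approximation of s3URLEncode built by post-processing url.QueryEscape; this is an assumption-level sketch (the real implementation above walks bytes directly), but it reproduces the test table that follows:

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// s3ishEncode approximates s3URLEncode: keep '/' and '*' literal,
// force-encode '~', space becomes '+', everything else per QueryEscape.
func s3ishEncode(s string) string {
	return strings.NewReplacer("%2F", "/", "%2A", "*", "~", "%7E").
		Replace(url.QueryEscape(s))
}

func main() {
	for _, s := range []string{"a b", "p/", "~user", "*user", "user+password"} {
		fmt.Printf("%-16q stdlib=%-18q s3=%q\n", s, url.QueryEscape(s), s3ishEncode(s))
	}
}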

View file

@ -1,49 +0,0 @@
/*
* Minio Cloud Storage, (C) 2019 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"fmt"
"testing"
)
func TestS3EncodeName(t *testing.T) {
testCases := []struct {
inputText, encodingType, expectedOutput string
}{
{"a b", "", "a b"},
{"a b", "url", "a+b"},
{"p- ", "url", "p-+"},
{"p-%", "url", "p-%25"},
{"p/", "url", "p/"},
{"p/", "url", "p/"},
{"~user", "url", "%7Euser"},
{"*user", "url", "*user"},
{"user+password", "url", "user%2Bpassword"},
{"_user", "url", "_user"},
{"firstname.lastname", "url", "firstname.lastname"},
}
for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
outputText := s3EncodeName(testCase.inputText, testCase.encodingType)
if testCase.expectedOutput != outputText {
t.Errorf("Expected `%s`, got `%s`", testCase.expectedOutput, outputText)
}
})
}
}

View file

@ -1,643 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015-2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"crypto/subtle"
"encoding/base64"
"encoding/hex"
"errors"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
"time"
xhttp "github.com/minio/minio/legacy/http"
xjwt "github.com/minio/minio/legacy/jwt"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/hash"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
// Verify if request has JWT.
func isRequestJWT(r *http.Request) bool {
return strings.HasPrefix(r.Header.Get(xhttp.Authorization), jwtAlgorithm)
}
// Verify if request has AWS Signature Version '4'.
func isRequestSignatureV4(r *http.Request) bool {
return strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV4Algorithm)
}
// Verify if request has AWS Signature Version '2'.
func isRequestSignatureV2(r *http.Request) bool {
return (!strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV4Algorithm) &&
strings.HasPrefix(r.Header.Get(xhttp.Authorization), signV2Algorithm))
}
// Verify if request has AWS PreSign Version '4'.
func isRequestPresignedSignatureV4(r *http.Request) bool {
_, ok := r.URL.Query()[xhttp.AmzCredential]
return ok
}
// Verify request has AWS PreSign Version '2'.
func isRequestPresignedSignatureV2(r *http.Request) bool {
_, ok := r.URL.Query()[xhttp.AmzAccessKeyID]
return ok
}
// Verify if request has AWS Post policy Signature Version '4'.
func isRequestPostPolicySignatureV4(r *http.Request) bool {
return strings.Contains(r.Header.Get(xhttp.ContentType), "multipart/form-data") &&
r.Method == http.MethodPost
}
// Verify if the request has AWS Streaming Signature Version '4'. This is only valid for the 'PUT' operation.
func isRequestSignStreamingV4(r *http.Request) bool {
return r.Header.Get(xhttp.AmzContentSha256) == streamingContentSHA256 &&
r.Method == http.MethodPut
}
// Authorization type.
type authType int
// List of all supported auth types.
const (
authTypeUnknown authType = iota
authTypeAnonymous
authTypePresigned
authTypePresignedV2
authTypePostPolicy
authTypeStreamingSigned
authTypeSigned
authTypeSignedV2
authTypeJWT
authTypeSTS
)
// Get request authentication type.
func getRequestAuthType(r *http.Request) authType {
if isRequestSignatureV2(r) {
return authTypeSignedV2
} else if isRequestPresignedSignatureV2(r) {
return authTypePresignedV2
} else if isRequestSignStreamingV4(r) {
return authTypeStreamingSigned
} else if isRequestSignatureV4(r) {
return authTypeSigned
} else if isRequestPresignedSignatureV4(r) {
return authTypePresigned
} else if isRequestJWT(r) {
return authTypeJWT
} else if isRequestPostPolicySignatureV4(r) {
return authTypePostPolicy
} else if _, ok := r.URL.Query()[xhttp.Action]; ok {
return authTypeSTS
} else if _, ok := r.Header[xhttp.Authorization]; !ok {
return authTypeAnonymous
}
return authTypeUnknown
}
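The classification above is ordered prefix and query sniffing on the Authorization header and URL. A reduced standalone sketch covering the signature-based types; the concrete prefixes are assumptions here (SigV4 Authorization values start with "AWS4-HMAC-SHA256", SigV2 with "AWS ", and presigned V4 carries X-Amz-Credential in the query), matching the AWS conventions the constants above encode:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"strings"
)

// classify is a reduced stand-in for getRequestAuthType. Checking the
// V4 prefix first is safe because "AWS4-HMAC-SHA256" never begins with
// the V2 prefix "AWS " (note the space).
func classify(r *http.Request) string {
	auth := r.Header.Get("Authorization")
	switch {
	case strings.HasPrefix(auth, "AWS4-HMAC-SHA256"):
		return "signedV4"
	case strings.HasPrefix(auth, "AWS "):
		return "signedV2"
	}
	if _, ok := r.URL.Query()["X-Amz-Credential"]; ok {
		return "presignedV4"
	}
	if auth == "" {
		return "anonymous"
	}
	return "unknown"
}

func main() {
	req := httptest.NewRequest(http.MethodGet, "/bucket/key?X-Amz-Credential=example", nil)
	fmt.Println(classify(req)) // presignedV4
	req = httptest.NewRequest(http.MethodGet, "/bucket/key", nil)
	req.Header.Set("Authorization", "AWS4-HMAC-SHA256 Credential=example")
	fmt.Println(classify(req)) // signedV4
}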
func validateAdminSignature(ctx context.Context, r *http.Request, region string) (auth.Credentials, map[string]interface{}, bool, APIErrorCode) {
var cred auth.Credentials
var owner bool
s3Err := ErrAccessDenied
if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
// We only support admin credentials to access admin APIs.
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
if s3Err != ErrNone {
return cred, nil, owner, s3Err
}
// We only support V4 (non-presigned) requests with a signed body.
s3Err = isReqAuthenticated(ctx, r, region, serviceS3)
}
if s3Err != ErrNone {
reqInfo := (&logger.ReqInfo{}).AppendTags("requestHeaders", dumpRequest(r))
ctx := logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
return cred, nil, owner, s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, nil, owner, s3Err
}
return cred, claims, owner, ErrNone
}
// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
cred, claims, owner, s3Err := validateAdminSignature(ctx, r, region)
if s3Err != ErrNone {
return cred, s3Err
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
IsOwner: owner,
Claims: claims,
}) {
// Request is allowed; return the appropriate access key.
return cred, ErrNone
}
return cred, ErrAccessDenied
}
// Fetch the security token set by the client.
func getSessionToken(r *http.Request) (token string) {
token = r.Header.Get(xhttp.AmzSecurityToken)
if token != "" {
return token
}
return r.URL.Query().Get(xhttp.AmzSecurityToken)
}
// Fetch claims in the security token returned by the client; this doesn't
// return errors - on error the returned claims map will be empty.
func mustGetClaimsFromToken(r *http.Request) map[string]interface{} {
claims, _ := getClaimsFromToken(r, getSessionToken(r))
return claims
}
// Fetch claims in the security token returned by the client.
func getClaimsFromToken(r *http.Request, token string) (map[string]interface{}, error) {
claims := xjwt.NewMapClaims()
if token == "" {
return claims.Map(), nil
}
stsTokenCallback := func(claims *xjwt.MapClaims) ([]byte, error) {
// The JWT token for x-amz-security-token is signed with the admin
// secret key, so temporary credentials become invalid if the server
// admin credentials change. This is done to ensure that clients
// cannot decode the token using the temp secret keys and generate an
// entirely new claim by essentially hijacking the policies. We need
// to make sure that this is based on an admin credential so that the
// token cannot be decoded on the client side and is treated as an
// opaque value.
return []byte(globalActiveCred.SecretKey), nil
}
if err := xjwt.ParseWithClaims(token, claims, stsTokenCallback); err != nil {
return nil, errAuthentication
}
if globalPolicyOPA == nil {
// If OPA is not set and the LDAP claim key is set, allow the claim.
if _, ok := claims.Lookup(ldapUser); ok {
return claims.Map(), nil
}
// If OPA is not set, the session token must carry a policy
// claim; it is mandatory, so reject requests without one.
_, pokOpenID := claims.Lookup(iamPolicyClaimNameOpenID())
_, pokSA := claims.Lookup(iamPolicyClaimNameSA())
if !pokOpenID && !pokSA {
return nil, errAuthentication
}
sp, spok := claims.Lookup(iampolicy.SessionPolicyName)
if !spok {
return claims.Map(), nil
}
// The sub-policy, if set, is a base64-encoded string; decode it.
// If decoding fails, reject the request.
spBytes, err := base64.StdEncoding.DecodeString(sp)
if err != nil {
// Base64 decoding failed; log it to indicate that the client
// sent a malformed request.
logger.LogIf(r.Context(), err, logger.Application)
return nil, errAuthentication
}
claims.MapClaims[iampolicy.SessionPolicyName] = string(spBytes)
}
return claims.Map(), nil
}
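// Illustrative sketch (an assumption, not in the original file): the embedded
// session policy travels base64-encoded inside the token claim, so decoding a
// raw claim value by hand mirrors the step above.
func exampleDecodeSessionPolicy(encoded string) (string, error) {
spBytes, err := base64.StdEncoding.DecodeString(encoded)
if err != nil {
// Malformed input from the client; callers should reject the request.
return "", errAuthentication
}
return string(spBytes), nil
}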
// Fetch claims in the security token returned by the client and validate the token.
func checkClaimsFromToken(r *http.Request, cred auth.Credentials) (map[string]interface{}, APIErrorCode) {
token := getSessionToken(r)
if token != "" && cred.AccessKey == "" {
return nil, ErrNoAccessKey
}
if cred.IsServiceAccount() && token == "" {
token = cred.SessionToken
}
if subtle.ConstantTimeCompare([]byte(token), []byte(cred.SessionToken)) != 1 {
return nil, ErrInvalidToken
}
claims, err := getClaimsFromToken(r, token)
if err != nil {
return nil, toAPIErrorCode(r.Context(), err)
}
return claims, ErrNone
}
// Check request auth type verifies the incoming http request:
// - validates the request signature
// - for anonymous requests, validates the policy action against bucket policies, if any
// - for authenticated requests, validates IAM policies.
// Returns an APIErrorCode, if any, to be replied to the client.
func checkRequestAuthType(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (s3Err APIErrorCode) {
_, _, s3Err = checkRequestAuthTypeToAccessKey(ctx, r, action, bucketName, objectName)
return s3Err
}
// FIXME: Remove this temporary stub that bypasses the MinIO auth procedure.
func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (string, bool, APIErrorCode) {
return "", true, ErrNone
}
// Check request auth type verifies the incoming http request:
// - validates the request signature
// - for anonymous requests, validates the policy action against bucket policies, if any
// - for authenticated requests, validates IAM policies.
// Returns an APIErrorCode, if any, to be replied to the client.
// Additionally returns the accessKey used in the request, and whether this request is by an admin.
func _checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, action policy.Action, bucketName, objectName string) (accessKey string, owner bool, s3Err APIErrorCode) {
var cred auth.Credentials
switch getRequestAuthType(r) {
case authTypeUnknown, authTypeStreamingSigned:
return accessKey, owner, ErrSignatureVersionNotSupported
case authTypePresignedV2, authTypeSignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return accessKey, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalServerRegion
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
}
if s3Err = isReqAuthenticated(ctx, r, region, serviceS3); s3Err != ErrNone {
return accessKey, owner, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return accessKey, owner, s3Err
}
var claims map[string]interface{}
claims, s3Err = checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return accessKey, owner, s3Err
}
// LocationConstraint is valid only for CreateBucketAction.
var locationConstraint string
if action == policy.CreateBucketAction {
// To extract the region from the XML in the request body, get a copy of the request body.
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
return accessKey, owner, ErrMalformedXML
}
// Populate payload to extract location constraint.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
var s3Error APIErrorCode
locationConstraint, s3Error = parseLocationConstraint(r)
if s3Error != ErrNone {
return accessKey, owner, s3Error
}
// Populate payload again to handle it in HTTP handler.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
}
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, locationConstraint, "", nil),
IsOwner: false,
ObjectName: objectName,
}) {
// Request is allowed; return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return cred.AccessKey, owner, ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: iampolicy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
// Request is allowed; return the appropriate access key.
return cred.AccessKey, owner, ErrNone
}
return cred.AccessKey, owner, ErrAccessDenied
}
// Verify if request has valid AWS Signature Version '2'.
func isReqAuthenticatedV2(r *http.Request) (s3Error APIErrorCode) {
if isRequestSignatureV2(r) {
return doesSignV2Match(r)
}
return doesPresignV2SignatureMatch(r)
}
func reqSignatureV4Verify(r *http.Request, region string, stype serviceType) (s3Error APIErrorCode) {
sha256sum := getContentSha256Cksum(r, stype)
switch {
case isRequestSignatureV4(r):
return doesSignatureMatch(sha256sum, r, region, stype)
case isRequestPresignedSignatureV4(r):
return doesPresignedSignatureMatch(sha256sum, r, region, stype)
default:
return ErrAccessDenied
}
}
// Verify if request has valid AWS Signature Version '4'.
func isReqAuthenticated(ctx context.Context, r *http.Request, region string, stype serviceType) (s3Error APIErrorCode) {
if errCode := reqSignatureV4Verify(r, region, stype); errCode != ErrNone {
return errCode
}
var (
err error
contentMD5, contentSHA256 []byte
)
// Extract 'Content-Md5' if present.
contentMD5, err = checkValidMD5(r.Header)
if err != nil {
return ErrInvalidDigest
}
// Extract either 'X-Amz-Content-Sha256' header or 'X-Amz-Content-Sha256' query parameter (if V4 presigned)
// Do not verify 'X-Amz-Content-Sha256' if skipSHA256.
if skipSHA256 := skipContentSha256Cksum(r); !skipSHA256 && isRequestPresignedSignatureV4(r) {
if sha256Sum, ok := r.URL.Query()[xhttp.AmzContentSha256]; ok && len(sha256Sum) > 0 {
contentSHA256, err = hex.DecodeString(sha256Sum[0])
if err != nil {
return ErrContentSHA256Mismatch
}
}
} else if _, ok := r.Header[xhttp.AmzContentSha256]; !skipSHA256 && ok {
contentSHA256, err = hex.DecodeString(r.Header.Get(xhttp.AmzContentSha256))
if err != nil || len(contentSHA256) == 0 {
return ErrContentSHA256Mismatch
}
}
// Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present.
// The verification happens implicitly during reading.
reader, err := hash.NewReader(r.Body, -1, hex.EncodeToString(contentMD5),
hex.EncodeToString(contentSHA256), -1, globalCLIContext.StrictS3Compat)
if err != nil {
return toAPIErrorCode(ctx, err)
}
r.Body = reader
return ErrNone
}
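// A minimal sketch, not from the original file: because verification happens
// implicitly while reading, digest errors surface only once a handler drains
// the wrapped body.
func exampleDrainVerifiedBody(r *http.Request) error {
// io.Copy drives the MD5/SHA256 checks inside the hash.Reader installed above.
_, err := io.Copy(ioutil.Discard, r.Body)
return err
}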
// authHandler - handles all the incoming authorization headers and validates them if possible.
type authHandler struct {
handler http.Handler
}
// setAuthHandler to validate authorization header for the incoming request.
func setAuthHandler(h http.Handler) http.Handler {
return authHandler{h}
}
// List of all supported S3 auth types.
var supportedS3AuthTypes = map[authType]struct{}{
authTypeAnonymous: {},
authTypePresigned: {},
authTypePresignedV2: {},
authTypeSigned: {},
authTypeSignedV2: {},
authTypePostPolicy: {},
authTypeStreamingSigned: {},
}
// Check whether the authType is supported.
func isSupportedS3AuthType(aType authType) bool {
_, ok := supportedS3AuthTypes[aType]
return ok
}
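// For example, given the table above:
//
//	isSupportedS3AuthType(authTypeSigned) // true
//	isSupportedS3AuthType(authTypeJWT)    // false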
// handler for validating incoming authorization headers.
func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
aType := getRequestAuthType(r)
if isSupportedS3AuthType(aType) {
// Let top level caller validate for anonymous and known signed requests.
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeJWT {
// Validate the Authorization header if it is a valid JWT request.
if _, _, authErr := webRequestAuthenticate(r); authErr != nil {
w.WriteHeader(http.StatusUnauthorized)
return
}
a.handler.ServeHTTP(w, r)
return
} else if aType == authTypeSTS {
a.handler.ServeHTTP(w, r)
return
}
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}
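// Illustrative wiring sketch (hypothetical, not the original server setup):
// setAuthHandler is meant to wrap the top-level router so every request is
// classified before it reaches an API handler.
func exampleWireAuthHandler(router http.Handler) *http.Server {
return &http.Server{
Addr:    ":9000", // hypothetical listen address
Handler: setAuthHandler(router),
}
}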
func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool, map[string]interface{}, APIErrorCode) {
var cred auth.Credentials
var owner bool
var s3Err APIErrorCode
switch atype {
case authTypeUnknown, authTypeStreamingSigned:
return cred, owner, nil, ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
if s3Err = isReqAuthenticatedV2(r); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalServerRegion
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, nil, s3Err
}
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return cred, owner, nil, s3Err
}
return cred, owner, claims, ErrNone
}
func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate time.Time, retMode objectlock.RetMode, byPassSet bool, r *http.Request, cred auth.Credentials, owner bool, claims map[string]interface{}) (s3Err APIErrorCode) {
var retSet bool
if cred.AccessKey == "" {
conditions := getConditionValues(r, "", "", nil)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.BypassGovernanceRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
})
}
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(policy.PutObjectRetentionAction),
BucketName: bucketName,
ConditionValues: conditions,
IsOwner: false,
ObjectName: objectName,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}
conditions := getConditionValues(r, "", cred.AccessKey, claims)
conditions["object-lock-mode"] = []string{string(retMode)}
conditions["object-lock-retain-until-date"] = []string{retDate.Format(time.RFC3339)}
if retDays > 0 {
conditions["object-lock-remaining-retention-days"] = []string{strconv.Itoa(retDays)}
}
if retMode == objectlock.RetGovernance && byPassSet {
byPassSet = globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.BypassGovernanceRetentionAction,
BucketName: bucketName,
ObjectName: objectName,
ConditionValues: conditions,
IsOwner: owner,
Claims: claims,
})
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: policy.PutObjectRetentionAction,
BucketName: bucketName,
ConditionValues: conditions,
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
retSet = true
}
if byPassSet || retSet {
return ErrNone
}
return ErrAccessDenied
}
// isPutActionAllowed - checks if a PUT operation is allowed on the resource;
// this call verifies bucket policies and IAM policies, and supports
// multi-user checks etc.
func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
switch atype {
case authTypeUnknown:
return ErrSignatureVersionNotSupported
case authTypeSignedV2, authTypePresignedV2:
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeStreamingSigned, authTypePresigned, authTypeSigned:
region := globalServerRegion
cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
}
if s3Err != ErrNone {
return s3Err
}
claims, s3Err := checkClaimsFromToken(r, cred)
if s3Err != ErrNone {
return s3Err
}
// Do not check for PutObjectRetentionAction permission
// if mode and retain-until date are not set.
// This can happen when the bucket has a default lock config set.
if action == iampolicy.PutObjectRetentionAction &&
r.Header.Get(xhttp.AmzObjectLockMode) == "" &&
r.Header.Get(xhttp.AmzObjectLockRetainUntilDate) == "" {
return ErrNone
}
if cred.AccessKey == "" {
if globalPolicySys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Action: policy.Action(action),
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", "", nil),
IsOwner: false,
ObjectName: objectName,
}) {
return ErrNone
}
return ErrAccessDenied
}
if globalIAMSys.IsAllowed(iampolicy.Args{
AccountName: cred.AccessKey,
Action: action,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
ObjectName: objectName,
IsOwner: owner,
Claims: claims,
}) {
return ErrNone
}
return ErrAccessDenied
}

View file

@ -1,473 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"testing"
"time"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
)
// Test get request auth type.
func TestGetRequestAuthType(t *testing.T) {
type testCase struct {
req *http.Request
authT authType
}
testCases := []testCase{
// Test case - 1
// Check for generic signature v4 header.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
},
Header: http.Header{
"Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
"X-Amz-Content-Sha256": []string{streamingContentSHA256},
"Content-Encoding": []string{streamingContentEncoding},
},
Method: "PUT",
},
authT: authTypeStreamingSigned,
},
// Test case - 2
// Check for JWT header.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
},
Header: http.Header{
"Authorization": []string{"Bearer 12313123"},
},
},
authT: authTypeJWT,
},
// Test case - 3
// Empty authorization header.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
},
Header: http.Header{
"Authorization": []string{""},
},
},
authT: authTypeUnknown,
},
// Test case - 4
// Check for presigned.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
},
},
authT: authTypePresigned,
},
// Test case - 5
// Check for post policy.
{
req: &http.Request{
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: SlashSeparator,
},
Header: http.Header{
"Content-Type": []string{"multipart/form-data"},
},
Method: "POST",
},
authT: authTypePostPolicy,
},
}
// Tests all request auth types.
for i, testc := range testCases {
authT := getRequestAuthType(testc.req)
if authT != testc.authT {
t.Errorf("Test %d: Expected %d, got %d", i+1, testc.authT, authT)
}
}
}
// Test all s3 supported auth types.
func TestS3SupportedAuthType(t *testing.T) {
type testCase struct {
authT authType
pass bool
}
// List of all valid and invalid test cases.
testCases := []testCase{
// Test 1 - supported s3 type anonymous.
{
authT: authTypeAnonymous,
pass: true,
},
// Test 2 - supported s3 type presigned.
{
authT: authTypePresigned,
pass: true,
},
// Test 3 - supported s3 type signed.
{
authT: authTypeSigned,
pass: true,
},
// Test 4 - supported s3 type with post policy.
{
authT: authTypePostPolicy,
pass: true,
},
// Test 5 - supported s3 type with streaming signed.
{
authT: authTypeStreamingSigned,
pass: true,
},
// Test 6 - supported s3 type with signature v2.
{
authT: authTypeSignedV2,
pass: true,
},
// Test 7 - supported s3 type with presign v2.
{
authT: authTypePresignedV2,
pass: true,
},
// Test 8 - JWT is not supported s3 type.
{
authT: authTypeJWT,
pass: false,
},
// Test 9 - unknown auth header is not supported s3 type.
{
authT: authTypeUnknown,
pass: false,
},
// Test 10 - some new auth type is not supported s3 type.
{
authT: authType(9),
pass: false,
},
}
// Validate all the test cases.
for i, tt := range testCases {
ok := isSupportedS3AuthType(tt.authT)
if ok != tt.pass {
t.Errorf("Test %d:, Expected %t, got %t", i+1, tt.pass, ok)
}
}
}
func TestIsRequestPresignedSignatureV2(t *testing.T) {
testCases := []struct {
inputQueryKey string
inputQueryValue string
expectedResult bool
}{
// Test case - 1.
// Test case with no query parameter set.
{"", "", false},
// Test case - 2.
// Test case with query key "AWSAccessKeyId" set.
{"AWSAccessKeyId", "", true},
// Test case - 3.
{"X-Amz-Content-Sha256", "", false},
}
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
inputReq, err := http.NewRequest("GET", "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
q := inputReq.URL.Query()
q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
inputReq.URL.RawQuery = q.Encode()
actualResult := isRequestPresignedSignatureV2(inputReq)
if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
}
}
}
// TestIsRequestPresignedSignatureV4 - Test validates the logic for presigned signature version v4 detection.
func TestIsRequestPresignedSignatureV4(t *testing.T) {
testCases := []struct {
inputQueryKey string
inputQueryValue string
expectedResult bool
}{
// Test case - 1.
// Test case with no query parameter set.
{"", "", false},
// Test case - 2.
// Test case with query key "X-Amz-Credential" set.
{"X-Amz-Credential", "", true},
// Test case - 3.
{"X-Amz-Content-Sha256", "", false},
}
for i, testCase := range testCases {
// creating an input HTTP request.
// Only the query parameters are relevant for this particular test.
inputReq, err := http.NewRequest("GET", "http://example.com", nil)
if err != nil {
t.Fatalf("Error initializing input HTTP request: %v", err)
}
q := inputReq.URL.Query()
q.Add(testCase.inputQueryKey, testCase.inputQueryValue)
inputReq.URL.RawQuery = q.Encode()
actualResult := isRequestPresignedSignatureV4(inputReq)
if testCase.expectedResult != actualResult {
t.Errorf("Test %d: Expected the result to `%v`, but instead got `%v`", i+1, testCase.expectedResult, actualResult)
}
}
}
// Provides a fully populated http request instance; fails the test otherwise.
func mustNewRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req, err := newTestRequest(method, urlStr, contentLength, body)
if err != nil {
t.Fatalf("Unable to initialize new http request %s", err)
}
return req
}
// This is similar to mustNewRequest but additionally the request
// is signed with AWS Signature V4, fails if not able to do so.
func mustNewSignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
}
return req
}
// This is similar to mustNewRequest but additionally the request
// is signed with AWS Signature V2, fails if not able to do so.
func mustNewSignedV2Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := signRequestV2(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
}
return req
}
// This is similar to mustNewRequest but additionally the request
// is presigned with AWS Signature V2, fails if not able to do so.
func mustNewPresignedV2Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := preSignV2(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
}
return req
}
// This is similar to mustNewRequest but additionally the request
// is presigned with AWS Signature V4, fails if not able to do so.
func mustNewPresignedRequest(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
cred := globalActiveCred
if err := preSignV4(req, cred.AccessKey, cred.SecretKey, time.Now().Add(10*time.Minute).Unix()); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
}
return req
}
func mustNewSignedShortMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
req.Header.Set("Content-Md5", "invalid-digest")
cred := globalActiveCred
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
func mustNewSignedEmptyMD5Request(method string, urlStr string, contentLength int64, body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
req.Header.Set("Content-Md5", "")
cred := globalActiveCred
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
func mustNewSignedBadMD5Request(method string, urlStr string, contentLength int64,
body io.ReadSeeker, t *testing.T) *http.Request {
req := mustNewRequest(method, urlStr, contentLength, body, t)
req.Header.Set("Content-Md5", "YWFhYWFhYWFhYWFhYWFhCg==")
cred := globalActiveCred
if err := signRequestV4(req, cred.AccessKey, cred.SecretKey); err != nil {
t.Fatalf("Unable to initialized new signed http request %s", err)
}
return req
}
// Tests the isReqAuthenticated function, validating the returned S3 errors.
func TestIsReqAuthenticated(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
creds, err := auth.CreateCredentials("myuser", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
globalActiveCred = creds
// List of test cases for validating http request authentication.
testCases := []struct {
req *http.Request
s3Error APIErrorCode
}{
// When request is unsigned, access denied is returned.
{mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrAccessDenied},
// Empty Content-Md5 header.
{mustNewSignedEmptyMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// Short Content-Md5 header.
{mustNewSignedShortMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrInvalidDigest},
// When request is properly signed, but has bad Content-MD5 header.
{mustNewSignedBadMD5Request("PUT", "http://127.0.0.1:9000/", 5, bytes.NewReader([]byte("hello")), t), ErrBadDigest},
// When request is properly signed, error is none.
{mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrNone},
}
ctx := context.Background()
// Validates all testcases.
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalServerRegion, serviceS3)
if s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
}
}
}
}
func TestCheckAdminRequestAuthType(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
creds, err := auth.CreateCredentials("myuser", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
globalActiveCred = creds
testCases := []struct {
Request *http.Request
ErrCode APIErrorCode
}{
{Request: mustNewRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewSignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrNone},
{Request: mustNewSignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedV2Request("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
{Request: mustNewPresignedRequest("GET", "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
ctx := context.Background()
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuthType(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}
}
func TestValidateAdminSignature(t *testing.T) {
ctx := context.Background()
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("unable initialize config file, %s", err)
}
creds, err := auth.CreateCredentials("admin", "mypassword")
if err != nil {
t.Fatalf("unable create credential, %s", err)
}
globalActiveCred = creds
testCases := []struct {
AccessKey string
SecretKey string
ErrCode APIErrorCode
}{
{"", "", ErrInvalidAccessKeyID},
{"admin", "", ErrSignatureDoesNotMatch},
{"admin", "wrongpassword", ErrSignatureDoesNotMatch},
{"wronguser", "mypassword", ErrInvalidAccessKeyID},
{"", "mypassword", ErrInvalidAccessKeyID},
{"admin", "mypassword", ErrNone},
}
for i, testCase := range testCases {
req := mustNewRequest("GET", "http://localhost:9000/", 0, nil, t)
if err := signRequestV4(req, testCase.AccessKey, testCase.SecretKey); err != nil {
t.Fatalf("Unable to inititalized new signed http request %s", err)
}
_, _, _, s3Error := validateAdminSignature(ctx, req, globalMinioDefaultRegion)
if s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i+1, testCase.ErrCode, s3Error)
}
}
}

View file

@ -1,160 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"time"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/madmin"
)
// healTask represents what to heal along with options
// path: '/' => Heal disk formats along with metadata
// path: 'bucket/' or '/bucket/' => Heal bucket
// path: 'bucket/object' => Heal object
type healTask struct {
path string
opts madmin.HealOpts
// Healing response will be sent here
responseCh chan healResult
}
// healResult represents a healing result with a possible error
type healResult struct {
result madmin.HealResultItem
err error
}
// healRoutine receives heal tasks to heal buckets, objects and format.json
type healRoutine struct {
tasks chan healTask
doneCh chan struct{}
}
// Add a new task to the tasks queue
func (h *healRoutine) queueHealTask(task healTask) {
h.tasks <- task
}
func waitForLowHTTPReq(tolerance int32) {
if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at most 10 minutes for in-progress requests before proceeding to heal
waitCount := 600
// While requests are in progress, delay the heal.
for (httpServer.GetRequestCount() >= tolerance) &&
waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
}
}
// Wait for heal requests and process them
func (h *healRoutine) run(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case task, ok := <-h.tasks:
if !ok {
// The tasks channel is closed; a plain break would only exit the
// select and spin on the closed channel, so return instead.
return
}
// Wait for the request load to drop before proceeding
waitForLowHTTPReq(int32(globalEndpoints.NEndpoints()))
var res madmin.HealResultItem
var err error
bucket, object := path2BucketObject(task.path)
switch {
case bucket == "" && object == "":
res, err = healDiskFormat(ctx, objAPI, task.opts)
case bucket != "" && object == "":
res, err = objAPI.HealBucket(ctx, bucket, task.opts.DryRun, task.opts.Remove)
case bucket != "" && object != "":
res, err = objAPI.HealObject(ctx, bucket, object, task.opts)
}
if task.path != slashSeparator && task.path != nopHeal {
ObjectPathUpdated(task.path)
}
task.responseCh <- healResult{result: res, err: err}
case <-h.doneCh:
return
case <-ctx.Done():
return
}
}
}
func newHealRoutine() *healRoutine {
return &healRoutine{
tasks: make(chan healTask),
doneCh: make(chan struct{}),
}
}
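// Illustrative usage sketch (hypothetical, not in the original file): queue a
// single object heal on the routine above and wait for its result.
func exampleQueueObjectHeal(ctx context.Context, objAPI ObjectLayer) (madmin.HealResultItem, error) {
h := newHealRoutine()
go h.run(ctx, objAPI)
respCh := make(chan healResult)
h.queueHealTask(healTask{path: "bucket/object", responseCh: respCh})
res := <-respCh
return res.result, res.err
}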
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
// Run the background healer
globalBackgroundHealRoutine = newHealRoutine()
go globalBackgroundHealRoutine.run(ctx, objAPI)
nh := newBgHealSequence()
// Heal any disk format and metadata early, if possible.
if err := nh.healDiskMeta(); err != nil {
if newObjectLayerFn() != nil {
// log only when the object layer has fully initialized.
logger.LogIf(nh.ctx, err)
}
}
globalBackgroundHealState.LaunchNewHealSequence(nh)
}
// healDiskFormat - heals format.json; the returned error indicates
// whether a failure occurred.
func healDiskFormat(ctx context.Context, objAPI ObjectLayer, opts madmin.HealOpts) (madmin.HealResultItem, error) {
res, err := objAPI.HealFormat(ctx, opts.DryRun)
// Return any error, but ignore the error returned when disks have
// already been healed.
if err != nil && err != errNoHealRequired {
return madmin.HealResultItem{}, err
}
// Healing succeeded; notify the peers to reload format and re-initialize disks.
// We will not notify peers if healing is not required.
if err == nil {
// Notify servers in background and retry if needed.
go func() {
retry:
for _, nerr := range globalNotificationSys.ReloadFormat(opts.DryRun) {
if nerr.Err != nil {
if nerr.Err.Error() == errServerNotInitialized.Error() {
time.Sleep(time.Second)
goto retry
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}()
}
return res, nil
}

View file

@ -1,126 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"time"
"github.com/minio/minio/legacy/logger"
)
const defaultMonitorNewDiskInterval = time.Minute * 10
func initLocalDisksAutoHeal(ctx context.Context, objAPI ObjectLayer) {
go monitorLocalDisksAndHeal(ctx, objAPI)
}
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible for performing the heal
func monitorLocalDisksAndHeal(ctx context.Context, objAPI ObjectLayer) {
z, ok := objAPI.(*xlZones)
if !ok {
return
}
var bgSeq *healSequence
var found bool
for {
bgSeq, found = globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
if found {
break
}
time.Sleep(time.Second)
}
// Perform automatic disk healing when a disk is replaced locally.
for {
select {
case <-ctx.Done():
return
case <-time.After(defaultMonitorNewDiskInterval):
// Attempt a heal first as the server starts up.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
var healNewDisks bool
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
}
if len(localDisksToHeal) == 0 {
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
healNewDisks = true
}
// Reformat disks only if needed.
if !healNewDisks {
continue
}
// Reformat disks
bgSeq.sourceCh <- healSource{path: SlashSeparator}
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- healSource{path: nopHeal}
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure sets to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}
erasureSetToHeal = append(erasureSetToHeal, setIndex)
}
erasureSetInZoneToHeal[i] = erasureSetToHeal
}
// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
if err != nil {
logger.LogIf(ctx, err)
}
}
}
}
}
}

View file

@ -1,372 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"io/ioutil"
"math"
"math/rand"
"strconv"
"testing"
humanize "github.com/dustin/go-humanize"
)
// Benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
var err error
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
md5hex := getMD5Hash(textData)
sha256hex := ""
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// Benchmark utility functions for ObjectLayer.PutObjectPart().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObjectPart benchmark.
func runPutObjectPartBenchmark(b *testing.B, obj ObjectLayer, partSize int) {
var err error
// obtains random bucket name.
bucket := getRandomBucketName()
object := getRandomObjectName()
// create bucket.
err = obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
objSize := 128 * humanize.MiByte
// uploadID identifies the multipart upload that all parts are written to.
var uploadID string
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// initiate a new multipart upload; per-part md5sums required as input
// for PutObjectPart are computed inside the benchmark loop below.
uploadID, err = obj.NewMultipartUpload(context.Background(), bucket, object, ObjectOptions{})
if err != nil {
b.Fatal(err)
}
sha256hex := ""
var textPartData []byte
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObjectPart starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
// insert the object.
totalPartsNR := int(math.Ceil(float64(objSize) / float64(partSize)))
for j := 0; j < totalPartsNR; j++ {
if j < totalPartsNR-1 {
textPartData = textData[j*partSize : (j+1)*partSize-1]
} else {
textPartData = textData[j*partSize:]
}
md5hex := getMD5Hash([]byte(textPartData))
var partInfo PartInfo
partInfo, err = obj.PutObjectPart(context.Background(), bucket, object, uploadID, j,
mustGetPutObjReader(b, bytes.NewBuffer(textPartData), int64(len(textPartData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if partInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, etag, md5hex)
}
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectPartBenchmark function.
func benchmarkPutObjectPart(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created on function return.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runPutObjectPartBenchmark(b, objLayer, objSize)
}
// creates XL/FS backend setup, obtains the object layer and calls the runPutObjectBenchmark function.
func benchmarkPutObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created on function return.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runPutObjectBenchmark(b, objLayer, objSize)
}
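// These helpers are driven by the standard go benchmark runner; an
// illustrative invocation (hypothetical benchmark name, standard go tooling):
//
//	go test -run=NONE -bench=BenchmarkPutObject -benchmem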
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for put object.
func benchmarkPutObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created on function return.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runPutObjectBenchmarkParallel(b, objLayer, objSize)
}
// Benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmark(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
textData := generateBytesData(objSize)
// generate etag for the generated data.
// etag of the data to be written is required as input for PutObject.
// PutObject is the function that writes the data onto the FS/XL backend.
md5hex := getMD5Hash(textData)
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
for i := 0; i < b.N; i++ {
var buffer = new(bytes.Buffer)
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i%10), 0, int64(objSize), buffer, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
}
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// randomly picks a character and returns its equivalent byte array.
func getRandomByte() []byte {
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
// seeding the random number generator.
rand.Seed(UTCNow().UnixNano())
// pick a character randomly.
return []byte{letterBytes[rand.Intn(len(letterBytes))]}
}
// picks a random byte and repeats it to size bytes.
func generateBytesData(size int) []byte {
// repeat the randomly chosen character size times.
return bytes.Repeat(getRandomByte(), size)
}
// creates XL/FS backend setup, obtains the object layer and calls the runGetObjectBenchmark function.
func benchmarkGetObject(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmark(b, objLayer, objSize)
}
// creates XL/FS backend setup, obtains the object layer and runs parallel benchmark for ObjectLayer.GetObject() .
func benchmarkGetObjectParallel(b *testing.B, instanceType string, objSize int) {
// create a temp XL/FS backend.
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
objLayer, disks, err := prepareTestBackend(ctx, instanceType)
if err != nil {
b.Fatalf("Failed obtaining Temp Backend: <ERROR> %s", err)
}
// cleaning up the backend by removing all the directories and files created.
defer removeRoots(disks)
// uses *testing.B and the object Layer to run the benchmark.
runGetObjectBenchmarkParallel(b, objLayer, objSize)
}
// Parallel benchmark utility functions for ObjectLayer.PutObject().
// Creates Object layer setup ( MakeBucket ) and then runs the PutObject benchmark.
func runPutObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
md5hex := getMD5Hash([]byte(textData))
sha256hex := ""
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for PutObject starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
// insert the object.
objInfo, err := obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", objInfo.ETag, md5hex)
}
i++
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()
}
// Parallel benchmark utility functions for ObjectLayer.GetObject().
// Creates Object layer setup ( MakeBucket, PutObject) and then runs the benchmark.
func runGetObjectBenchmarkParallel(b *testing.B, obj ObjectLayer, objSize int) {
// obtains random bucket name.
bucket := getRandomBucketName()
// create bucket.
err := obj.MakeBucketWithLocation(context.Background(), bucket, "", false)
if err != nil {
b.Fatal(err)
}
// get text data generated for number of bytes equal to object size.
textData := generateBytesData(objSize)
// generate md5sum for the generated data.
// md5sum of the data to be written is required as input for PutObject.
// PutObject is the function that writes the data onto the FS/XL backend.
md5hex := getMD5Hash([]byte(textData))
sha256hex := ""
for i := 0; i < 10; i++ {
// insert the object.
var objInfo ObjectInfo
objInfo, err = obj.PutObject(context.Background(), bucket, "object"+strconv.Itoa(i),
mustGetPutObjReader(b, bytes.NewBuffer(textData), int64(len(textData)), md5hex, sha256hex), ObjectOptions{})
if err != nil {
b.Fatal(err)
}
if objInfo.ETag != md5hex {
b.Fatalf("Write no: %d: Md5Sum mismatch during object write into the bucket: Expected %s, got %s", i+1, objInfo.ETag, md5hex)
}
}
// benchmark utility which helps obtain number of allocations and bytes allocated per ops.
b.ReportAllocs()
// the actual benchmark for GetObject starts here. Reset the benchmark timer.
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
i := 0
for pb.Next() {
err = obj.GetObject(context.Background(), bucket, "object"+strconv.Itoa(i), 0, int64(objSize), ioutil.Discard, "", ObjectOptions{})
if err != nil {
b.Error(err)
}
i++
if i == 10 {
i = 0
}
}
})
// Benchmark ends here. Stop timer.
b.StopTimer()
}

View file

@ -1,165 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"encoding/hex"
"fmt"
"hash"
"io"
"github.com/minio/minio/legacy/logger"
)
type errHashMismatch struct {
message string
}
func (err *errHashMismatch) Error() string {
return err.message
}
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow *io.PipeWriter
h hash.Hash
shardSize int64
canClose chan struct{} // Needed to avoid race explained in Close() call.
}
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
if len(p) == 0 {
return 0, nil
}
b.h.Reset()
b.h.Write(p)
hashBytes := b.h.Sum(nil)
_, err := b.iow.Write(hashBytes)
if err != nil {
return 0, err
}
return b.iow.Write(p)
}
func (b *streamingBitrotWriter) Close() error {
err := b.iow.Close()
// Wait for all data to be written before returning, else it causes race conditions.
// The race condition comes from the io.PipeWriter implementation, i.e. consider
// the following sequence of operations:
// 1) pipe.Write()
// 2) pipe.Close()
// Now pipe.Close() can return before the data is read on the other end of the
// pipe and written to the disk. Hence an immediate Read() on the file can
// return incorrect data.
<-b.canClose
return err
}
// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
r, w := io.Pipe()
h := algo.New()
bw := &streamingBitrotWriter{w, h, shardSize, make(chan struct{})}
go func() {
totalFileSize := int64(-1) // For compressed objects length will be unknown (represented by length=-1)
if length != -1 {
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
err := disk.CreateFile(volume, filePath, totalFileSize, r)
r.CloseWithError(err)
close(bw.canClose)
}()
return bw
}
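// Illustrative on-disk layout (derived from Write above, assuming
// h.Size() == 32): every shardSize chunk is preceded by its checksum, so the
// stream handed to disk.CreateFile looks like
//
//	[32B hash][shard 0][32B hash][shard 1] ... [32B hash][last shard]
//
// which is why totalFileSize adds ceilFrac(length, shardSize)*h.Size() to the
// payload length.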
// ReadAt() implementation which verifies the bitrot hash available as part of the stream.
type streamingBitrotReader struct {
disk StorageAPI
rc io.ReadCloser
volume string
filePath string
tillOffset int64
currOffset int64
h hash.Hash
shardSize int64
hashBytes []byte
}
func (b *streamingBitrotReader) Close() error {
if b.rc == nil {
return nil
}
return b.rc.Close()
}
func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
var err error
if offset%b.shardSize != 0 {
// Offset should always be aligned to b.shardSize
// Can never happen unless there are programmer bugs
return 0, errUnexpected
}
if b.rc == nil {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
b.rc, err = b.disk.ReadFileStream(b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
return 0, err
}
}
if offset != b.currOffset {
// Can never happen unless there are programmer bugs
return 0, errUnexpected
}
b.h.Reset()
_, err = io.ReadFull(b.rc, b.hashBytes)
if err != nil {
return 0, err
}
_, err = io.ReadFull(b.rc, buf)
if err != nil {
return 0, err
}
b.h.Write(buf)
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
err := &errHashMismatch{fmt.Sprintf("hashes do not match expected %s, got %s",
hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil)))}
logger.LogIf(GlobalContext, err)
return 0, err
}
b.currOffset += int64(len(buf))
return len(buf), nil
}
// Returns streaming bitrot reader implementation.
func newStreamingBitrotReader(disk StorageAPI, volume, filePath string, tillOffset int64, algo BitrotAlgorithm, shardSize int64) *streamingBitrotReader {
h := algo.New()
return &streamingBitrotReader{
disk,
nil,
volume,
filePath,
ceilFrac(tillOffset, shardSize)*int64(h.Size()) + tillOffset,
0,
h,
shardSize,
make([]byte, h.Size()),
}
}
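// A worked example of the offset arithmetic above (illustrative numbers:
// shardSize = 1 MiB, h.Size() = 32): reading the shard at byte offset
// k*shardSize maps to streamOffset = k*32 + k*shardSize, because exactly k
// checksums precede that shard in the stream; tillOffset is expanded the same
// way from the requested end offset.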

View file

@ -1,97 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"hash"
"io"
"github.com/minio/minio/legacy/logger"
)
// Implementation to calculate bitrot for the whole file.
type wholeBitrotWriter struct {
disk StorageAPI
volume string
filePath string
shardSize int64 // This is the shard size of the erasure logic
hash.Hash // For bitrot hash
}
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(b.volume, b.filePath, p)
if err != nil {
logger.LogIf(GlobalContext, err)
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(GlobalContext, err)
return 0, err
}
return len(p), nil
}
func (b *wholeBitrotWriter) Close() error {
return nil
}
// Returns whole-file bitrot writer.
func newWholeBitrotWriter(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, shardSize int64) io.WriteCloser {
return &wholeBitrotWriter{disk, volume, filePath, shardSize, algo.New()}
}
// Implementation to verify bitrot for the whole file.
type wholeBitrotReader struct {
disk StorageAPI
volume string
filePath string
verifier *BitrotVerifier // Holds the bit-rot info
tillOffset int64 // Affects the length of data requested in disk.ReadFile depending on Read()'s offset
buf []byte // Holds bit-rot verified data
}
func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error) {
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
ctx := GlobalContext
logger.GetReqInfo(ctx).AppendTags("disk", b.disk.String())
logger.LogIf(ctx, err)
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(GlobalContext, errLessData)
return 0, errLessData
}
n = copy(buf, b.buf)
b.buf = b.buf[n:]
return n, nil
}
// Returns whole-file bitrot reader.
func newWholeBitrotReader(disk StorageAPI, volume, filePath string, algo BitrotAlgorithm, tillOffset int64, sum []byte) *wholeBitrotReader {
return &wholeBitrotReader{
disk: disk,
volume: volume,
filePath: filePath,
verifier: &BitrotVerifier{algo, sum},
tillOffset: tillOffset,
buf: nil,
}
}

View file

@ -1,164 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"errors"
"hash"
"io"
"github.com/minio/highwayhash"
"github.com/minio/minio/legacy/logger"
sha256 "github.com/minio/sha256-simd"
"golang.org/x/crypto/blake2b"
)
// magic HH-256 key as HH-256 hash of the first 100 decimals of π as utf-8 string with a zero key.
var magicHighwayHash256Key = []byte("\x4b\xe7\x34\xfa\x8e\x23\x8a\xcd\x26\x3e\x83\xe6\xbb\x96\x85\x52\x04\x0f\x93\x5d\xa3\x9f\x44\x14\x97\xe0\x9d\x13\x22\xde\x36\xa0")
// BitrotAlgorithm specifies an algorithm used for bitrot protection.
type BitrotAlgorithm uint
const (
// SHA256 represents the SHA-256 hash function
SHA256 BitrotAlgorithm = 1 + iota
// HighwayHash256 represents the HighwayHash-256 hash function
HighwayHash256
// HighwayHash256S represents the Streaming HighwayHash-256 hash function
HighwayHash256S
// BLAKE2b512 represents the BLAKE2b-512 hash function
BLAKE2b512
)
// DefaultBitrotAlgorithm is the default algorithm used for bitrot protection.
const (
DefaultBitrotAlgorithm = HighwayHash256S
)
var bitrotAlgorithms = map[BitrotAlgorithm]string{
SHA256: "sha256",
BLAKE2b512: "blake2b",
HighwayHash256: "highwayhash256",
HighwayHash256S: "highwayhash256S",
}
// New returns a new hash.Hash calculating the given bitrot algorithm.
func (a BitrotAlgorithm) New() hash.Hash {
switch a {
case SHA256:
return sha256.New()
case BLAKE2b512:
b2, _ := blake2b.New512(nil) // New512 never returns an error if the key is nil
return b2
case HighwayHash256:
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
case HighwayHash256S:
hh, _ := highwayhash.New(magicHighwayHash256Key) // New will never return error since key is 256 bit
return hh
default:
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
return nil
}
}
// Available reports whether the given algorithm is available.
func (a BitrotAlgorithm) Available() bool {
_, ok := bitrotAlgorithms[a]
return ok
}
// String returns the string identifier for a given bitrot algorithm.
// If the algorithm is not supported String panics.
func (a BitrotAlgorithm) String() string {
name, ok := bitrotAlgorithms[a]
if !ok {
logger.CriticalIf(GlobalContext, errors.New("Unsupported bitrot algorithm"))
}
return name
}
// NewBitrotVerifier returns a new BitrotVerifier implementing the given algorithm.
func NewBitrotVerifier(algorithm BitrotAlgorithm, checksum []byte) *BitrotVerifier {
return &BitrotVerifier{algorithm, checksum}
}
// BitrotVerifier can be used to verify protected data.
type BitrotVerifier struct {
algorithm BitrotAlgorithm
sum []byte
}
// BitrotAlgorithmFromString returns a bitrot algorithm from the given string representation.
// It returns 0 if the string representation does not match any supported algorithm.
// The zero value of a bitrot algorithm is never supported.
func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
for alg, name := range bitrotAlgorithms {
if name == s {
return alg
}
}
return
}
func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
if algo == HighwayHash256S {
return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
}
return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
}
func newBitrotReader(disk StorageAPI, bucket string, filePath string, tillOffset int64, algo BitrotAlgorithm, sum []byte, shardSize int64) io.ReaderAt {
if algo == HighwayHash256S {
return newStreamingBitrotReader(disk, bucket, filePath, tillOffset, algo, shardSize)
}
return newWholeBitrotReader(disk, bucket, filePath, algo, tillOffset, sum)
}
// Close all the readers.
func closeBitrotReaders(rs []io.ReaderAt) {
for _, r := range rs {
if br, ok := r.(io.Closer); ok {
br.Close()
}
}
}
// Close all the writers.
func closeBitrotWriters(ws []io.Writer) {
for _, w := range ws {
if bw, ok := w.(io.Closer); ok {
bw.Close()
}
}
}
// Returns hash sum for whole-bitrot, nil for streaming-bitrot.
func bitrotWriterSum(w io.Writer) []byte {
if bw, ok := w.(*wholeBitrotWriter); ok {
return bw.Sum(nil)
}
return nil
}
// Returns the size of the file with bitrot protection
func bitrotShardFileSize(size int64, shardSize int64, algo BitrotAlgorithm) int64 {
if algo != HighwayHash256S {
return size
}
return ceilFrac(size, shardSize)*int64(algo.New().Size()) + size
}
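For intuition, a minimal standalone sketch of the shard-size arithmetic above, assuming ceilFrac is plain ceiling division for positive inputs (the real helper also guards against zero and negative denominators) and using HighwayHash256S's 32-byte digest:

package main

import "fmt"

// ceilFrac mirrors the package helper for positive inputs: ceiling division.
func ceilFrac(numerator, denominator int64) int64 {
    return (numerator + denominator - 1) / denominator
}

func main() {
    const hashSize = 32                     // HighwayHash-256 digest size in bytes
    size, shardSize := int64(35), int64(10) // data splits into 4 shards: 10+10+10+5
    // One checksum is stored ahead of each shard, so the on-disk size is
    // 4*32 + 35 = 163 bytes.
    fmt.Println(ceilFrac(size, shardSize)*hashSize + size) // 163
}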

View file

@ -1,84 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"io"
"io/ioutil"
"log"
"os"
"testing"
)
func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(tmpDir)
volume := "testvol"
filePath := "testfile"
disk, err := newPosix(tmpDir, "")
if err != nil {
t.Fatal(err)
}
disk.MakeVol(volume)
writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaaaaaaa"))
if err != nil {
t.Fatal(err)
}
_, err = writer.Write([]byte("aaaaa"))
if err != nil {
t.Fatal(err)
}
writer.(io.Closer).Close()
reader := newBitrotReader(disk, volume, filePath, 35, bitrotAlgo, bitrotWriterSum(writer), 10)
b := make([]byte, 10)
if _, err = reader.ReadAt(b, 0); err != nil {
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 10); err != nil {
t.Fatal(err)
}
if _, err = reader.ReadAt(b, 20); err != nil {
t.Fatal(err)
}
if _, err = reader.ReadAt(b[:5], 30); err != nil {
t.Fatal(err)
}
}
func TestAllBitrotAlgorithms(t *testing.T) {
for bitrotAlgo := range bitrotAlgorithms {
testBitrotReaderWriterAlgo(t, bitrotAlgo)
}
}

View file

@ -1,257 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"runtime"
"sync/atomic"
"time"
"github.com/gorilla/mux"
"github.com/minio/minio-go/v6/pkg/set"
xhttp "github.com/minio/minio/legacy/http"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/legacy/rest"
)
const (
bootstrapRESTVersion = "v1"
bootstrapRESTVersionPrefix = SlashSeparator + bootstrapRESTVersion
bootstrapRESTPrefix = minioReservedBucketPath + "/bootstrap"
bootstrapRESTPath = bootstrapRESTPrefix + bootstrapRESTVersionPrefix
)
const (
bootstrapRESTMethodVerify = "/verify"
)
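// For reference, with minioReservedBucketPath typically being "/minio", the
// verify endpoint resolves to /minio/bootstrap/v1/verify.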
// To abstract a node over network.
type bootstrapRESTServer struct{}
// ServerSystemConfig - captures information about server configuration.
type ServerSystemConfig struct {
MinioPlatform string
MinioRuntime string
MinioEndpoints EndpointZones
}
// Diff - returns an error on the first difference found between two configs.
func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
if s1.MinioPlatform != s2.MinioPlatform {
return fmt.Errorf("Expected platform '%s', found to be running '%s'",
s1.MinioPlatform, s2.MinioPlatform)
}
if s1.MinioEndpoints.NEndpoints() != s2.MinioEndpoints.NEndpoints() {
return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.NEndpoints(),
s2.MinioEndpoints.NEndpoints())
}
for i, ep := range s1.MinioEndpoints {
if ep.SetCount != s2.MinioEndpoints[i].SetCount {
return fmt.Errorf("Expected set count %d, seen %d", ep.SetCount,
s2.MinioEndpoints[i].SetCount)
}
if ep.DrivesPerSet != s2.MinioEndpoints[i].DrivesPerSet {
return fmt.Errorf("Expected drives pet set %d, seen %d", ep.DrivesPerSet,
s2.MinioEndpoints[i].DrivesPerSet)
}
for j, endpoint := range ep.Endpoints {
if endpoint.String() != s2.MinioEndpoints[i].Endpoints[j].String() {
return fmt.Errorf("Expected endpoint %s, seen %s", endpoint,
s2.MinioEndpoints[i].Endpoints[j])
}
}
}
return nil
}
func getServerSystemCfg() ServerSystemConfig {
return ServerSystemConfig{
MinioPlatform: fmt.Sprintf("OS: %s | Arch: %s", runtime.GOOS, runtime.GOARCH),
MinioEndpoints: globalEndpoints,
}
}
func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "VerifyHandler")
cfg := getServerSystemCfg()
logger.LogIf(ctx, json.NewEncoder(w).Encode(&cfg))
w.(http.Flusher).Flush()
}
// registerBootstrapRESTHandlers - register bootstrap rest router.
func registerBootstrapRESTHandlers(router *mux.Router) {
server := &bootstrapRESTServer{}
subrouter := router.PathPrefix(bootstrapRESTPrefix).Subrouter()
subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodVerify).HandlerFunc(
httpTraceHdrs(server.VerifyHandler))
}
// Client to talk to bootstrap endpoints.
type bootstrapRESTClient struct {
endpoint Endpoint
restClient *rest.Client
connected int32
}
// Reconnect to a bootstrap REST server.
func (client *bootstrapRESTClient) reConnect() {
atomic.StoreInt32(&client.connected, 1)
}
// Wrapper to restClient.Call to handle network errors; on a network error the
// connection is marked disconnected permanently. The only way to restore it is
// at the xl-sets layer via xlsets.monitorAndConnectEndpoints() after verifying
// format.json.
func (client *bootstrapRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
return client.callWithContext(GlobalContext, method, values, body, length)
}
// Wrapper to restClient.Call to handle network errors; on a network error the
// connection is marked disconnected permanently. The only way to restore it is
// at the xl-sets layer via xlsets.monitorAndConnectEndpoints() after verifying
// format.json.
func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
if !client.IsOnline() {
client.reConnect()
}
if values == nil {
values = make(url.Values)
}
respBody, err = client.restClient.CallWithContext(ctx, method, values, body, length)
if err == nil {
return respBody, nil
}
if isNetworkError(err) {
atomic.StoreInt32(&client.connected, 0)
}
return nil, err
}
// Stringer provides a canonicalized representation of node.
func (client *bootstrapRESTClient) String() string {
return client.endpoint.String()
}
// IsOnline - returns whether the RPC client is considered connected.
func (client *bootstrapRESTClient) IsOnline() bool {
return atomic.LoadInt32(&client.connected) == 1
}
// Close - marks the client as closed.
func (client *bootstrapRESTClient) Close() error {
atomic.StoreInt32(&client.connected, 0)
client.restClient.Close()
return nil
}
// Verify - fetches system server config.
func (client *bootstrapRESTClient) Verify(srcCfg ServerSystemConfig) (err error) {
if newObjectLayerFn() != nil {
return nil
}
respBody, err := client.call(bootstrapRESTMethodVerify, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
recvCfg := ServerSystemConfig{}
if err = json.NewDecoder(respBody).Decode(&recvCfg); err != nil {
return err
}
return srcCfg.Diff(recvCfg)
}
func verifyServerSystemConfig(endpointZones EndpointZones) error {
srcCfg := getServerSystemCfg()
clnts := newBootstrapRESTClients(endpointZones)
var onlineServers int
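// Keep sweeping the peers until at least half of them (rounded down) have
// verified the config; onlineServers accumulates across sweeps, so peers
// that come online later still count toward the quorum.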
for onlineServers < len(clnts)/2 {
for _, clnt := range clnts {
if err := clnt.Verify(srcCfg); err != nil {
if isNetworkError(err) {
continue
}
return fmt.Errorf("%s as has incorrect configuration: %w", clnt.String(), err)
}
onlineServers++
}
// Sleep for a while - so that we don't go into
// 100% CPU when half the endpoints are offline.
time.Sleep(500 * time.Millisecond)
}
return nil
}
func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient {
seenHosts := set.NewStringSet()
var clnts []*bootstrapRESTClient
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if seenHosts.Contains(endpoint.Host) {
continue
}
seenHosts.Add(endpoint.Host)
// Only proceed for remote endpoints.
if !endpoint.IsLocal {
clnt, err := newBootstrapRESTClient(endpoint)
if err != nil {
continue
}
clnts = append(clnts, clnt)
}
}
}
return clnts
}
// Returns a new bootstrap client.
func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
serverURL := &url.URL{
Scheme: endpoint.Scheme,
Host: endpoint.Host,
Path: bootstrapRESTPath,
}
var tlsConfig *tls.Config
if globalIsSSL {
tlsConfig = &tls.Config{
ServerName: endpoint.Hostname(),
RootCAs: globalRootCAs,
}
}
trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout)
restClient, err := rest.NewClient(serverURL, trFn, newAuthToken)
if err != nil {
return nil, err
}
return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient, connected: 1}, nil
}
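As a rough sketch of how Diff surfaces a mismatch during bootstrap (hypothetical platform strings; this fragment assumes it runs inside this package, and EndpointZones construction is elided since it depends on internal types):

cfgA := ServerSystemConfig{MinioPlatform: "OS: linux | Arch: amd64"}
cfgB := ServerSystemConfig{MinioPlatform: "OS: linux | Arch: arm64"}
if err := cfgA.Diff(cfgB); err != nil {
    // err reads: Expected platform 'OS: linux | Arch: amd64',
    // found to be running 'OS: linux | Arch: arm64'
    fmt.Println(err)
}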

View file

@ -1,178 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"encoding/xml"
"fmt"
"io"
"net/http"
"github.com/gorilla/mux"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/bucket/policy"
)
const (
// Bucket Encryption configuration file name.
bucketSSEConfig = "bucket-encryption.xml"
)
// PutBucketEncryptionHandler - Stores given bucket encryption configuration
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html
func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketEncryption")
defer logger.AuditLog(w, r, "PutBucketEncryption", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsEncryptionSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Parse bucket encryption xml
encConfig, err := validateBucketSSEConfig(io.LimitReader(r.Body, maxBucketSSEConfigSize))
if err != nil {
apiErr := APIError{
Code: "MalformedXML",
Description: fmt.Sprintf("%s (%s)", errorCodes[ErrMalformedXML].Description, err),
HTTPStatusCode: errorCodes[ErrMalformedXML].HTTPStatusCode,
}
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
// Return error if KMS is not initialized
if GlobalKMS == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(encConfig)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Store the bucket encryption configuration in the object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeSuccessResponseHeadersOnly(w)
}
// GetBucketEncryptionHandler - Returns bucket encryption configuration
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html
func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketEncryption")
defer logger.AuditLog(w, r, "GetBucketEncryption", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists
var err error
if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
config, err := globalBucketMetadataSys.GetSSEConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write bucket encryption configuration to client
writeSuccessResponseXML(w, configData)
}
// DeleteBucketEncryptionHandler - Removes bucket encryption configuration
func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketEncryption")
defer logger.AuditLog(w, r, "DeleteBucketEncryption", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketEncryptionAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists
var err error
if _, err = objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Delete bucket encryption config from object layer
if err = globalBucketMetadataSys.Update(bucket, bucketSSEConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeSuccessNoContent(w)
}

View file

@ -1,59 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"errors"
"io"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
)
// BucketSSEConfigSys - in-memory cache of bucket encryption config
type BucketSSEConfigSys struct{}
// NewBucketSSEConfigSys - Creates an empty in-memory bucket encryption configuration cache
func NewBucketSSEConfigSys() *BucketSSEConfigSys {
return &BucketSSEConfigSys{}
}
// Get - gets bucket encryption config for the given bucket.
func (sys *BucketSSEConfigSys) Get(bucket string) (*bucketsse.BucketSSEConfig, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return nil, BucketSSEConfigNotFound{Bucket: bucket}
}
return globalBucketMetadataSys.GetSSEConfig(bucket)
}
// validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO.
func validateBucketSSEConfig(r io.Reader) (*bucketsse.BucketSSEConfig, error) {
encConfig, err := bucketsse.ParseBucketSSEConfig(r)
if err != nil {
return nil, err
}
if len(encConfig.Rules) == 1 && encConfig.Rules[0].DefaultEncryptionAction.Algorithm == bucketsse.AES256 {
return encConfig, nil
}
return nil, errors.New("Unsupported bucket encryption configuration")
}

View file

@ -1,70 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"errors"
"testing"
)
func TestValidateBucketSSEConfig(t *testing.T) {
testCases := []struct {
inputXML string
expectedErr error
shouldPass bool
}{
// MinIO supported XML
{
inputXML: `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>AES256</SSEAlgorithm>
</ApplyServerSideEncryptionByDefault>
</Rule>
</ServerSideEncryptionConfiguration>`,
expectedErr: nil,
shouldPass: true,
},
// Unsupported XML
{
inputXML: `<ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
<Rule>
<ApplyServerSideEncryptionByDefault>
<SSEAlgorithm>aws:kms</SSEAlgorithm>
<KMSMasterKeyID>arn:aws:kms:us-east-1:1234/5678example</KMSMasterKeyID>
</ApplyServerSideEncryptionByDefault>
</Rule>
</ServerSideEncryptionConfiguration>`,
expectedErr: errors.New("Unsupported bucket encryption configuration"),
shouldPass: false,
},
}
for i, tc := range testCases {
_, err := validateBucketSSEConfig(bytes.NewReader([]byte(tc.inputXML)))
if tc.shouldPass && err != nil {
t.Fatalf("Test case %d: Expected to succeed but got %s", i+1, err)
}
if !tc.shouldPass {
if err == nil || err != nil && err.Error() != tc.expectedErr.Error() {
t.Fatalf("Test case %d: Expected %s but got %s", i+1, tc.expectedErr, err)
}
}
}
}

File diff suppressed because it is too large

View file

@ -1,839 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"encoding/xml"
"io/ioutil"
"net/http"
"net/http/httptest"
"strconv"
"testing"
"github.com/minio/minio/pkg/auth"
)
// Wrapper for calling RemoveBucket HTTP handler tests for both XL multiple disks and single node setup.
func TestRemoveBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testRemoveBucketHandler, []string{"RemoveBucket"})
}
func testRemoveBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
_, err := obj.PutObject(GlobalContext, bucketName, "test-object", mustGetPutObjReader(t, bytes.NewBuffer([]byte{}), int64(0), "", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Error uploading object: <ERROR> %v", err)
}
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
req, err := newTestSignedRequestV4("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
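// The bucket still holds the object uploaded above, so removal must fail;
// any success status here is a test failure.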
switch rec.Code {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for DELETE bucket.
reqV2, err := newTestSignedRequestV2("DELETE", getBucketLocationURL("", bucketName), 0, nil, credentials.AccessKey, credentials.SecretKey, nil)
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for RemoveBucketHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
switch recV2.Code {
case http.StatusOK, http.StatusCreated, http.StatusAccepted, http.StatusNoContent:
t.Fatalf("Test %v: expected failure, but succeeded with %v", instanceType, recV2.Code)
}
}
// Wrapper for calling GetBucketPolicy HTTP handler tests for both XL multiple disks and single node setup.
func TestGetBucketLocationHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testGetBucketLocationHandler, []string{"GetBucketLocation"})
}
func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
bucketName string
accessKey string
secretKey string
// expected Response.
expectedRespStatus int
locationResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// Test case - 1.
// Tests for authenticated request and proper response.
{
bucketName: bucketName,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
locationResponse: []byte(`<?xml version="1.0" encoding="UTF-8"?>
<LocationConstraint xmlns="http://s3.amazonaws.com/doc/2006-03-01/"></LocationConstraint>`),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
// Test case - 2.
// Tests for signature mismatch error.
{
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
locationResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The Access Key Id you provided does not exist in our records.",
},
shouldPass: false,
},
}
for i, testCase := range testCases {
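// Note: only test case 2 (the signature-mismatch case, i == 1) is exercised
// by this loop.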
if i != 1 {
continue
}
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for Get bucket location.
req, err := newTestSignedRequestV4("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
if !bytes.Equal(testCase.locationResponse, rec.Body.Bytes()) && testCase.shouldPass {
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.locationResponse), rec.Body.String())
}
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
}
if errorResponse.Message != testCase.errorResponse.Message {
t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
}
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for GET bucket location endpoint.
reqV2, err := newTestSignedRequestV2("GET", getBucketLocationURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
errorResponse = APIErrorResponse{}
err = xml.Unmarshal(recV2.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, recV2.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
}
if errorResponse.Message != testCase.errorResponse.Message {
t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
}
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
}
// Test for Anonymous/unsigned http request.
// GetBucketLocationHandler doesn't support bucket policies, setting the policies shouldn't make any difference.
anonReq, err := newTestRequest("GET", getBucketLocationURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getReadOnlyBucketStatement` so that the
// unsigned request goes through and is validated again.
ExecObjectLayerAPIAnonTest(t, obj, "TestGetBucketLocationHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonReadOnlyBucketPolicy(bucketName))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
nilReq, err := newTestRequest("GET", getBucketLocationURL("", nilBucket), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// Executes the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling HeadBucket HTTP handler tests for both XL multiple disks and single node setup.
func TestHeadBucketHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testHeadBucketHandler, []string{"HeadBucket"})
}
func testHeadBucketHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
bucketName string
accessKey string
secretKey string
// expected Response.
expectedRespStatus int
}{
// Test case - 1.
// Bucket exists.
{
bucketName: bucketName,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
},
// Test case - 2.
// Non-existent bucket name.
{
bucketName: "2333",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
},
// Test case - 3.
// Testing for signature mismatch error.
// setting invalid access and secret keys.
{
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for HEAD bucket.
req, err := newTestSignedRequestV4("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for HeadBucketHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for HEAD bucket endpoint.
reqV2, err := newTestSignedRequestV2("HEAD", getHEADBucketURL("", testCase.bucketName), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
}
// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest("HEAD", getHEADBucketURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getReadOnlyBucketStatement` so that the
// unsigned request goes through and is validated again.
ExecObjectLayerAPIAnonTest(t, obj, "TestHeadBucketHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonReadOnlyBucketPolicy(bucketName))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
nilReq, err := newTestRequest("HEAD", getHEADBucketURL("", nilBucket), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling TestListMultipartUploadsHandler tests for both XL multiple disks and single node setup.
func TestListMultipartUploadsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testListMultipartUploadsHandler, []string{"ListMultipartUploads"})
}
// testListMultipartUploadsHandler - Tests validate listing of multipart uploads.
func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// Collection of non-exhaustive ListMultipartUploads test cases, valid errors
// and success responses.
testCases := []struct {
// Inputs to ListMultipartUploads.
bucket string
prefix string
keyMarker string
uploadIDMarker string
delimiter string
maxUploads string
accessKey string
secretKey string
expectedRespStatus int
shouldPass bool
}{
// Test case - 1.
// Setting invalid bucket name.
{
bucket: ".test",
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
shouldPass: false,
},
// Test case - 2.
// Setting a non-existent bucket.
{
bucket: "volatile-bucket-1",
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
shouldPass: false,
},
// Test case - 3.
// Delimiter unsupported, but response is empty.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "-",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
shouldPass: true,
},
// Test case - 4.
// Setting Invalid prefix and marker combination.
{
bucket: bucketName,
prefix: "asia",
keyMarker: "europe-object",
uploadIDMarker: "",
delimiter: "",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotImplemented,
shouldPass: false,
},
// Test case - 5.
// Invalid upload id and marker combination.
{
bucket: bucketName,
prefix: "asia",
keyMarker: "asia/europe/",
uploadIDMarker: "abc",
delimiter: "",
maxUploads: "0",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotImplemented,
shouldPass: false,
},
// Test case - 6.
// Setting a negative value to max-uploads parameter, should result in http.StatusBadRequest.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "-1",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
shouldPass: false,
},
// Test case - 7.
// Case with the right set of parameters,
// should result in success 200 OK.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: SlashSeparator,
maxUploads: "100",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
shouldPass: true,
},
// Test case - 8.
// Good case without delimiter.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "100",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
shouldPass: true,
},
// Test case - 9.
// Setting Invalid AccessKey and SecretKey to induce and verify Signature Mismatch error.
{
bucket: bucketName,
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "",
maxUploads: "100",
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
shouldPass: true,
},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", testCase.bucket, testCase.prefix, testCase.keyMarker, testCase.uploadIDMarker, testCase.delimiter, testCase.maxUploads)
req, gerr := newTestSignedRequestV4("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if gerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", i+1, instanceType, gerr)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for list multipart uploads endpoint.
// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2("GET", u, 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
}
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request for List multipart uploads endpoint.
u := getListMultipartUploadsURLWithParams("", bucketName, "", "", "", "", "")
req, err := newTestSignedRequestV4("GET", u, 0, nil, "", "", nil) // Generate an anonymous request.
if err != nil {
t.Fatalf("Test %s: Failed to create HTTP request for ListMultipartUploadsHandler: <ERROR> %v", instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != http.StatusForbidden {
t.Errorf("Test %s: Expected the response status to be `http.StatusForbidden`, but instead found `%d`", instanceType, rec.Code)
}
url := getListMultipartUploadsURLWithParams("", testCases[6].bucket, testCases[6].prefix, testCases[6].keyMarker,
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
// Test for Anonymous/unsigned http request.
anonReq, err := newTestRequest("GET", url, 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getWriteOnlyBucketStatement` so that the
// unsigned request goes through and is validated again.
ExecObjectLayerAPIAnonTest(t, obj, "TestListMultipartUploadsHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy(bucketName))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
url = getListMultipartUploadsURLWithParams("", nilBucket, "dummy-prefix", testCases[6].keyMarker,
testCases[6].uploadIDMarker, testCases[6].delimiter, testCases[6].maxUploads)
nilReq, err := newTestRequest("GET", url, 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling TestListBucketsHandler tests for both XL multiple disks and single node setup.
func TestListBucketsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testListBucketsHandler, []string{"ListBuckets"})
}
// testListBucketsHandler - Tests validate listing of buckets.
func testListBucketsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
testCases := []struct {
bucketName string
accessKey string
secretKey string
expectedRespStatus int
}{
// Test case - 1.
// Validate a good case request succeeds.
{
bucketName: bucketName,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusOK,
},
// Test case - 2.
// Test case with invalid accessKey to produce and validate Signature MisMatch error.
{
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
},
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
req, lerr := newTestSignedRequestV4("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if lerr != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for ListBucketsHandler: <ERROR> %v", i+1, instanceType, lerr)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
// Verify response of the V2 signed HTTP request.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for list buckets endpoint.
// verify response for V2 signed HTTP request.
reqV2, err := newTestSignedRequestV2("GET", getListBucketURL(""), 0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
}
// Test for Anonymous/unsigned http request.
// ListBucketsHandler doesn't support bucket policies, setting the policies shouldn't make a difference.
anonReq, err := newTestRequest("GET", getListBucketURL(""), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request.", instanceType)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
// unsigned request goes through and is validated again.
ExecObjectLayerAPIAnonTest(t, obj, "ListBucketsHandler", "", "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy("*"))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilReq, err := newTestRequest("GET", getListBucketURL(""), 0, nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, "", "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling DeleteMultipleObjects HTTP handler tests for both XL multiple disks and single node setup.
func TestAPIDeleteMultipleObjectsHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testAPIDeleteMultipleObjectsHandler, []string{"DeleteMultipleObjects"})
}
func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
var err error
contentBytes := []byte("hello")
sha256sum := ""
var objectNames []string
for i := 0; i < 10; i++ {
objectName := "test-object-" + strconv.Itoa(i)
// uploading the object.
_, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewBuffer(contentBytes), int64(len(contentBytes)), "", sha256sum), ObjectOptions{})
// if object upload fails stop the test.
if err != nil {
t.Fatalf("Put Object %d: Error uploading object: <ERROR> %v", i, err)
}
// object used for the test.
objectNames = append(objectNames, objectName)
}
getObjectIdentifierList := func(objectNames []string) (objectIdentifierList []ObjectIdentifier) {
for _, objectName := range objectNames {
objectIdentifierList = append(objectIdentifierList, ObjectIdentifier{objectName})
}
return objectIdentifierList
}
getDeleteErrorList := func(objects []ObjectIdentifier) (deleteErrorList []DeleteError) {
for _, obj := range objects {
deleteErrorList = append(deleteErrorList, DeleteError{
Code: errorCodes[ErrAccessDenied].Code,
Message: errorCodes[ErrAccessDenied].Description,
Key: obj.ObjectName,
})
}
return deleteErrorList
}
requestList := []DeleteObjectsRequest{
{Quiet: false, Objects: getObjectIdentifierList(objectNames[:5])},
{Quiet: true, Objects: getObjectIdentifierList(objectNames[5:])},
}
// generate multi-object delete responses.
successRequest0 := encodeResponse(requestList[0])
successResponse0 := generateMultiDeleteResponse(requestList[0].Quiet, requestList[0].Objects, nil)
encodedSuccessResponse0 := encodeResponse(successResponse0)
successRequest1 := encodeResponse(requestList[1])
successResponse1 := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil)
encodedSuccessResponse1 := encodeResponse(successResponse1)
// generate multi-object delete response for errors.
// errorRequest := encodeResponse(requestList[1])
errorResponse := generateMultiDeleteResponse(requestList[1].Quiet, requestList[1].Objects, nil)
encodedErrorResponse := encodeResponse(errorResponse)
anonRequest := encodeResponse(requestList[0])
anonResponse := generateMultiDeleteResponse(requestList[0].Quiet, nil, getDeleteErrorList(requestList[0].Objects))
encodedAnonResponse := encodeResponse(anonResponse)
testCases := []struct {
bucket string
objects []byte
accessKey string
secretKey string
expectedContent []byte
expectedRespStatus int
}{
// Test case - 1.
// Delete objects with invalid access key.
{
bucket: bucketName,
objects: successRequest0,
accessKey: "Invalid-AccessID",
secretKey: credentials.SecretKey,
expectedContent: nil,
expectedRespStatus: http.StatusForbidden,
},
// Test case - 2.
// Delete valid objects with quiet flag off.
{
bucket: bucketName,
objects: successRequest0,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedContent: encodedSuccessResponse0,
expectedRespStatus: http.StatusOK,
},
// Test case - 3.
// Delete valid objects with quiet flag on.
{
bucket: bucketName,
objects: successRequest1,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedContent: encodedSuccessResponse1,
expectedRespStatus: http.StatusOK,
},
// Test case - 4.
// Delete previously deleted objects.
{
bucket: bucketName,
objects: successRequest1,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedContent: encodedErrorResponse,
expectedRespStatus: http.StatusOK,
},
// Test case - 5.
// Anonymous user access denied response
// Currently anonymous users cannot delete multiple objects in MinIO server
{
bucket: bucketName,
objects: anonRequest,
accessKey: "",
secretKey: "",
expectedContent: encodedAnonResponse,
expectedRespStatus: http.StatusOK,
},
}
for i, testCase := range testCases {
var req *http.Request
var actualContent []byte
// Generate a signed or anonymous request based on the testCase
if testCase.accessKey != "" {
req, err = newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects), testCase.accessKey, testCase.secretKey, nil)
} else {
req, err = newTestRequest("POST", getDeleteMultipleObjectsURL("", bucketName),
int64(len(testCase.objects)), bytes.NewReader(testCase.objects))
}
if err != nil {
t.Fatalf("Failed to create HTTP request for DeleteMultipleObjects: <ERROR> %v", err)
}
rec := httptest.NewRecorder()
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the registered handler.
apiRouter.ServeHTTP(rec, req)
// Assert the response code with the expected status.
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: MinIO %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
// read the response body.
actualContent, err = ioutil.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d : MinIO %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
// Verify whether the delete response content matches the expected value.
if testCase.expectedContent != nil && !bytes.Equal(testCase.expectedContent, actualContent) {
t.Errorf("Test %d : MinIO %s: Object content differs from expected value.", i+1, instanceType)
}
}
// HTTP request to test the case of `objectLayer` being set to `nil`.
// There is no need to use an existing bucket or valid input for creating the request,
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
nilObject := ""
nilReq, err := newTestSignedRequestV4("POST", getDeleteMultipleObjectsURL("", nilBucket), 0, nil, "", "", nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, nilObject, instanceType, apiRouter, nilReq)
}

View file

@ -1,171 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"encoding/xml"
"io"
"net/http"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/legacy/http"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/bucket/policy"
)
const (
// Lifecycle configuration file.
bucketLifecycleConfig = "lifecycle.xml"
)
// PutBucketLifecycleHandler - This HTTP handler stores given bucket lifecycle configuration as per
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html
func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketLifecycle")
defer logger.AuditLog(w, r, "PutBucketLifecycle", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
// PutBucketLifecycle always needs a Content-MD5 header.
if _, ok := r.Header[xhttp.ContentMD5]; !ok {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentMD5), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
bucketLifecycle, err := lifecycle.ParseLifecycleConfig(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Validate the received bucket lifecycle document.
if err = bucketLifecycle.Validate(); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(bucketLifecycle)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Success.
writeSuccessResponseHeadersOnly(w)
}
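For illustration, a minimal client-side sketch of exercising this handler, assuming a local unauthenticated test endpoint and a hypothetical bucket name; a real S3 client must also sign the request. The sketch assumes imports "crypto/md5", "encoding/base64", "fmt", "net/http" and "strings".

func examplePutLifecycle() error {
	// Minimal rule: expire objects under logs/ after 365 days.
	body := `<LifecycleConfiguration><Rule><ID>expire-logs</ID>` +
		`<Filter><Prefix>logs/</Prefix></Filter><Status>Enabled</Status>` +
		`<Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`
	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:9000/mybucket/?lifecycle", strings.NewReader(body))
	if err != nil {
		return err
	}
	// The handler rejects requests without a Content-MD5 header.
	sum := md5.Sum([]byte(body))
	req.Header.Set("Content-MD5", base64.StdEncoding.EncodeToString(sum[:]))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // expect 200 OK on success
	return nil
}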
// GetBucketLifecycleHandler - This HTTP handler returns bucket lifecycle configuration.
func (api objectAPIHandlers) GetBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketLifecycle")
defer logger.AuditLog(w, r, "GetBucketLifecycle", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
config, err := globalBucketMetadataSys.GetLifecycleConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write lifecycle configuration to client.
writeSuccessResponseXML(w, configData)
}
// DeleteBucketLifecycleHandler - This HTTP handler removes bucket lifecycle configuration.
func (api objectAPIHandlers) DeleteBucketLifecycleHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketLifecycle")
defer logger.AuditLog(w, r, "DeleteBucketLifecycle", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketLifecycleAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err := globalBucketMetadataSys.Update(bucket, bucketLifecycleConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Success.
writeSuccessNoContent(w)
}

View file

@ -1,303 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"encoding/xml"
"net/http"
"net/http/httptest"
"testing"
"github.com/minio/minio/pkg/auth"
)
// Test S3 Bucket lifecycle APIs with wrong credentials
func TestBucketLifecycleWrongCredentials(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlersWrongCredentials, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}
// Test for authentication
func testBucketLifecycleHandlersWrongCredentials(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
method string
bucketName string
accessKey string
secretKey string
// Sent body
body []byte
// Expected response
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// GET empty credentials
{
method: "GET", bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// GET wrong credentials
{
method: "GET", bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The Access Key Id you provided does not exist in our records.",
},
shouldPass: false,
},
// PUT empty credentials
{
method: "PUT",
bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// PUT wrong credentials
{
method: "PUT",
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The Access Key Id you provided does not exist in our records.",
},
shouldPass: false,
},
// DELETE empty credentials
{
method: "DELETE",
bucketName: bucketName,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "AccessDenied",
Message: "Access Denied.",
},
shouldPass: false,
},
// DELETE wrong credentials
{
method: "DELETE",
bucketName: bucketName,
accessKey: "abcd",
secretKey: "abcd",
expectedRespStatus: http.StatusForbidden,
lifecycleResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The Access Key Id you provided does not exist in our records.",
},
shouldPass: false,
},
}
testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}
// Test S3 Bucket lifecycle APIs
func TestBucketLifecycle(t *testing.T) {
ExecObjectLayerAPITest(t, testBucketLifecycleHandlers, []string{"GetBucketLifecycle", "PutBucketLifecycle", "DeleteBucketLifecycle"})
}
// Simple tests of bucket lifecycle: PUT, GET, DELETE.
// Tests are related and the order is important.
func testBucketLifecycleHandlers(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
creds auth.Credentials, t *testing.T) {
// test cases with sample input and expected output.
testCases := []struct {
method string
bucketName string
accessKey string
secretKey string
// Sent body
body []byte
// Expected response
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}{
// Test case - 1.
// Filter specifies more than one of (Prefix, Tag, And).
{
method: "PUT",
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusBadRequest,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidRequest",
Message: "Filter must have exactly one of Prefix, Tag, or And specified",
},
shouldPass: false,
},
// Date is in the wrong format.
{
method: "PUT",
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix><Tag><Key>Key1</Key><Value>Value1</Value></Tag></Filter><Status>Enabled</Status><Expiration><Date>365</Date></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusBadRequest,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidRequest",
Message: "Date must be provided in ISO 8601 format",
},
shouldPass: false,
},
{
method: "PUT",
bucketName: bucketName,
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
body: []byte(`<?xml version="1.0" encoding="UTF-8"?><LifecycleConfiguration><Rule><ID>id</ID><Filter><Prefix>logs/</Prefix></Filter><Status>Enabled</Status><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
expectedRespStatus: http.StatusOK,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: "GET",
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusOK,
lifecycleResponse: []byte(`<LifecycleConfiguration><Rule><ID>id</ID><Status>Enabled</Status><Filter><Prefix>logs/</Prefix></Filter><Expiration><Days>365</Days></Expiration></Rule></LifecycleConfiguration>`),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: "DELETE",
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusNoContent,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{},
shouldPass: true,
},
{
method: "GET",
accessKey: creds.AccessKey,
secretKey: creds.SecretKey,
bucketName: bucketName,
body: []byte(``),
expectedRespStatus: http.StatusNotFound,
lifecycleResponse: []byte(``),
errorResponse: APIErrorResponse{
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "NoSuchLifecycleConfiguration",
Message: "The lifecycle configuration does not exist",
},
shouldPass: false,
},
}
testBucketLifecycle(obj, instanceType, bucketName, apiRouter, t, testCases)
}
// testBucketLifecycle is a generic test harness for lifecycle requests.
func testBucketLifecycle(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
t *testing.T, testCases []struct {
method string
bucketName string
accessKey string
secretKey string
body []byte
expectedRespStatus int
lifecycleResponse []byte
errorResponse APIErrorResponse
shouldPass bool
}) {
for i, testCase := range testCases {
// initialize httptest Recorder, this records any mutations to response writer inside the handler.
rec := httptest.NewRecorder()
// construct HTTP request
req, err := newTestSignedRequestV4(testCase.method, getBucketLifecycleURL("", testCase.bucketName),
int64(len(testCase.body)), bytes.NewReader(testCase.body), testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for GetBucketLocationHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler`, it has a ServeHTTP method that executes the handler logic.
// Call ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, rec.Code)
}
if testCase.shouldPass && !bytes.Equal(testCase.lifecycleResponse, rec.Body.Bytes()) {
t.Errorf("Test %d: %s: Expected the response to be `%s`, but instead found `%s`", i+1, instanceType, string(testCase.lifecycleResponse), rec.Body.String())
}
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(rec.Body.Bytes(), &errorResponse)
if err != nil && !testCase.shouldPass {
t.Fatalf("Test %d: %s: Unable to marshal response body %s", i+1, instanceType, rec.Body.String())
}
if errorResponse.Resource != testCase.errorResponse.Resource {
t.Errorf("Test %d: %s: Expected the error resource to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Resource, errorResponse.Resource)
}
if errorResponse.Message != testCase.errorResponse.Message {
t.Errorf("Test %d: %s: Expected the error message to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Message, errorResponse.Message)
}
if errorResponse.Code != testCase.errorResponse.Code {
t.Errorf("Test %d: %s: Expected the error code to be `%s`, but instead found `%s`", i+1, instanceType, testCase.errorResponse.Code, errorResponse.Code)
}
}
}

View file

@ -1,49 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"github.com/minio/minio/pkg/bucket/lifecycle"
)
const (
// Disabled means the lifecycle rule is inactive
Disabled = "Disabled"
)
// LifecycleSys - Bucket lifecycle subsystem.
type LifecycleSys struct{}
// Get - gets lifecycle config associated to a given bucket name.
func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return nil, BucketLifecycleNotFound{Bucket: bucketName}
}
return globalBucketMetadataSys.GetLifecycleConfig(bucketName)
}
// NewLifecycleSys - creates new lifecycle system.
func NewLifecycleSys() *LifecycleSys {
return &LifecycleSys{}
}

View file

@ -1,329 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"net/http"
"strings"
"github.com/gorilla/mux"
"github.com/minio/minio/legacy/crypto"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/bucket/policy"
)
// Validate all the ListObjects query arguments, returning an APIErrorCode
// if any argument does not meet the required conditions.
// Note: only maxKeys and encodingType are validated here; the marker and
// delimiter arguments are currently accepted as-is.
func validateListObjectsArgs(marker, delimiter, encodingType string, maxKeys int) APIErrorCode {
// Max keys cannot be negative.
if maxKeys < 0 {
return ErrInvalidMaxKeys
}
if encodingType != "" {
// Only url encoding type is supported
if strings.ToLower(encodingType) != "url" {
return ErrInvalidEncodingMethod
}
}
return ErrNone
}
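A sketch of a table-driven test for the validation above (not part of the original file; assumes the "testing" import and the package's APIErrorCode values). The first two arguments are currently ignored.

func TestValidateListObjectsArgsSketch(t *testing.T) {
	cases := []struct {
		encodingType string
		maxKeys      int
		want         APIErrorCode
	}{
		{"", 1000, ErrNone},
		{"URL", 10, ErrNone}, // encoding type check is case-insensitive
		{"json", 10, ErrInvalidEncodingMethod},
		{"", -1, ErrInvalidMaxKeys},
	}
	for i, c := range cases {
		if got := validateListObjectsArgs("", "", c.encodingType, c.maxKeys); got != c.want {
			t.Errorf("case %d: got %v, want %v", i, got, c.want)
		}
	}
}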
// ListBucketObjectVersions - GET Bucket Object versions
// You can use the versions subresource to list metadata about all
// of the versions of objects in a bucket.
func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListBucketObjectVersions")
defer logger.AuditLog(w, r, "ListBucketObjectVersions", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
urlValues := r.URL.Query()
// Extract all the listBucketVersions query params to their native values.
// versionIDMarker is ignored here.
prefix, marker, delimiter, maxkeys, encodingType, _, errCode := getListBucketObjectVersionsArgs(urlValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
// Validate the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(marker, delimiter, encodingType, maxkeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjects := objectAPI.ListObjects
// Initiate a list objects operation based on the input params.
// On success, returns a ListObjectsInfo object to be
// marshaled into an S3-compatible XML response.
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxkeys)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i := range listObjectsInfo.Objects {
if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
}
listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
response := generateListVersionsResponse(bucket, prefix, marker, delimiter, encodingType, maxkeys, listObjectsInfo)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
// ListObjectsV2MHandler - GET Bucket (List Objects) Version 2 with metadata.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
// NOTE: It is recommended that this API be used for application development.
// MinIO continues to support ListObjectsV1 and V2 for legacy tools.
func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV2M")
defer logger.AuditLog(w, r, "ListObjectsV2M", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
urlValues := r.URL.Query()
// Extract all the listObjectsV2 query params to their native values.
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjectsV2 := objectAPI.ListObjectsV2
// Initiate a list objects operation based on the input params.
// On success, returns a ListObjectsInfo object to be
// marshaled into an S3-compatible XML response.
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i := range listObjectsV2Info.Objects {
if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
}
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
response := generateListObjectsV2Response(bucket, prefix, token,
listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, true)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
// ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
// NOTE: It is recommended that this API be used for application development.
// MinIO continues to support ListObjectsV1 for legacy tools.
func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV2")
defer logger.AuditLog(w, r, "ListObjectsV2", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
urlValues := r.URL.Query()
// Extract all the listObjectsV2 query params to their native values.
prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)
if errCode != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
return
}
// Validate the query params before beginning to serve the request.
// fetch-owner is not validated since it is a boolean
if s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjectsV2 := objectAPI.ListObjectsV2
// Initiate a list objects operation based on the input params.
// On success, returns a ListObjectsInfo object to be
// marshaled into an S3-compatible XML response.
listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i := range listObjectsV2Info.Objects {
if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
}
listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
response := generateListObjectsV2Response(bucket, prefix, token,
listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, false)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
// ListObjectsV1Handler - GET Bucket (List Objects) Version 1.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListObjectsV1")
defer logger.AuditLog(w, r, "ListObjectsV1", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucket := vars["bucket"]
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Extract all the listObjectsV1 query params to their native values.
prefix, marker, delimiter, maxKeys, encodingType, s3Error := getListObjectsV1Args(r.URL.Query())
if s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Validate all the query params before beginning to serve the request.
if s3Error := validateListObjectsArgs(marker, delimiter, encodingType, maxKeys); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
listObjects := objectAPI.ListObjects
// Initiate a list objects operation based on the input params.
// On success, returns a ListObjectsInfo object to be
// marshaled into an S3-compatible XML response.
listObjectsInfo, err := listObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
for i := range listObjectsInfo.Objects {
if crypto.IsEncrypted(listObjectsInfo.Objects[i].UserDefined) {
listObjectsInfo.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsInfo.Objects[i], false)
}
listObjectsInfo.Objects[i].Size, err = listObjectsInfo.Objects[i].GetActualSize()
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
// Write success response.
writeSuccessResponseXML(w, encodeResponse(response))
}
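The three handlers above are driven purely by query parameters. A minimal sketch of building a V1 listing URL (hypothetical endpoint and bucket; request signing omitted; assumes the "net/url" import):

func exampleListV1URL() string {
	u := url.URL{
		Scheme: "http",
		Host:   "127.0.0.1:9000",
		Path:   "/mybucket/",
		RawQuery: url.Values{
			"prefix":        {"logs/"},
			"delimiter":     {"/"},
			"max-keys":      {"100"},
			"encoding-type": {"url"},
		}.Encode(),
	}
	return u.String() // e.g. http://127.0.0.1:9000/mybucket/?delimiter=%2F&...
}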

View file

@ -1,399 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"errors"
"fmt"
"sync"
"github.com/minio/minio-go/v6/pkg/tags"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/sync/errgroup"
)
// BucketMetadataSys captures all bucket metadata for a given cluster.
type BucketMetadataSys struct {
sync.RWMutex
metadataMap map[string]BucketMetadata
}
// Remove removes bucket metadata from memory.
func (sys *BucketMetadataSys) Remove(bucket string) {
if globalIsGateway {
return
}
sys.Lock()
delete(sys.metadataMap, bucket)
sys.Unlock()
}
// Set - sets a new metadata in-memory.
// Only a shallow copy is saved and fields with references
// cannot be modified without causing a race condition,
// so they should be replaced atomically and not appended to, etc.
// Data is not persisted to disk.
func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) {
if globalIsGateway {
return
}
if bucket != minioMetaBucket {
sys.Lock()
sys.metadataMap[bucket] = meta
sys.Unlock()
}
}
// Update updates bucket metadata for the specified config file.
// The configData should not be modified after being passed here.
func (sys *BucketMetadataSys) Update(bucket string, configFile string, configData []byte) error {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return errServerNotInitialized
}
if globalIsGateway {
// This code is needed only for gateway implementations.
switch configFile {
case bucketSSEConfig:
if globalGatewayName == "nas" {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
}
meta.EncryptionConfigXML = configData
return meta.Save(GlobalContext, objAPI)
}
case bucketLifecycleConfig:
if globalGatewayName == "nas" {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
}
meta.LifecycleConfigXML = configData
return meta.Save(GlobalContext, objAPI)
}
case bucketTaggingConfig:
if globalGatewayName == "nas" {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
}
meta.TaggingConfigXML = configData
return meta.Save(GlobalContext, objAPI)
}
case bucketNotificationConfig:
if globalGatewayName == "nas" {
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
}
meta.NotificationConfigXML = configData
return meta.Save(GlobalContext, objAPI)
}
case bucketPolicyConfig:
if configData == nil {
return objAPI.DeleteBucketPolicy(GlobalContext, bucket)
}
config, err := policy.ParseConfig(bytes.NewReader(configData), bucket)
if err != nil {
return err
}
return objAPI.SetBucketPolicy(GlobalContext, bucket, config)
}
return NotImplemented{}
}
if bucket == minioMetaBucket {
return errInvalidArgument
}
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return err
}
switch configFile {
case bucketPolicyConfig:
meta.PolicyConfigJSON = configData
case bucketNotificationConfig:
meta.NotificationConfigXML = configData
case bucketLifecycleConfig:
meta.LifecycleConfigXML = configData
case bucketSSEConfig:
meta.EncryptionConfigXML = configData
case bucketTaggingConfig:
meta.TaggingConfigXML = configData
case objectLockConfig:
meta.ObjectLockConfigXML = configData
case bucketQuotaConfigFile:
meta.QuotaConfigJSON = configData
default:
return fmt.Errorf("Unknown bucket %s metadata update requested %s", bucket, configFile)
}
if err := meta.Save(GlobalContext, objAPI); err != nil {
return err
}
sys.Set(bucket, meta)
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
return nil
}
// Get returns metadata for a bucket.
// If no metadata exists, errConfigNotFound is returned along with a
// freshly initialized metadata object.
// Only a shallow copy is returned, so referenced data should not be
// modified, but can be replaced atomically.
func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) {
if globalIsGateway || bucket == minioMetaBucket {
return newBucketMetadata(bucket), errConfigNotFound
}
sys.RLock()
defer sys.RUnlock()
meta, ok := sys.metadataMap[bucket]
if !ok {
return newBucketMetadata(bucket), errConfigNotFound
}
return meta, nil
}
// GetTaggingConfig returns configured tagging config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) {
meta, err := sys.GetConfig(bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketTaggingNotFound{Bucket: bucket}
}
return nil, err
}
if meta.taggingConfig == nil {
return nil, BucketTaggingNotFound{Bucket: bucket}
}
return meta.taggingConfig, nil
}
// GetObjectLockConfig returns configured object lock config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, error) {
meta, err := sys.GetConfig(bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
}
return nil, err
}
if meta.objectLockConfig == nil {
return nil, BucketObjectLockConfigNotFound{Bucket: bucket}
}
return meta.objectLockConfig, nil
}
// GetLifecycleConfig returns configured lifecycle config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Lifecycle, error) {
meta, err := sys.GetConfig(bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketLifecycleNotFound{Bucket: bucket}
}
return nil, err
}
if meta.lifecycleConfig == nil {
return nil, BucketLifecycleNotFound{Bucket: bucket}
}
return meta.lifecycleConfig, nil
}
// GetNotificationConfig returns configured notification config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) {
if globalIsGateway && globalGatewayName == "nas" {
// Only needed in case of NAS gateway.
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return nil, err
}
return meta.notificationConfig, nil
}
meta, err := sys.GetConfig(bucket)
if err != nil {
return nil, err
}
return meta.notificationConfig, nil
}
// GetSSEConfig returns configured SSE config
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, error) {
meta, err := sys.GetConfig(bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketSSEConfigNotFound{Bucket: bucket}
}
return nil, err
}
if meta.sseConfig == nil {
return nil, BucketSSEConfigNotFound{Bucket: bucket}
}
return meta.sseConfig, nil
}
// GetPolicyConfig returns configured bucket policy
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return objAPI.GetBucketPolicy(GlobalContext, bucket)
}
meta, err := sys.GetConfig(bucket)
if err != nil {
if errors.Is(err, errConfigNotFound) {
return nil, BucketPolicyNotFound{Bucket: bucket}
}
return nil, err
}
if meta.policyConfig == nil {
return nil, BucketPolicyNotFound{Bucket: bucket}
}
return meta.policyConfig, nil
}
// GetQuotaConfig returns configured bucket quota
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetQuotaConfig(bucket string) (*madmin.BucketQuota, error) {
meta, err := sys.GetConfig(bucket)
if err != nil {
return nil, err
}
return meta.quotaConfig, nil
}
// GetConfig returns the current bucket metadata
// The returned object may not be modified.
func (sys *BucketMetadataSys) GetConfig(bucket string) (BucketMetadata, error) {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return newBucketMetadata(bucket), errServerNotInitialized
}
if globalIsGateway {
return newBucketMetadata(bucket), NotImplemented{}
}
if bucket == minioMetaBucket {
return newBucketMetadata(bucket), errInvalidArgument
}
sys.RLock()
meta, ok := sys.metadataMap[bucket]
sys.RUnlock()
if ok {
return meta, nil
}
meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket)
if err != nil {
return meta, err
}
sys.Lock()
sys.metadataMap[bucket] = meta
sys.Unlock()
return meta, nil
}
// Init - initializes bucket metadata system for all buckets.
func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
// In gateway mode, we don't need to load the policies
// from the backend.
if globalIsGateway {
return nil
}
// Load bucket metadata once during boot.
return sys.load(ctx, buckets, objAPI)
}
// concurrentLoad loads bucket metadata for the given buckets concurrently to speed up startup.
func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
g := errgroup.WithNErrs(len(buckets))
for index := range buckets {
index := index
g.Go(func() error {
meta, err := loadBucketMetadata(ctx, objAPI, buckets[index].Name)
if err != nil {
return err
}
sys.Lock()
sys.metadataMap[buckets[index].Name] = meta
sys.Unlock()
return nil
}, index)
}
for _, err := range g.Wait() {
if err != nil {
return err
}
}
return nil
}
// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) load(ctx context.Context, buckets []BucketInfo, objAPI ObjectLayer) error {
count := 100 // load 100 bucket metadata at a time.
for {
if len(buckets) < count {
return sys.concurrentLoad(ctx, buckets, objAPI)
}
if err := sys.concurrentLoad(ctx, buckets[:count], objAPI); err != nil {
return err
}
buckets = buckets[count:]
}
}
// NewBucketMetadataSys - creates a new bucket metadata system.
func NewBucketMetadataSys() *BucketMetadataSys {
return &BucketMetadataSys{
metadataMap: make(map[string]BucketMetadata),
}
}
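A minimal sketch of the in-memory round-trip within the same package, assuming a non-gateway setup (in gateway mode Set is a no-op and Get fails) and the "fmt" import; the bucket name is hypothetical:

func exampleMetadataCache() {
	sys := NewBucketMetadataSys()
	meta := newBucketMetadata("photos")
	sys.Set("photos", meta) // caches a shallow copy in memory only
	if m, err := sys.Get("photos"); err == nil {
		fmt.Printf("%s created at %s\n", m.Name, m.Created)
	}
}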

View file

@ -1,317 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"encoding/binary"
"encoding/xml"
"errors"
"fmt"
"path"
"time"
"github.com/minio/minio-go/v6/pkg/tags"
"github.com/minio/minio/legacy/logger"
bucketsse "github.com/minio/minio/pkg/bucket/encryption"
"github.com/minio/minio/pkg/bucket/lifecycle"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin"
)
const (
legacyBucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
legacyBucketObjectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
bucketMetadataFile = ".metadata.bin"
bucketMetadataFormat = 1
bucketMetadataVersion = 1
)
var (
enabledBucketObjectLockConfig = []byte(`<ObjectLockConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><ObjectLockEnabled>Enabled</ObjectLockEnabled></ObjectLockConfiguration>`)
)
//go:generate msgp -file $GOFILE
// BucketMetadata contains bucket metadata.
// When adding/removing fields, regenerate the marshal code using the go generate above.
// Only changing meaning of fields requires a version bump.
// bucketMetadataFormat refers to the format.
// bucketMetadataVersion can be used to track a rolling upgrade of a field.
type BucketMetadata struct {
Name string
Created time.Time
LockEnabled bool // legacy not used anymore.
PolicyConfigJSON []byte
NotificationConfigXML []byte
LifecycleConfigXML []byte
ObjectLockConfigXML []byte
EncryptionConfigXML []byte
TaggingConfigXML []byte
QuotaConfigJSON []byte
// Unexported fields. Must be updated atomically.
policyConfig *policy.Policy
notificationConfig *event.Config
lifecycleConfig *lifecycle.Lifecycle
objectLockConfig *objectlock.Config
sseConfig *bucketsse.BucketSSEConfig
taggingConfig *tags.Tags
quotaConfig *madmin.BucketQuota
}
// newBucketMetadata creates BucketMetadata with the supplied name and Created set to the current time.
func newBucketMetadata(name string) BucketMetadata {
return BucketMetadata{
Name: name,
Created: UTCNow(),
notificationConfig: &event.Config{
XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/",
},
quotaConfig: &madmin.BucketQuota{},
}
}
// Load - loads the metadata of the named bucket from the ObjectLayer API.
// If an error is returned, the metadata remains default-initialized.
func (b *BucketMetadata) Load(ctx context.Context, api ObjectLayer, name string) error {
configFile := path.Join(bucketConfigPrefix, name, bucketMetadataFile)
data, err := readConfig(ctx, api, configFile)
if err != nil {
return err
}
if len(data) <= 4 {
return fmt.Errorf("loadBucketMetadata: no data")
}
// Read header
switch binary.LittleEndian.Uint16(data[0:2]) {
case bucketMetadataFormat:
default:
return fmt.Errorf("loadBucketMetadata: unknown format: %d", binary.LittleEndian.Uint16(data[0:2]))
}
switch binary.LittleEndian.Uint16(data[2:4]) {
case bucketMetadataVersion:
default:
return fmt.Errorf("loadBucketMetadata: unknown version: %d", binary.LittleEndian.Uint16(data[2:4]))
}
// OK, parse data.
_, err = b.UnmarshalMsg(data[4:])
return err
}
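The on-disk layout implied by Load and Save is a 4-byte little-endian header (format, version) followed by the MessagePack payload. A minimal sketch of building that header with the file's existing constants and "encoding/binary" import:

func exampleMetadataHeader() []byte {
	hdr := make([]byte, 4)
	binary.LittleEndian.PutUint16(hdr[0:2], bucketMetadataFormat)  // bytes 0-1: format (1)
	binary.LittleEndian.PutUint16(hdr[2:4], bucketMetadataVersion) // bytes 2-3: version (1)
	return hdr // Save appends the MarshalMsg output after this header
}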
// loadBucketMetadata loads bucket metadata and migrates any legacy configs to the unified format.
func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket string) (BucketMetadata, error) {
b := newBucketMetadata(bucket)
err := b.Load(ctx, objectAPI, bucket)
if err == nil {
return b, b.convertLegacyConfigs(ctx, objectAPI)
}
if err != errConfigNotFound {
return b, err
}
// Old bucket without bucket metadata. Hence we migrate existing settings.
return b, b.convertLegacyConfigs(ctx, objectAPI)
}
// parseAllConfigs will parse all configs and populate the private fields.
// The first error encountered is returned.
func (b *BucketMetadata) parseAllConfigs(ctx context.Context, objectAPI ObjectLayer) (err error) {
if len(b.PolicyConfigJSON) != 0 {
b.policyConfig, err = policy.ParseConfig(bytes.NewReader(b.PolicyConfigJSON), b.Name)
if err != nil {
return err
}
} else {
b.policyConfig = nil
}
if len(b.NotificationConfigXML) != 0 {
if err = xml.Unmarshal(b.NotificationConfigXML, b.notificationConfig); err != nil {
return err
}
}
if len(b.LifecycleConfigXML) != 0 {
b.lifecycleConfig, err = lifecycle.ParseLifecycleConfig(bytes.NewReader(b.LifecycleConfigXML))
if err != nil {
return err
}
} else {
b.lifecycleConfig = nil
}
if len(b.EncryptionConfigXML) != 0 {
b.sseConfig, err = bucketsse.ParseBucketSSEConfig(bytes.NewReader(b.EncryptionConfigXML))
if err != nil {
return err
}
} else {
b.sseConfig = nil
}
if len(b.TaggingConfigXML) != 0 {
b.taggingConfig, err = tags.ParseBucketXML(bytes.NewReader(b.TaggingConfigXML))
if err != nil {
return err
}
} else {
b.taggingConfig = nil
}
if len(b.ObjectLockConfigXML) != 0 {
b.objectLockConfig, err = objectlock.ParseObjectLockConfig(bytes.NewReader(b.ObjectLockConfigXML))
if err != nil {
return err
}
} else {
b.objectLockConfig = nil
}
if len(b.QuotaConfigJSON) != 0 {
b.quotaConfig, err = parseBucketQuota(b.Name, b.QuotaConfigJSON)
if err != nil {
return err
}
}
return nil
}
func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI ObjectLayer) error {
legacyConfigs := []string{
legacyBucketObjectLockEnabledConfigFile,
bucketPolicyConfig,
bucketNotificationConfig,
bucketLifecycleConfig,
bucketQuotaConfigFile,
bucketSSEConfig,
bucketTaggingConfig,
objectLockConfig,
}
configs := make(map[string][]byte)
// Handle migration from lockEnabled to newer format.
if b.LockEnabled {
configs[objectLockConfig] = enabledBucketObjectLockConfig
b.LockEnabled = false // unset the legacy value;
// we are only interested in the b.ObjectLockConfigXML or objectLockConfig value
}
for _, legacyFile := range legacyConfigs {
configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
configData, err := readConfig(ctx, objectAPI, configFile)
if err != nil {
if errors.Is(err, errConfigNotFound) {
// legacy file config not found, proceed to look for new metadata.
continue
}
return err
}
configs[legacyFile] = configData
}
if len(configs) == 0 {
// nothing to update, return right away.
return b.parseAllConfigs(ctx, objectAPI)
}
for legacyFile, configData := range configs {
switch legacyFile {
case legacyBucketObjectLockEnabledConfigFile:
if string(configData) == legacyBucketObjectLockEnabledConfig {
b.ObjectLockConfigXML = enabledBucketObjectLockConfig
b.LockEnabled = false // unset the legacy value;
// we are only interested in b.ObjectLockConfigXML
}
case bucketPolicyConfig:
b.PolicyConfigJSON = configData
case bucketNotificationConfig:
b.NotificationConfigXML = configData
case bucketLifecycleConfig:
b.LifecycleConfigXML = configData
case bucketSSEConfig:
b.EncryptionConfigXML = configData
case bucketTaggingConfig:
b.TaggingConfigXML = configData
case objectLockConfig:
b.ObjectLockConfigXML = configData
case bucketQuotaConfigFile:
b.QuotaConfigJSON = configData
}
}
if err := b.Save(ctx, objectAPI); err != nil {
return err
}
for legacyFile := range configs {
configFile := path.Join(bucketConfigPrefix, b.Name, legacyFile)
if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
logger.LogIf(ctx, err)
}
}
return nil
}
// Save config to supplied ObjectLayer api.
func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error {
if err := b.parseAllConfigs(ctx, api); err != nil {
return err
}
data := make([]byte, 4, b.Msgsize()+4)
// Initialize the header.
binary.LittleEndian.PutUint16(data[0:2], bucketMetadataFormat)
binary.LittleEndian.PutUint16(data[2:4], bucketMetadataVersion)
// Marshal the bucket metadata
data, err := b.MarshalMsg(data)
if err != nil {
return err
}
configFile := path.Join(bucketConfigPrefix, b.Name, bucketMetadataFile)
return saveConfig(ctx, api, configFile, data)
}
// deleteBucketMetadata deletes bucket metadata.
// If the config does not exist, no error is returned.
func deleteBucketMetadata(ctx context.Context, obj ObjectLayer, bucket string) error {
metadataFiles := []string{
dataUsageCacheName,
bucketMetadataFile,
}
for _, metaFile := range metadataFiles {
configFile := path.Join(bucketConfigPrefix, bucket, metaFile)
if err := deleteConfig(ctx, obj, configFile); err != nil && err != errConfigNotFound {
return err
}
}
return nil
}

View file

@ -1,335 +0,0 @@
package legacy
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Created":
z.Created, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Created")
return
}
case "LockEnabled":
z.LockEnabled, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
case "PolicyConfigJSON":
z.PolicyConfigJSON, err = dc.ReadBytes(z.PolicyConfigJSON)
if err != nil {
err = msgp.WrapError(err, "PolicyConfigJSON")
return
}
case "NotificationConfigXML":
z.NotificationConfigXML, err = dc.ReadBytes(z.NotificationConfigXML)
if err != nil {
err = msgp.WrapError(err, "NotificationConfigXML")
return
}
case "LifecycleConfigXML":
z.LifecycleConfigXML, err = dc.ReadBytes(z.LifecycleConfigXML)
if err != nil {
err = msgp.WrapError(err, "LifecycleConfigXML")
return
}
case "ObjectLockConfigXML":
z.ObjectLockConfigXML, err = dc.ReadBytes(z.ObjectLockConfigXML)
if err != nil {
err = msgp.WrapError(err, "ObjectLockConfigXML")
return
}
case "EncryptionConfigXML":
z.EncryptionConfigXML, err = dc.ReadBytes(z.EncryptionConfigXML)
if err != nil {
err = msgp.WrapError(err, "EncryptionConfigXML")
return
}
case "TaggingConfigXML":
z.TaggingConfigXML, err = dc.ReadBytes(z.TaggingConfigXML)
if err != nil {
err = msgp.WrapError(err, "TaggingConfigXML")
return
}
case "QuotaConfigJSON":
z.QuotaConfigJSON, err = dc.ReadBytes(z.QuotaConfigJSON)
if err != nil {
err = msgp.WrapError(err, "QuotaConfigJSON")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 10
// write "Name"
err = en.Append(0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "Created"
err = en.Append(0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteTime(z.Created)
if err != nil {
err = msgp.WrapError(err, "Created")
return
}
// write "LockEnabled"
err = en.Append(0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.LockEnabled)
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
// write "PolicyConfigJSON"
err = en.Append(0xb0, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
if err != nil {
return
}
err = en.WriteBytes(z.PolicyConfigJSON)
if err != nil {
err = msgp.WrapError(err, "PolicyConfigJSON")
return
}
// write "NotificationConfigXML"
err = en.Append(0xb5, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
if err != nil {
return
}
err = en.WriteBytes(z.NotificationConfigXML)
if err != nil {
err = msgp.WrapError(err, "NotificationConfigXML")
return
}
// write "LifecycleConfigXML"
err = en.Append(0xb2, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
if err != nil {
return
}
err = en.WriteBytes(z.LifecycleConfigXML)
if err != nil {
err = msgp.WrapError(err, "LifecycleConfigXML")
return
}
// write "ObjectLockConfigXML"
err = en.Append(0xb3, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
if err != nil {
return
}
err = en.WriteBytes(z.ObjectLockConfigXML)
if err != nil {
err = msgp.WrapError(err, "ObjectLockConfigXML")
return
}
// write "EncryptionConfigXML"
err = en.Append(0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
if err != nil {
return
}
err = en.WriteBytes(z.EncryptionConfigXML)
if err != nil {
err = msgp.WrapError(err, "EncryptionConfigXML")
return
}
// write "TaggingConfigXML"
err = en.Append(0xb0, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
if err != nil {
return
}
err = en.WriteBytes(z.TaggingConfigXML)
if err != nil {
err = msgp.WrapError(err, "TaggingConfigXML")
return
}
// write "QuotaConfigJSON"
err = en.Append(0xaf, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
if err != nil {
return
}
err = en.WriteBytes(z.QuotaConfigJSON)
if err != nil {
err = msgp.WrapError(err, "QuotaConfigJSON")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 10
// string "Name"
o = append(o, 0x8a, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Created"
o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64)
o = msgp.AppendTime(o, z.Created)
// string "LockEnabled"
o = append(o, 0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
o = msgp.AppendBool(o, z.LockEnabled)
// string "PolicyConfigJSON"
o = append(o, 0xb0, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
o = msgp.AppendBytes(o, z.PolicyConfigJSON)
// string "NotificationConfigXML"
o = append(o, 0xb5, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
o = msgp.AppendBytes(o, z.NotificationConfigXML)
// string "LifecycleConfigXML"
o = append(o, 0xb2, 0x4c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
o = msgp.AppendBytes(o, z.LifecycleConfigXML)
// string "ObjectLockConfigXML"
o = append(o, 0xb3, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
o = msgp.AppendBytes(o, z.ObjectLockConfigXML)
// string "EncryptionConfigXML"
o = append(o, 0xb3, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
o = msgp.AppendBytes(o, z.EncryptionConfigXML)
// string "TaggingConfigXML"
o = append(o, 0xb0, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x58, 0x4d, 0x4c)
o = msgp.AppendBytes(o, z.TaggingConfigXML)
// string "QuotaConfigJSON"
o = append(o, 0xaf, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x53, 0x4f, 0x4e)
o = msgp.AppendBytes(o, z.QuotaConfigJSON)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Created":
z.Created, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Created")
return
}
case "LockEnabled":
z.LockEnabled, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
case "PolicyConfigJSON":
z.PolicyConfigJSON, bts, err = msgp.ReadBytesBytes(bts, z.PolicyConfigJSON)
if err != nil {
err = msgp.WrapError(err, "PolicyConfigJSON")
return
}
case "NotificationConfigXML":
z.NotificationConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.NotificationConfigXML)
if err != nil {
err = msgp.WrapError(err, "NotificationConfigXML")
return
}
case "LifecycleConfigXML":
z.LifecycleConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.LifecycleConfigXML)
if err != nil {
err = msgp.WrapError(err, "LifecycleConfigXML")
return
}
case "ObjectLockConfigXML":
z.ObjectLockConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.ObjectLockConfigXML)
if err != nil {
err = msgp.WrapError(err, "ObjectLockConfigXML")
return
}
case "EncryptionConfigXML":
z.EncryptionConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.EncryptionConfigXML)
if err != nil {
err = msgp.WrapError(err, "EncryptionConfigXML")
return
}
case "TaggingConfigXML":
z.TaggingConfigXML, bts, err = msgp.ReadBytesBytes(bts, z.TaggingConfigXML)
if err != nil {
err = msgp.WrapError(err, "TaggingConfigXML")
return
}
case "QuotaConfigJSON":
z.QuotaConfigJSON, bts, err = msgp.ReadBytesBytes(bts, z.QuotaConfigJSON)
if err != nil {
err = msgp.WrapError(err, "QuotaConfigJSON")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BucketMetadata) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON)
return
}

View file

@ -1,123 +0,0 @@
package legacy
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBucketMetadata(t *testing.T) {
v := BucketMetadata{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBucketMetadata(b *testing.B) {
v := BucketMetadata{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBucketMetadata(b *testing.B) {
v := BucketMetadata{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBucketMetadata(b *testing.B) {
v := BucketMetadata{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBucketMetadata(t *testing.T) {
v := BucketMetadata{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBucketMetadata Msgsize() is inaccurate")
}
vn := BucketMetadata{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBucketMetadata(b *testing.B) {
v := BucketMetadata{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBucketMetadata(b *testing.B) {
v := BucketMetadata{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

@@ -1,309 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"encoding/json"
"encoding/xml"
"io"
"net/http"
"reflect"
"time"
"github.com/gorilla/mux"
xhttp "github.com/minio/minio/legacy/http"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/event"
)
const (
bucketConfigPrefix = "buckets"
bucketNotificationConfig = "notification.xml"
)
// GetBucketNotificationHandler - This HTTP handler returns event notification configuration
// as per http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
// It returns an empty configuration if one is not set.
func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketNotification")
defer logger.AuditLog(w, r, "GetBucketNotification", mustGetClaimsFromToken(r))
vars := mux.Vars(r)
bucketName := vars["bucket"]
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsNotificationSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
_, err := objAPI.GetBucketInfo(ctx, bucketName)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
config, err := globalBucketMetadataSys.GetNotificationConfig(bucketName)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
config.SetRegion(globalServerRegion)
if err = config.Validate(globalServerRegion, globalNotificationSys.targetList); err != nil {
arnErr, ok := err.(*event.ErrARNNotFound)
if ok {
for i, queue := range config.QueueList {
// Remove queues whose ARNs cannot be found, because we previously
// allowed unexpected entries to be added to the config.
//
// Newer configs disallow changing or turning off notification
// targets without first removing their ARNs from the notification
// configuration, so this problem will not recur.
if reflect.DeepEqual(queue.ARN, arnErr.ARN) && i < len(config.QueueList) {
config.QueueList = append(config.QueueList[:i],
config.QueueList[i+1:]...)
}
// This is a one-time activity; we do it here so that stale
// ARNs are removed. We should never again reach a state
// where stale notification configs exist.
}
} else {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
writeSuccessResponseXML(w, configData)
}
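The stale-ARN cleanup above removes entries from config.QueueList while ranging over it, which is why the extra i < len(config.QueueList) guard is needed: after a removal, the index from the original range can run past the shrunken slice. A minimal stand-alone sketch (with a hypothetical queue stand-in type, not the event package's) of the same filtering done in a single pass, which avoids the index bookkeeping entirely:

package example

// queue is a stand-in for the notification queue type; only the ARN matters here.
type queue struct{ ARN string }

// dropStaleARN returns queues with every entry matching the stale ARN removed.
// Filtering into queues[:0] reuses the backing array and never indexes past a
// shrunken length, since the loop only reads the original elements.
func dropStaleARN(queues []queue, stale string) []queue {
	kept := queues[:0]
	for _, q := range queues {
		if q.ARN != stale {
			kept = append(kept, q)
		}
	}
	return kept
}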
// PutBucketNotificationHandler - This HTTP handler stores given notification configuration as per
// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html.
func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketNotification")
defer logger.AuditLog(w, r, "PutBucketNotification", mustGetClaimsFromToken(r))
objectAPI := api.ObjectAPI()
if objectAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !objectAPI.IsNotificationSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucketName := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketNotificationAction, bucketName, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
_, err := objectAPI.GetBucketInfo(ctx, bucketName)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// PutBucketNotification always needs a Content-Length.
if r.ContentLength <= 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerRegion, globalNotificationSys.targetList)
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
if event.IsEventError(err) {
apiErr = toAPIError(ctx, err)
}
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
configData, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucketName, bucketNotificationConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rulesMap := config.ToRulesMap()
globalNotificationSys.AddRulesMap(bucketName, rulesMap)
writeSuccessResponseHeadersOnly(w)
}
func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListenBucketNotification")
defer logger.AuditLog(w, r, "ListenBucketNotification", mustGetClaimsFromToken(r))
// Validate if bucket exists.
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsNotificationSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
if !objAPI.IsListenBucketSupported() {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucketName := vars["bucket"]
values := r.URL.Query()
values.Set(peerRESTListenBucket, bucketName)
var prefix string
if len(values[peerRESTListenPrefix]) > 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNamePrefix), r.URL, guessIsBrowserReq(r))
return
}
if len(values[peerRESTListenPrefix]) == 1 {
if err := event.ValidateFilterRuleValue(values[peerRESTListenPrefix][0]); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
prefix = values[peerRESTListenPrefix][0]
}
var suffix string
if len(values[peerRESTListenSuffix]) > 1 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrFilterNameSuffix), r.URL, guessIsBrowserReq(r))
return
}
if len(values[peerRESTListenSuffix]) == 1 {
if err := event.ValidateFilterRuleValue(values[peerRESTListenSuffix][0]); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
suffix = values[peerRESTListenSuffix][0]
}
pattern := event.NewPattern(prefix, suffix)
var eventNames []event.Name
for _, s := range values[peerRESTListenEvents] {
eventName, err := event.ParseName(s)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
eventNames = append(eventNames, eventName)
}
if _, err := objAPI.GetBucketInfo(ctx, bucketName); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
rulesMap := event.NewRulesMap(eventNames, pattern, event.TargetID{ID: mustGetUUID()})
w.Header().Set(xhttp.ContentType, "text/event-stream")
// Listen Publisher and peer-listen-client uses nonblocking send and hence does not wait for slow receivers.
// Use buffered channel to take care of burst sends or slow w.Write()
listenCh := make(chan interface{}, 4000)
peers := newPeerRestClients(globalEndpoints)
globalHTTPListen.Subscribe(listenCh, ctx.Done(), func(evI interface{}) bool {
ev, ok := evI.(event.Event)
if !ok {
return false
}
if ev.S3.Bucket.Name != values.Get(peerRESTListenBucket) {
return false
}
return rulesMap.MatchSimple(ev.EventName, ev.S3.Object.Key)
})
for _, peer := range peers {
if peer == nil {
continue
}
peer.Listen(listenCh, ctx.Done(), values)
}
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
enc := json.NewEncoder(w)
for {
select {
case evI := <-listenCh:
ev := evI.(event.Event)
if len(string(ev.EventName)) > 0 {
if err := enc.Encode(struct{ Records []event.Event }{[]event.Event{ev}}); err != nil {
return
}
} else {
if _, err := w.Write([]byte(" ")); err != nil {
return
}
}
w.(http.Flusher).Flush()
case <-keepAliveTicker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case <-ctx.Done():
return
}
}
}
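The select loop above is the core of the listen API: events arrive on a generously buffered channel so the non-blocking publishers never wait on a slow client, and a 500 ms ticker writes whitespace keep-alives so intermediaries do not drop the idle connection. A condensed stand-alone sketch of the same pattern (stdlib only; not the original implementation, which also fans in events from peers):

package example

import (
	"encoding/json"
	"net/http"
	"time"
)

// streamEvents drains a buffered event channel into an event-stream response,
// interleaving whitespace keep-alives, and flushing after every write so the
// client sees each record immediately.
func streamEvents(w http.ResponseWriter, events <-chan interface{}, done <-chan struct{}) {
	w.Header().Set("Content-Type", "text/event-stream")
	flusher, _ := w.(http.Flusher)
	enc := json.NewEncoder(w)
	keepAlive := time.NewTicker(500 * time.Millisecond)
	defer keepAlive.Stop()
	for {
		select {
		case ev := <-events:
			if err := enc.Encode(ev); err != nil {
				return
			}
		case <-keepAlive.C:
			if _, err := w.Write([]byte(" ")); err != nil {
				return
			}
		case <-done:
			return
		}
		if flusher != nil {
			flusher.Flush()
		}
	}
}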

@@ -1,411 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019-2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"math"
"net/http"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/auth"
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
)
// BucketObjectLockSys - map of bucket and retention configuration.
type BucketObjectLockSys struct{}
// Get - Get retention configuration.
func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, err error) {
if globalIsGateway {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return r, errServerNotInitialized
}
return r, nil
}
config, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName)
if err != nil {
if _, ok := err.(BucketObjectLockConfigNotFound); ok {
return r, nil
}
return r, err
}
return config.ToRetention(), nil
}
// Similar to enforceRetentionBypassForDelete but for WebUI
func enforceRetentionBypassForDeleteWeb(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, govBypassPerms bool) APIErrorCode {
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return toAPIErrorCode(ctx, err)
}
oi, err := getObjectInfoFn(ctx, bucket, object, opts)
if err != nil {
return toAPIErrorCode(ctx, err)
}
lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
return ErrObjectLocked
}
ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
if ret.Mode.Valid() {
switch ret.Mode {
case objectlock.RetCompliance:
// In compliance mode, a protected object version can't be overwritten
// or deleted by any user, including the root user in your AWS account.
// When an object is locked in compliance mode, its retention mode can't
// be changed, and its retention period can't be shortened. Compliance mode
// ensures that an object version can't be overwritten or deleted for the
// duration of the retention period.
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
return ErrNone
case objectlock.RetGovernance:
// In governance mode, users can't overwrite or delete an object
// version or alter its lock settings unless they have special
// permissions. With governance mode, you protect objects against
// being deleted by most users, but you can still grant some users
// permission to alter the retention settings or delete the object
// if necessary. You can also use governance mode to test retention-period
// settings before creating a compliance-mode retention period.
// To override or remove governance-mode retention settings, a
// user must have the s3:BypassGovernanceRetention permission
// and must explicitly include x-amz-bypass-governance-retention:true
// as a request header with any request that requires overriding
// governance mode.
byPassSet := govBypassPerms && objectlock.IsObjectLockGovernanceBypassSet(r.Header)
if !byPassSet {
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
if !govBypassPerms {
return ErrObjectLocked
}
return ErrNone
}
}
}
return ErrNone
}
// enforceRetentionForDeletion checks whether it is appropriate to remove an
// object according to the locking configuration when the deletion is driven by lifecycle or bucket quota enforcement.
func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locked bool) {
lhold := objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
return true
}
ret := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
if ret.Mode.Valid() && (ret.Mode == objectlock.RetCompliance || ret.Mode == objectlock.RetGovernance) {
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return true
}
if ret.RetainUntilDate.After(t) {
return true
}
}
return false
}
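The comments above spell out the AWS semantics these checks implement. A compact stand-alone sketch (not the original helpers; the mode strings stand in for objectlock.RetCompliance and objectlock.RetGovernance) of the deletion decision they encode:

package example

import "time"

// deletionBlocked reports whether an object delete must be refused:
// a legal hold always blocks; COMPLIANCE blocks until the retention date
// passes; GOVERNANCE blocks until then unless the caller both holds the
// bypass permission and sent x-amz-bypass-governance-retention:true.
func deletionBlocked(mode string, retainUntil, now time.Time, legalHold, governanceBypass bool) bool {
	if legalHold {
		return true
	}
	switch mode {
	case "COMPLIANCE":
		return retainUntil.After(now)
	case "GOVERNANCE":
		return retainUntil.After(now) && !governanceBypass
	}
	return false
}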
// enforceRetentionBypassForDelete checks whether an existing object under governance can be deleted
// with governance bypass headers set in the request.
// Objects under site-wide WORM can never be overwritten.
// For objects in "Governance" mode, overwrite is allowed if a) the object retention date is past, OR
// b) governance bypass headers are set and the user has governance bypass permissions.
// Objects in "Compliance" mode can be overwritten only if the retention date is past.
func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn) APIErrorCode {
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return toAPIErrorCode(ctx, err)
}
oi, err := getObjectInfoFn(ctx, bucket, object, opts)
if err != nil {
return toAPIErrorCode(ctx, err)
}
lhold := objectlock.GetObjectLegalHoldMeta(oi.UserDefined)
if lhold.Status.Valid() && lhold.Status == objectlock.LegalHoldOn {
return ErrObjectLocked
}
ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
if ret.Mode.Valid() {
switch ret.Mode {
case objectlock.RetCompliance:
// In compliance mode, a protected object version can't be overwritten
// or deleted by any user, including the root user in your AWS account.
// When an object is locked in compliance mode, its retention mode can't
// be changed, and its retention period can't be shortened. Compliance mode
// ensures that an object version can't be overwritten or deleted for the
// duration of the retention period.
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
return ErrNone
case objectlock.RetGovernance:
// In governance mode, users can't overwrite or delete an object
// version or alter its lock settings unless they have special
// permissions. With governance mode, you protect objects against
// being deleted by most users, but you can still grant some users
// permission to alter the retention settings or delete the object
// if necessary. You can also use governance mode to test retention-period
// settings before creating a compliance-mode retention period.
// To override or remove governance-mode retention settings, a
// user must have the s3:BypassGovernanceRetention permission
// and must explicitly include x-amz-bypass-governance-retention:true
// as a request header with any request that requires overriding
// governance mode.
//
byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
if !byPassSet {
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return ErrObjectLocked
}
if !ret.RetainUntilDate.Before(t) {
return ErrObjectLocked
}
return ErrNone
}
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
// If you try to delete objects protected by governance mode and have s3:BypassGovernanceRetention
// or s3:GetBucketObjectLockConfiguration permissions, the operation will succeed.
govBypassPerms1 := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object)
govBypassPerms2 := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, object)
if govBypassPerms1 != ErrNone && govBypassPerms2 != ErrNone {
return ErrAccessDenied
}
}
}
return ErrNone
}
// enforceRetentionBypassForPut checks whether an existing object under governance can be overwritten
// with governance bypass headers set in the request.
// Objects under site-wide WORM cannot be overwritten.
// For objects in "Governance" mode, overwrite is allowed if a) the object retention date is past, OR
// b) governance bypass headers are set and the user has governance bypass permissions.
// Objects in "Compliance" mode can be overwritten only if the retention date is being extended. No mode change is permitted.
func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, objRetention *objectlock.ObjectRetention, cred auth.Credentials, owner bool, claims map[string]interface{}) (ObjectInfo, APIErrorCode) {
byPassSet := objectlock.IsObjectLockGovernanceBypassSet(r.Header)
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return ObjectInfo{}, toAPIErrorCode(ctx, err)
}
oi, err := getObjectInfoFn(ctx, bucket, object, opts)
if err != nil {
return oi, toAPIErrorCode(ctx, err)
}
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return oi, ErrObjectLocked
}
// Pass in the number of days relative to the current time, to additionally verify the "object-lock-remaining-retention-days" policy condition, if any.
days := int(math.Ceil(math.Abs(objRetention.RetainUntilDate.Sub(t).Hours()) / 24))
ret := objectlock.GetObjectRetentionMeta(oi.UserDefined)
if ret.Mode.Valid() {
// Retention has expired; the settings may be changed freely.
if ret.RetainUntilDate.Before(t) {
perm := isPutRetentionAllowed(bucket, object,
days, objRetention.RetainUntilDate.Time,
objRetention.Mode, byPassSet, r, cred,
owner, claims)
return oi, perm
}
switch ret.Mode {
case objectlock.RetGovernance:
govPerm := isPutRetentionAllowed(bucket, object, days,
objRetention.RetainUntilDate.Time, objRetention.Mode,
byPassSet, r, cred, owner, claims)
// A governance-mode retention period cannot be shortened if x-amz-bypass-governance-retention is not set.
if !byPassSet {
if objRetention.Mode != objectlock.RetGovernance || objRetention.RetainUntilDate.Before(ret.RetainUntilDate.Time) {
return oi, ErrObjectLocked
}
}
return oi, govPerm
case objectlock.RetCompliance:
// Compliance retention mode cannot be changed or shortened.
// https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html#object-lock-retention-modes
if objRetention.Mode != objectlock.RetCompliance || objRetention.RetainUntilDate.Before(ret.RetainUntilDate.Time) {
return oi, ErrObjectLocked
}
compliancePerm := isPutRetentionAllowed(bucket, object,
days, objRetention.RetainUntilDate.Time, objRetention.Mode,
false, r, cred, owner, claims)
return oi, compliancePerm
}
return oi, ErrNone
} // No pre-existing retention metadata present.
perm := isPutRetentionAllowed(bucket, object,
days, objRetention.RetainUntilDate.Time,
objRetention.Mode, byPassSet, r, cred, owner, claims)
return oi, perm
}
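The days value computed above feeds the "object-lock-remaining-retention-days" policy condition: the absolute distance between now and the requested retain-until date, rounded up to whole days. A minimal sketch (not the original code) with a worked example in the comment:

package example

import (
	"math"
	"time"
)

// remainingDays mirrors the computation above. A retain-until date 36 hours
// from now yields 2 (rounded up), not 1; a date in the past also yields a
// positive count because of math.Abs.
func remainingDays(retainUntil, now time.Time) int {
	return int(math.Ceil(math.Abs(retainUntil.Sub(now).Hours()) / 24))
}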
// checkPutObjectLockAllowed enforces the object retention policy and legal hold policy
// for requests with WORM headers.
// See https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-managing.html for the spec.
// For non-existent objects with object retention headers set, this method returns ErrNone if the bucket has
// locking enabled and the user has the requisite permission (s3:PutObjectRetention).
// If the object exists on the object store and site-wide WORM is enabled, this method
// returns an error. For objects in "Governance" mode, overwrite is allowed if the retention date has expired.
// For objects in "Compliance" mode, the retention date cannot be shortened, and the mode cannot be altered.
// For objects with the legal hold header set, the s3:PutObjectLegalHold permission is expected to be set.
// Both legal hold and retention can be applied independently on an object.
func checkPutObjectLockAllowed(ctx context.Context, r *http.Request, bucket, object string, getObjectInfoFn GetObjectInfoFn, retentionPermErr, legalHoldPermErr APIErrorCode) (objectlock.RetMode, objectlock.RetentionDate, objectlock.ObjectLegalHold, APIErrorCode) {
var mode objectlock.RetMode
var retainDate objectlock.RetentionDate
var legalHold objectlock.ObjectLegalHold
retentionRequested := objectlock.IsObjectLockRetentionRequested(r.Header)
legalHoldRequested := objectlock.IsObjectLockLegalHoldRequested(r.Header)
retentionCfg, err := globalBucketObjectLockSys.Get(bucket)
if err != nil {
return mode, retainDate, legalHold, ErrInvalidBucketObjectLockConfiguration
}
if !retentionCfg.LockEnabled {
if legalHoldRequested || retentionRequested {
return mode, retainDate, legalHold, ErrInvalidBucketObjectLockConfiguration
}
// If this is not a WORM-enabled bucket, return right here.
return mode, retainDate, legalHold, ErrNone
}
var objExists bool
opts, err := getOpts(ctx, r, bucket, object)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return mode, retainDate, legalHold, ErrObjectLocked
}
if objInfo, err := getObjectInfoFn(ctx, bucket, object, opts); err == nil {
objExists = true
// Name this ret rather than r to avoid shadowing the *http.Request.
ret := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
if ret.Mode == objectlock.RetCompliance && ret.RetainUntilDate.After(t) {
return mode, retainDate, legalHold, ErrObjectLocked
}
mode = ret.Mode
retainDate = ret.RetainUntilDate
legalHold = objectlock.GetObjectLegalHoldMeta(objInfo.UserDefined)
// Disallow overwriting an object on legal hold
if legalHold.Status == objectlock.LegalHoldOn {
return mode, retainDate, legalHold, ErrObjectLocked
}
}
if legalHoldRequested {
var lerr error
if legalHold, lerr = objectlock.ParseObjectLockLegalHoldHeaders(r.Header); lerr != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, lerr)
}
}
if retentionRequested {
legalHold, err := objectlock.ParseObjectLockLegalHoldHeaders(r.Header)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
rMode, rDate, err := objectlock.ParseObjectLockRetentionHeaders(r.Header)
if err != nil {
return mode, retainDate, legalHold, toAPIErrorCode(ctx, err)
}
if objExists && retainDate.After(t) {
return mode, retainDate, legalHold, ErrObjectLocked
}
if retentionPermErr != ErrNone {
return mode, retainDate, legalHold, retentionPermErr
}
return rMode, rDate, legalHold, ErrNone
}
if !retentionRequested && retentionCfg.Validity > 0 {
if retentionPermErr != ErrNone {
return mode, retainDate, legalHold, retentionPermErr
}
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
return mode, retainDate, legalHold, ErrObjectLocked
}
// AWS S3 just creates a new version of the object when it is overwritten.
if objExists && retainDate.After(t) {
return mode, retainDate, legalHold, ErrObjectLocked
}
if !legalHoldRequested {
// inherit retention from bucket configuration
return retentionCfg.Mode, objectlock.RetentionDate{Time: t.Add(retentionCfg.Validity)}, legalHold, ErrNone
}
return "", objectlock.RetentionDate{}, legalHold, ErrNone
}
return mode, retainDate, legalHold, ErrNone
}
// NewBucketObjectLockSys returns initialized BucketObjectLockSys
func NewBucketObjectLockSys() *BucketObjectLockSys {
return &BucketObjectLockSys{}
}

@@ -1,181 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"encoding/json"
"io"
"net/http"
humanize "github.com/dustin/go-humanize"
"github.com/gorilla/mux"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/bucket/policy"
)
const (
// As per the AWS S3 specification, policy JSON data of up to 20 KiB is allowed.
maxBucketPolicySize = 20 * humanize.KiByte
// Policy configuration file.
bucketPolicyConfig = "policy.json"
)
// PutBucketPolicyHandler - This HTTP handler stores given bucket policy configuration as per
// https://docs.aws.amazon.com/AmazonS3/latest/dev/access-policy-language-overview.html
func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "PutBucketPolicy")
defer logger.AuditLog(w, r, "PutBucketPolicy", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Error out if Content-Length is missing.
// PutBucketPolicy always needs Content-Length.
if r.ContentLength <= 0 {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMissingContentLength), r.URL, guessIsBrowserReq(r))
return
}
// Error out if Content-Length is beyond allowed size.
if r.ContentLength > maxBucketPolicySize {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrPolicyTooLarge), r.URL, guessIsBrowserReq(r))
return
}
bucketPolicy, err := policy.ParseConfig(io.LimitReader(r.Body, r.ContentLength), bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Version in policy must not be empty
if bucketPolicy.Version == "" {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPolicy), r.URL, guessIsBrowserReq(r))
return
}
configData, err := json.Marshal(bucketPolicy)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err = globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, configData); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Success.
writeSuccessNoContent(w)
}
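PutBucketPolicyHandler enforces a strict body-size discipline: a missing Content-Length yields 411, a declared length beyond the 20 KiB cap yields 400, and the body is read through io.LimitReader so no more than the declared length is ever consumed. A minimal stand-alone sketch of the same pattern (maxPolicySize is a hypothetical mirror of maxBucketPolicySize above):

package example

import (
	"errors"
	"io"
	"net/http"
)

const maxPolicySize = 20 << 10 // hypothetical mirror of maxBucketPolicySize (20 KiB)

// readPolicyBody rejects missing or oversized declared lengths up front and
// caps the actual read at the declared length.
func readPolicyBody(r *http.Request) ([]byte, error) {
	if r.ContentLength <= 0 {
		return nil, errors.New("missing content length")
	}
	if r.ContentLength > maxPolicySize {
		return nil, errors.New("policy document too large")
	}
	return io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
}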
// DeleteBucketPolicyHandler - This HTTP handler removes bucket policy configuration.
func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "DeleteBucketPolicy")
defer logger.AuditLog(w, r, "DeleteBucketPolicy", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.DeleteBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
if err := globalBucketMetadataSys.Update(bucket, bucketPolicyConfig, nil); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Success.
writeSuccessNoContent(w)
}
// GetBucketPolicyHandler - This HTTP handler returns bucket policy configuration.
func (api objectAPIHandlers) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetBucketPolicy")
defer logger.AuditLog(w, r, "GetBucketPolicy", mustGetClaimsFromToken(r))
objAPI := api.ObjectAPI()
if objAPI == nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
return
}
vars := mux.Vars(r)
bucket := vars["bucket"]
if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketPolicyAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Read bucket access policy.
config, err := globalBucketMetadataSys.GetPolicyConfig(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
configData, err := json.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
// Write to client.
writeSuccessResponseJSON(w, configData)
}

@@ -1,724 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
"strings"
"testing"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/policy/condition"
)
func getAnonReadOnlyBucketPolicy(bucketName string) *policy.Policy {
return &policy.Policy{
Version: policy.DefaultVersion,
Statements: []policy.Statement{policy.NewStatement(
policy.Allow,
policy.NewPrincipal("*"),
policy.NewActionSet(policy.GetBucketLocationAction, policy.ListBucketAction),
policy.NewResourceSet(policy.NewResource(bucketName, "")),
condition.NewFunctions(),
)},
}
}
func getAnonWriteOnlyBucketPolicy(bucketName string) *policy.Policy {
return &policy.Policy{
Version: policy.DefaultVersion,
Statements: []policy.Statement{policy.NewStatement(
policy.Allow,
policy.NewPrincipal("*"),
policy.NewActionSet(
policy.GetBucketLocationAction,
policy.ListBucketMultipartUploadsAction,
),
policy.NewResourceSet(policy.NewResource(bucketName, "")),
condition.NewFunctions(),
)},
}
}
func getAnonReadOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
return &policy.Policy{
Version: policy.DefaultVersion,
Statements: []policy.Statement{policy.NewStatement(
policy.Allow,
policy.NewPrincipal("*"),
policy.NewActionSet(policy.GetObjectAction),
policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
condition.NewFunctions(),
)},
}
}
func getAnonWriteOnlyObjectPolicy(bucketName, prefix string) *policy.Policy {
return &policy.Policy{
Version: policy.DefaultVersion,
Statements: []policy.Statement{policy.NewStatement(
policy.Allow,
policy.NewPrincipal("*"),
policy.NewActionSet(
policy.AbortMultipartUploadAction,
policy.DeleteObjectAction,
policy.ListMultipartUploadPartsAction,
policy.PutObjectAction,
),
policy.NewResourceSet(policy.NewResource(bucketName, prefix)),
condition.NewFunctions(),
)},
}
}
// Wrapper for calling Put Bucket Policy HTTP handler tests for both XL multiple disks and single node setup.
func TestPutBucketPolicyHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testPutBucketPolicyHandler, []string{"PutBucketPolicy"})
}
// testPutBucketPolicyHandler - Test for Bucket policy end point.
func testPutBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
bucketName1 := fmt.Sprintf("%s-1", bucketName)
if err := obj.MakeBucketWithLocation(GlobalContext, bucketName1, "", false); err != nil {
t.Fatal(err)
}
// template for constructing HTTP request body for PUT bucket policy.
bucketPolicyTemplate := `{"Version":"2012-10-17","Statement":[{"Sid":"","Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetBucketLocation","s3:ListBucket"],"Resource":["arn:aws:s3:::%s"]},{"Sid":"","Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::%s/this*"]}]}`
bucketPolicyTemplateWithoutVersion := `{"Version":"","Statement":[{"Sid":"","Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetBucketLocation","s3:ListBucket"],"Resource":["arn:aws:s3:::%s"]},{"Sid":"","Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::%s/this*"]}]}`
// test cases with sample input and expected output.
testCases := []struct {
bucketName string
// bucket policy to be set,
// set as request body.
bucketPolicyReader io.ReadSeeker
// length in bytes of the bucket policy being set.
policyLen int
accessKey string
secretKey string
// expected Response.
expectedRespStatus int
}{
// Test case - 1.
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNoContent,
},
// Test case - 2.
// Setting the content length to be more than max allowed size.
// Expecting StatusBadRequest (400).
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
policyLen: maxBucketPolicySize + 1,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
},
// Test case - 3.
// Case with content-length of the HTTP request set to 0.
// Expecting the HTTP response status to be StatusLengthRequired (411).
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
policyLen: 0,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusLengthRequired,
},
// Test case - 4.
// setting the readSeeker to `nil` so that the bucket policy parser will fail.
{
bucketName: bucketName,
bucketPolicyReader: nil,
policyLen: 10,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
},
// Test case - 5.
// setting the keys to be empty.
// Expecting statusForbidden.
{
bucketName: bucketName,
bucketPolicyReader: nil,
policyLen: 10,
accessKey: "",
secretKey: "",
expectedRespStatus: http.StatusForbidden,
},
// Test case - 6.
// setting an invalid bucket policy.
// the bucket policy parser will fail.
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte("dummy-policy")),
policyLen: len([]byte("dummy-policy")),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
},
// Test case - 7.
// Different bucket name used in the HTTP request and the policy string.
// checkBucketPolicyResources should fail.
{
bucketName: bucketName1,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
},
// Test case - 8.
// non-existent bucket is used.
// writing BucketPolicy should fail.
// should result in 404 StatusNotFound
{
bucketName: "non-existent-bucket",
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, "non-existent-bucket", "non-existent-bucket"))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
},
// Test case - 9.
// non-existent bucket is used (with invalid bucket name)
// writing BucketPolicy should fail.
// should result in 404 StatusNotFound
{
bucketName: ".invalid-bucket",
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplate, ".invalid-bucket", ".invalid-bucket"))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
},
// Test case - 10.
// Existent bucket with policy with Version field empty.
// writing BucketPolicy should fail.
// should result in 400 StatusBadRequest.
{
bucketName: bucketName,
bucketPolicyReader: bytes.NewReader([]byte(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName))),
policyLen: len(fmt.Sprintf(bucketPolicyTemplateWithoutVersion, bucketName, bucketName)),
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusBadRequest,
},
}
// Iterating over the test cases, calling the function under test and asserting the response.
for i, testCase := range testCases {
// obtain the put bucket policy request body.
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testCase.bucketName),
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP method to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV4, reqV4)
if recV4.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV4.Code)
}
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testCase.bucketName),
int64(testCase.policyLen), testCase.bucketPolicyReader, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: %s: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, instanceType, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP method to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testCase.expectedRespStatus {
t.Errorf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, testCase.expectedRespStatus, recV2.Code)
}
}
// Test for Anonymous/unsigned http request.
// Bucket-policy-related functions don't support anonymous requests, so setting policies shouldn't make a difference.
bucketPolicyStr := fmt.Sprintf(bucketPolicyTemplate, bucketName, bucketName)
// create unsigned HTTP request for PutBucketPolicyHandler.
anonReq, err := newTestRequest("PUT", getPutPolicyURL("", bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)))
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
// unsigned request goes through and is validated again.
ExecObjectLayerAPIAnonTest(t, obj, "PutBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy(bucketName))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", nilBucket),
0, nil, "", "", nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling Get Bucket Policy HTTP handler tests for both XL multiple disks and single node setup.
func TestGetBucketPolicyHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testGetBucketPolicyHandler, []string{"PutBucketPolicy", "GetBucketPolicy"})
}
// testGetBucketPolicyHandler - Test for end point which fetches the access policy json of the given bucket.
func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// template for constructing HTTP request body for PUT bucket policy.
bucketPolicyTemplate := `{"Version":"2012-10-17","Statement":[{"Action":["s3:GetBucketLocation","s3:ListBucket"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s"]},{"Action":["s3:GetObject"],"Effect":"Allow","Principal":{"AWS":["*"]},"Resource":["arn:aws:s3:::%s/this*"]}]}`
// Writing bucket policy before running test on GetBucketPolicy.
putTestPolicies := []struct {
bucketName string
accessKey string
secretKey string
// expected Response.
expectedRespStatus int
}{
{bucketName, credentials.AccessKey, credentials.SecretKey, http.StatusNoContent},
}
// Iterating over the cases and writing the bucket policy.
// It's required to write the policies first before running tests on GetBucketPolicy.
for i, testPolicy := range putTestPolicies {
// obtain the put bucket policy request body.
bucketPolicyStr := fmt.Sprintf(bucketPolicyTemplate, testPolicy.bucketName, testPolicy.bucketName)
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP method to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV4, reqV4)
if recV4.Code != testPolicy.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testPolicy.expectedRespStatus, recV4.Code)
}
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP method to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testPolicy.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testPolicy.expectedRespStatus, recV2.Code)
}
}
// test cases with inputs and expected result for GetBucketPolicyHandler.
testCases := []struct {
bucketName string
accessKey string
secretKey string
// expected output.
expectedBucketPolicy string
expectedRespStatus int
}{
// Test case - 1.
// Case with valid inputs; expected to return a success status of 200 OK.
{
bucketName: bucketName,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedBucketPolicy: bucketPolicyTemplate,
expectedRespStatus: http.StatusOK,
},
// Test case - 2.
// Case with non-existent bucket name.
{
bucketName: "non-existent-bucket",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedBucketPolicy: bucketPolicyTemplate,
expectedRespStatus: http.StatusNotFound,
},
// Test case - 3.
// Case with an invalid bucket name.
{
bucketName: ".invalid-bucket-name",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedBucketPolicy: "",
expectedRespStatus: http.StatusNotFound,
},
}
// Iterating over the cases, fetching the policy and validating the response.
for i, testCase := range testCases {
// expected bucket policy json string.
expectedBucketPolicyStr := fmt.Sprintf(testCase.expectedBucketPolicy, testCase.bucketName, testCase.bucketName)
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("GET", getGetPolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler, GetBucketPolicyHandler handles the request.
apiRouter.ServeHTTP(recV4, reqV4)
// Assert the response code with the expected status.
if recV4.Code != testCase.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV4.Code)
}
// read the response body.
bucketPolicyReadBuf, err := ioutil.ReadAll(recV4.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
if recV4.Code == http.StatusOK {
// Verify whether the bucket policy fetched is same as the one inserted.
var expectedPolicy *policy.Policy
expectedPolicy, err = policy.ParseConfig(strings.NewReader(expectedBucketPolicyStr), testCase.bucketName)
if err != nil {
t.Fatalf("unexpected error. %v", err)
}
var gotPolicy *policy.Policy
gotPolicy, err = policy.ParseConfig(bytes.NewReader(bucketPolicyReadBuf), testCase.bucketName)
if err != nil {
t.Fatalf("unexpected error. %v", err)
}
if !reflect.DeepEqual(expectedPolicy, gotPolicy) {
t.Errorf("Test %d: %s: Bucket policy differs from expected value.", i+1, instanceType)
}
}
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("GET", getGetPolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler, GetBucketPolicyHandler handles the request.
apiRouter.ServeHTTP(recV2, reqV2)
// Assert the response code with the expected status.
if recV2.Code != testCase.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV2.Code)
}
// read the response body.
bucketPolicyReadBuf, err = ioutil.ReadAll(recV2.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
if recV2.Code == http.StatusOK {
// Verify whether the bucket policy fetched is same as the one inserted.
expectedPolicy, err := policy.ParseConfig(strings.NewReader(expectedBucketPolicyStr), testCase.bucketName)
if err != nil {
t.Fatalf("unexpected error. %v", err)
}
gotPolicy, err := policy.ParseConfig(bytes.NewReader(bucketPolicyReadBuf), testCase.bucketName)
if err != nil {
t.Fatalf("unexpected error. %v", err)
}
if !reflect.DeepEqual(expectedPolicy, gotPolicy) {
t.Errorf("Test %d: %s: Bucket policy differs from expected value.", i+1, instanceType)
}
}
}
// Test for Anonymous/unsigned http request.
// Bucket-policy-related functions don't support anonymous requests, so setting policies shouldn't make a difference.
// create unsigned HTTP request for GetBucketPolicyHandler.
anonReq, err := newTestRequest("GET", getPutPolicyURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
// unsigned request goes through and is validated again.
ExecObjectLayerAPIAnonTest(t, obj, "GetBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonReadOnlyBucketPolicy(bucketName))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked.
nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("GET", getGetPolicyURL("", nilBucket),
0, nil, "", "", nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}
// Wrapper for calling Delete Bucket Policy HTTP handler tests for both XL multiple disks and single node setup.
func TestDeleteBucketPolicyHandler(t *testing.T) {
ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"})
}
// testDeleteBucketPolicyHandler - Test for Delete bucket policy end point.
func testDeleteBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler,
credentials auth.Credentials, t *testing.T) {
// template for constructing HTTP request body for PUT bucket policy.
bucketPolicyTemplate := `{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:GetBucketLocation",
"s3:ListBucket"
],
"Effect": "Allow",
"Principal": {
"AWS": [
"*"
]
},
"Resource": [
"arn:aws:s3:::%s"
]
},
{
"Action": [
"s3:GetObject"
],
"Effect": "Allow",
"Principal": {
"AWS": [
"*"
]
},
"Resource": [
"arn:aws:s3:::%s/this*"
]
}
]
}`
// Writing bucket policy before running test on DeleteBucketPolicy.
putTestPolicies := []struct {
bucketName string
accessKey string
secretKey string
// expected Response.
expectedRespStatus int
}{
{
bucketName: bucketName,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNoContent,
},
}
// Iterating over the cases and writing the bucket policy.
// It's required to write the policies first before running tests on DeleteBucketPolicy.
for i, testPolicy := range putTestPolicies {
// obtain the put bucket policy request body.
bucketPolicyStr := fmt.Sprintf(bucketPolicyTemplate, testPolicy.bucketName, testPolicy.bucketName)
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV4, reqV4)
if recV4.Code != testPolicy.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testPolicy.expectedRespStatus, recV4.Code)
}
}
// test cases with input and expected output for DeleteBucketPolicyHandler.
testCases := []struct {
bucketName string
accessKey string
secretKey string
// expected response.
expectedRespStatus int
}{
// Test case - 1.
{
bucketName: bucketName,
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNoContent,
},
// Test case - 2.
// Case with non-existent-bucket.
{
bucketName: "non-existent-bucket",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
},
// Test case - 3.
// Case with an invalid bucket name.
{
bucketName: ".invalid-bucket-name",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,
expectedRespStatus: http.StatusNotFound,
},
}
// Iterating over the cases and deleting the bucket policy and then asserting response.
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV4 := httptest.NewRecorder()
// construct HTTP request for Delete bucket policy endpoint.
reqV4, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler, DeleteBucketPolicyHandler handles the request.
apiRouter.ServeHTTP(recV4, reqV4)
// Assert the response code with the expected status.
if recV4.Code != testCase.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV4.Code)
}
}
// Iterating over the cases and writing the bucket policy.
// it's required to write the policies first before running tests on DeleteBucketPolicy.
for i, testPolicy := range putTestPolicies {
// obtain the put bucket policy request body.
bucketPolicyStr := fmt.Sprintf(bucketPolicyTemplate, testPolicy.bucketName, testPolicy.bucketName)
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for PUT bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("PUT", getPutPolicyURL("", testPolicy.bucketName),
int64(len(bucketPolicyStr)), bytes.NewReader([]byte(bucketPolicyStr)), testPolicy.accessKey, testPolicy.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for PutBucketPolicyHandler: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(recV2, reqV2)
if recV2.Code != testPolicy.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testPolicy.expectedRespStatus, recV2.Code)
}
}
for i, testCase := range testCases {
// initialize HTTP NewRecorder, this records any mutations to response writer inside the handler.
recV2 := httptest.NewRecorder()
// construct HTTP request for Delete bucket policy endpoint.
reqV2, err := newTestSignedRequestV2("DELETE", getDeletePolicyURL("", testCase.bucketName),
0, nil, testCase.accessKey, testCase.secretKey, nil)
if err != nil {
t.Fatalf("Test %d: Failed to create HTTP request for GetBucketPolicyHandler: <ERROR> %v", i+1, err)
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic of the handler.
// Call the ServeHTTP to execute the handler, DeleteBucketPolicyHandler handles the request.
apiRouter.ServeHTTP(recV2, reqV2)
// Assert the response code with the expected status.
if recV2.Code != testCase.expectedRespStatus {
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV2.Code)
}
}
// Test for anonymous/unsigned HTTP request.
// Bucket policy related functions don't support anonymous requests; setting policies shouldn't make a difference.
// create unsigned HTTP request for DeleteBucketPolicyHandler.
anonReq, err := newTestRequest("DELETE", getPutPolicyURL("", bucketName), 0, nil)
if err != nil {
t.Fatalf("MinIO %s: Failed to create an anonymous request for bucket \"%s\": <ERROR> %v",
instanceType, bucketName, err)
}
// ExecObjectLayerAPIAnonTest - Calls the HTTP API handler using the anonymous request, validates the ErrAccessDeniedResponse,
// sets the bucket policy using the policy statement generated from `getWriteOnlyObjectStatement` so that the
// unsigned request goes through and it's validated again.
ExecObjectLayerAPIAnonTest(t, obj, "DeleteBucketPolicyHandler", bucketName, "", instanceType, apiRouter, anonReq, getAnonWriteOnlyBucketPolicy(bucketName))
// HTTP request for testing when `objectLayer` is set to `nil`.
// There is no need to use an existing bucket and valid input for creating the request
// since the `objectLayer==nil` check is performed before any other checks inside the handlers.
// The only aim is to generate an HTTP request in a way that the relevant/registered endpoint is invoked/called.
nilBucket := "dummy-bucket"
nilReq, err := newTestSignedRequestV4("DELETE", getDeletePolicyURL("", nilBucket),
0, nil, "", "", nil)
if err != nil {
t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
}
// execute the object layer set to `nil` test.
// `ExecObjectLayerAPINilTest` manages the operation.
ExecObjectLayerAPINilTest(t, nilBucket, "", instanceType, apiRouter, nilReq)
}
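// A hedged sketch (wrapper and endpoint names assumed from this package's
// test conventions) of how the helper above is typically wired up:
// ExecObjectLayerAPITest runs it against each backend under test, supplying
// the ObjectLayer, the API router and test credentials.
func TestDeleteBucketPolicyHandler(t *testing.T) {
	ExecObjectLayerAPITest(t, testDeleteBucketPolicyHandler, []string{"PutBucketPolicy", "DeleteBucketPolicy"})
}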

View file

@ -1,184 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"encoding/json"
"net/http"
"net/url"
"strconv"
"strings"
"time"
jsoniter "github.com/json-iterator/go"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
xhttp "github.com/minio/minio/legacy/http"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/handlers"
)
// PolicySys - policy subsystem.
type PolicySys struct{}
// Get returns stored bucket policy
func (sys *PolicySys) Get(bucket string) (*policy.Policy, error) {
return globalBucketMetadataSys.GetPolicyConfig(bucket)
}
// IsAllowed - checks whether the given policy args are allowed to continue the REST API operation.
func (sys *PolicySys) IsAllowed(args policy.Args) bool {
p, err := sys.Get(args.BucketName)
if err == nil {
return p.IsAllowed(args)
}
// Log unhandled errors.
if _, ok := err.(BucketPolicyNotFound); !ok {
logger.LogIf(GlobalContext, err)
}
// As the policy is not available for the given bucket name, return IsOwner, i.e.
// the operation is allowed only for the owner.
return args.IsOwner
}
// NewPolicySys - creates new policy system.
func NewPolicySys() *PolicySys {
return &PolicySys{}
}
func getConditionValues(r *http.Request, lc string, username string, claims map[string]interface{}) map[string][]string {
currTime := UTCNow()
principalType := "Anonymous"
if username != "" {
principalType = "User"
}
args := map[string][]string{
"CurrentTime": {currTime.Format(time.RFC3339)},
"EpochTime": {strconv.FormatInt(currTime.Unix(), 10)},
"SecureTransport": {strconv.FormatBool(r.TLS != nil)},
"SourceIp": {handlers.GetSourceIP(r)},
"UserAgent": {r.UserAgent()},
"Referer": {r.Referer()},
"principaltype": {principalType},
"userid": {username},
"username": {username},
}
if lc != "" {
args["LocationConstraint"] = []string{lc}
}
cloneHeader := r.Header.Clone()
for _, objLock := range []string{
xhttp.AmzObjectLockMode,
xhttp.AmzObjectLockLegalHold,
xhttp.AmzObjectLockRetainUntilDate,
} {
if values, ok := cloneHeader[objLock]; ok {
args[strings.TrimPrefix(objLock, "X-Amz-")] = values
}
cloneHeader.Del(objLock)
}
for key, values := range cloneHeader {
if existingValues, found := args[key]; found {
args[key] = append(existingValues, values...)
} else {
args[key] = values
}
}
var cloneURLValues = url.Values{}
for k, v := range r.URL.Query() {
cloneURLValues[k] = v
}
for _, objLock := range []string{
xhttp.AmzObjectLockMode,
xhttp.AmzObjectLockLegalHold,
xhttp.AmzObjectLockRetainUntilDate,
} {
if values, ok := cloneURLValues[objLock]; ok {
args[strings.TrimPrefix(objLock, "X-Amz-")] = values
}
cloneURLValues.Del(objLock)
}
for key, values := range cloneURLValues {
if existingValues, found := args[key]; found {
args[key] = append(existingValues, values...)
} else {
args[key] = values
}
}
// JWT specific values
for k, v := range claims {
vStr, ok := v.(string)
if ok {
args[k] = []string{vStr}
}
}
return args
}
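// A minimal sketch (values hypothetical) of how the condition map built by
// getConditionValues feeds the policy subsystem through policy.Args:
//
//	args := policy.Args{
//		AccountName:     "minio",
//		Action:          policy.GetObjectAction,
//		BucketName:      "my-bucket",
//		ObjectName:      "my-object",
//		IsOwner:         false,
//		ConditionValues: getConditionValues(r, "", "", nil),
//	}
//	allowed := globalPolicySys.IsAllowed(args)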
// PolicyToBucketAccessPolicy - converts policy.Policy to minio-go/policy.BucketAccessPolicy.
func PolicyToBucketAccessPolicy(bucketPolicy *policy.Policy) (*miniogopolicy.BucketAccessPolicy, error) {
// Return empty BucketAccessPolicy for empty bucket policy.
if bucketPolicy == nil {
return &miniogopolicy.BucketAccessPolicy{Version: policy.DefaultVersion}, nil
}
data, err := json.Marshal(bucketPolicy)
if err != nil {
// This should not happen because bucketPolicy is guaranteed to marshal into valid JSON.
return nil, err
}
var policyInfo miniogopolicy.BucketAccessPolicy
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(data, &policyInfo); err != nil {
// This should not happen because data is valid JSON.
return nil, err
}
return &policyInfo, nil
}
// BucketAccessPolicyToPolicy - converts minio-go/policy.BucketAccessPolicy to policy.Policy.
func BucketAccessPolicyToPolicy(policyInfo *miniogopolicy.BucketAccessPolicy) (*policy.Policy, error) {
data, err := json.Marshal(policyInfo)
if err != nil {
// This should not happen because policyInfo is guaranteed to marshal into valid JSON.
return nil, err
}
var bucketPolicy policy.Policy
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(data, &bucketPolicy); err != nil {
// This should not happen because data is valid JSON.
return nil, err
}
return &bucketPolicy, nil
}
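// A hedged round-trip sketch: the two converters above are inverses up to
// JSON field fidelity, so a stored policy can be surfaced in the minio-go
// representation and converted back without loss (error handling elided):
//
//	info, _ := PolicyToBucketAccessPolicy(bucketPolicy)
//	restored, _ := BucketAccessPolicyToPolicy(info)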

View file

@ -1,257 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"encoding/json"
"fmt"
"time"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/madmin"
)
// BucketQuotaSys - bucket quota configuration subsystem.
type BucketQuotaSys struct {
bucketStorageCache timedValue
}
// Get - Get quota configuration.
func (sys *BucketQuotaSys) Get(bucketName string) (*madmin.BucketQuota, error) {
if globalIsGateway {
objAPI := newObjectLayerFn()
if objAPI == nil {
return nil, errServerNotInitialized
}
return &madmin.BucketQuota{}, nil
}
return globalBucketMetadataSys.GetQuotaConfig(bucketName)
}
// NewBucketQuotaSys returns initialized BucketQuotaSys
func NewBucketQuotaSys() *BucketQuotaSys {
return &BucketQuotaSys{}
}
// parseBucketQuota parses BucketQuota from json
func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota, err error) {
quotaCfg = &madmin.BucketQuota{}
if err = json.Unmarshal(data, quotaCfg); err != nil {
return quotaCfg, err
}
if !quotaCfg.IsValid() {
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
}
return
}
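// An illustrative quota document accepted by parseBucketQuota (JSON field
// names assumed from this madmin version, values hypothetical); a "fifo"
// quota is enforced by the background expiry routine below, while a hard
// quota is enforced on writes via check():
//
//	data := []byte(`{"quota": 1073741824, "quotatype": "fifo"}`)
//	cfg, err := parseBucketQuota("my-bucket", data)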
func (sys *BucketQuotaSys) check(ctx context.Context, bucket string, size int64) error {
objAPI := newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
return errServerNotInitialized
}
q, err := sys.Get(bucket)
if err != nil {
return nil
}
if q.Type == madmin.FIFOQuota {
return nil
}
if q.Quota == 0 {
// No quota set, return quickly.
return nil
}
sys.bucketStorageCache.Once.Do(func() {
sys.bucketStorageCache.TTL = 10 * time.Second
sys.bucketStorageCache.Update = func() (interface{}, error) {
return loadDataUsageFromBackend(ctx, objAPI)
}
})
v, err := sys.bucketStorageCache.Get()
if err != nil {
return err
}
dui := v.(DataUsageInfo)
bui, ok := dui.BucketsUsage[bucket]
if !ok {
// Bucket not found, cannot enforce quota;
// the call will fail later anyway.
return nil
}
if (bui.Size + uint64(size)) > q.Quota {
return BucketQuotaExceeded{Bucket: bucket}
}
return nil
}
func enforceBucketQuota(ctx context.Context, bucket string, size int64) error {
if size < 0 {
return nil
}
return globalBucketQuotaSys.check(ctx, bucket, size)
}
const (
bgQuotaInterval = 1 * time.Hour
)
// initQuotaEnforcement starts the routine that deletes objects in buckets
// that exceed the FIFO quota
func initQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
go startBucketQuotaEnforcement(ctx, objAPI)
}
}
func startBucketQuotaEnforcement(ctx context.Context, objAPI ObjectLayer) {
for {
select {
case <-ctx.Done():
return
case <-time.NewTimer(bgQuotaInterval).C:
logger.LogIf(ctx, enforceFIFOQuota(ctx, objAPI))
}
}
}
// enforceFIFOQuota deletes objects in FIFO order until sufficient objects
// have been deleted so as to bring bucket usage within quota
func enforceFIFOQuota(ctx context.Context, objectAPI ObjectLayer) error {
// Turn off quota enforcement if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
return nil
}
buckets, err := objectAPI.ListBuckets(ctx)
if err != nil {
return err
}
dataUsageInfo, err := loadDataUsageFromBackend(ctx, objectAPI)
if err != nil {
return err
}
for _, binfo := range buckets {
bucket := binfo.Name
bui, ok := dataUsageInfo.BucketsUsage[bucket]
if !ok {
// bucket doesn't exist anymore, or we
// do not have any information to proceed.
continue
}
// Check if the current bucket has quota restrictions; if not, skip it
cfg, err := globalBucketQuotaSys.Get(bucket)
if err != nil {
continue
}
if cfg.Type != madmin.FIFOQuota {
continue
}
var toFree uint64
if bui.Size > cfg.Quota && cfg.Quota > 0 {
toFree = bui.Size - cfg.Quota
}
if toFree == 0 {
continue
}
// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)
// Walk through all objects
if err := objectAPI.Walk(ctx, bucket, "", objInfoCh); err != nil {
return err
}
// Reuse the fileScorer used by the disk cache to score entries by
// ModTime and find the oldest objects in the bucket to delete. In
// the context of bucket quota enforcement the number of hits is
// irrelevant.
scorer, err := newFileScorer(toFree, time.Now().Unix(), 1)
if err != nil {
return err
}
rcfg, _ := globalBucketObjectLockSys.Get(bucket)
for obj := range objInfoCh {
// skip objects currently under retention
if rcfg.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
continue
}
scorer.addFile(obj.Name, obj.ModTime, obj.Size, 1)
}
var objects []string
numKeys := len(scorer.fileNames())
for i, key := range scorer.fileNames() {
objects = append(objects, key)
if len(objects) < maxDeleteList && (i < numKeys-1) {
// skip deletion until maxDeleteList or end of slice
continue
}
if len(objects) == 0 {
break
}
// Deletes a list of objects.
deleteErrs, err := objectAPI.DeleteObjects(ctx, bucket, objects)
if err != nil {
logger.LogIf(ctx, err)
} else {
for i := range deleteErrs {
if deleteErrs[i] != nil {
logger.LogIf(ctx, deleteErrs[i])
continue
}
// Notify object deleted event.
sendEvent(eventArgs{
EventName: event.ObjectRemovedDelete,
BucketName: bucket,
Object: ObjectInfo{
Name: objects[i],
},
Host: "Internal: [FIFO-QUOTA-EXPIRY]",
})
}
objects = nil
}
}
}
return nil
}

View file

@ -1,39 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
// DO NOT EDIT THIS FILE DIRECTLY. These are build-time constants
// set through buildscripts/gen-ldflags.go.
var (
// GOPATH - GOPATH value at the time of build.
GOPATH = ""
// GOROOT - GOROOT value at the time of build.
GOROOT = ""
// Go get development tag.
goGetTag = "DEVELOPMENT.GOGET"
// Version - version time.RFC3339.
Version = goGetTag
// ReleaseTag - release tag in TAG.%Y-%m-%dT%H-%M-%SZ.
ReleaseTag = goGetTag
// CommitID - latest commit id.
CommitID = goGetTag
// ShortCommitID - first 12 characters from CommitID.
ShortCommitID = CommitID[:12]
)
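// A hedged illustration (values hypothetical) of how these variables are
// injected at link time by buildscripts/gen-ldflags.go via the Go linker's
// -X flag:
//
//	go build -ldflags "-X github.com/minio/minio/legacy.Version=2020-08-06T00-00-00Z \
//	  -X github.com/minio/minio/legacy.CommitID=<full-commit-sha>"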

View file

@ -1,279 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2017-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"crypto/x509"
"encoding/gob"
"errors"
"net"
"os"
"path/filepath"
"strings"
"time"
dns2 "github.com/miekg/dns"
"github.com/minio/cli"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/certs"
"github.com/minio/minio/pkg/env"
)
func init() {
logger.Init(GOPATH, GOROOT)
logger.RegisterError(config.FmtError)
// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(GlobalContext)
logger.AddTarget(globalConsoleSys)
gob.Register(StorageErr(""))
}
func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
logger.Fatal(errInvalidArgument,
"Encryption support is requested but '%s' does not support encryption", name)
}
if strings.HasPrefix(name, "gateway") {
if GlobalGatewaySSE.IsSet() && GlobalKMS == nil {
uiErr := config.ErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured")
logger.Fatal(uiErr, "Unable to start gateway with SSE")
}
}
if globalCompressConfig.Enabled && !objAPI.IsCompressionSupported() {
logger.Fatal(errInvalidArgument,
"Compression support is requested but '%s' does not support compression", name)
}
}
// Check for updates and print a notification message
func checkUpdate(mode string) {
// It's OK to ignore any errors during getUpdateInfo() here.
if updateMsg, _, currentReleaseTime, latestReleaseTime, err := getUpdateInfo(2*time.Second, mode); err == nil {
if updateMsg == "" {
return
}
if globalInplaceUpdateDisabled {
logStartupMessage(updateMsg)
} else {
logStartupMessage(prepareUpdateMessage("Run `mc admin update`", latestReleaseTime.Sub(currentReleaseTime)))
}
}
}
func newConfigDirFromCtx(ctx *cli.Context, option string, getDefaultDir func() string) (*ConfigDir, bool) {
var dir string
var dirSet bool
switch {
case ctx.IsSet(option):
dir = ctx.String(option)
dirSet = true
case ctx.GlobalIsSet(option):
dir = ctx.GlobalString(option)
dirSet = true
// The cli package does not expose the parent's option. The code below is a workaround.
if dir == "" || dir == getDefaultDir() {
dirSet = false // Unset to false since GlobalIsSet() true is a false positive.
if ctx.Parent().GlobalIsSet(option) {
dir = ctx.Parent().GlobalString(option)
dirSet = true
}
}
default:
// Neither local nor global option is provided. In this case, try to use
// default directory.
dir = getDefaultDir()
if dir == "" {
logger.FatalIf(errInvalidArgument, "%s option must be provided", option)
}
}
if dir == "" {
logger.FatalIf(errors.New("empty directory"), "%s directory cannot be empty", option)
}
// Disallow relative paths, figure out absolute paths.
dirAbs, err := filepath.Abs(dir)
logger.FatalIf(err, "Unable to fetch absolute path for %s=%s", option, dir)
logger.FatalIf(mkdirAllIgnorePerm(dirAbs), "Unable to create directory specified %s=%s", option, dir)
return &ConfigDir{path: dirAbs}, dirSet
}
func handleCommonCmdArgs(ctx *cli.Context) {
// Get "json" flag from command line argument and
// enable json and quiet modes if the json flag is turned on.
globalCLIContext.JSON = ctx.IsSet("json") || ctx.GlobalIsSet("json")
if globalCLIContext.JSON {
logger.EnableJSON()
}
// Get quiet flag from command line argument.
globalCLIContext.Quiet = ctx.IsSet("quiet") || ctx.GlobalIsSet("quiet")
if globalCLIContext.Quiet {
logger.EnableQuiet()
}
// Get anonymous flag from command line argument.
globalCLIContext.Anonymous = ctx.IsSet("anonymous") || ctx.GlobalIsSet("anonymous")
if globalCLIContext.Anonymous {
logger.EnableAnonymous()
}
// Fetch address option
globalCLIContext.Addr = ctx.GlobalString("address")
if globalCLIContext.Addr == "" || globalCLIContext.Addr == ":"+GlobalMinioDefaultPort {
globalCLIContext.Addr = ctx.String("address")
}
// Check "no-compat" flag from command line argument.
globalCLIContext.StrictS3Compat = true
if ctx.IsSet("no-compat") || ctx.GlobalIsSet("no-compat") {
globalCLIContext.StrictS3Compat = false
}
// Set all config, certs and CAs directories.
var configSet, certsSet bool
globalConfigDir, configSet = newConfigDirFromCtx(ctx, "config-dir", defaultConfigDir.Get)
globalCertsDir, certsSet = newConfigDirFromCtx(ctx, "certs-dir", defaultCertsDir.Get)
// Remove this code when we deprecate and remove config-dir.
// This code is to make sure we inherit from the config-dir
// option if certs-dir is not provided.
if !certsSet && configSet {
globalCertsDir = &ConfigDir{path: filepath.Join(globalConfigDir.Get(), certsDir)}
}
globalCertsCADir = &ConfigDir{path: filepath.Join(globalCertsDir.Get(), certsCADir)}
logger.FatalIf(mkdirAllIgnorePerm(globalCertsCADir.Get()), "Unable to create certs CA directory at %s", globalCertsCADir.Get())
}
func handleCommonEnvVars() {
wormEnabled, err := config.LookupWorm()
if err != nil {
logger.Fatal(config.ErrInvalidWormValue(err), "Invalid worm configuration")
}
if wormEnabled {
logger.Fatal(errors.New("WORM is deprecated"), "global MINIO_WORM support is removed, please downgrade your server or migrate to https://github.com/minio/minio/tree/master/docs/retention")
}
globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
if err != nil {
logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable")
}
globalFSOSync, err = config.ParseBool(env.Get(config.EnvFSOSync, config.EnableOff))
if err != nil {
logger.Fatal(config.ErrInvalidFSOSyncValue(err), "Invalid MINIO_FS_OSYNC value in environment variable")
}
domains := env.Get(config.EnvDomain, "")
if len(domains) != 0 {
for _, domainName := range strings.Split(domains, config.ValueSeparator) {
if _, ok := dns2.IsDomainName(domainName); !ok {
logger.Fatal(config.ErrInvalidDomainValue(nil).Msg("Unknown value `%s`", domainName),
"Invalid MINIO_DOMAIN value in environment variable")
}
globalDomainNames = append(globalDomainNames, domainName)
}
}
publicIPs := env.Get(config.EnvPublicIPs, "")
if len(publicIPs) != 0 {
minioEndpoints := strings.Split(publicIPs, config.ValueSeparator)
var domainIPs = set.NewStringSet()
for _, endpoint := range minioEndpoints {
if net.ParseIP(endpoint) == nil {
// Checking if the IP is a DNS entry.
addrs, err := net.LookupHost(endpoint)
if err != nil {
logger.FatalIf(err, "Unable to initialize MinIO server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
}
for _, addr := range addrs {
domainIPs.Add(addr)
}
}
domainIPs.Add(endpoint)
}
updateDomainIPs(domainIPs)
} else {
// Add the found interface IP addresses to the global domain IPs;
// loopback addresses will be naturally dropped.
updateDomainIPs(mustGetLocalIP4())
}
// In-place update is enabled by default if MINIO_UPDATE is not set
// or is set to anything other than 'off'; if MINIO_UPDATE is set to 'off',
// in-place update is disabled.
globalInplaceUpdateDisabled = strings.EqualFold(env.Get(config.EnvUpdate, config.EnableOn), config.EnableOff)
if env.IsSet(config.EnvAccessKey) || env.IsSet(config.EnvSecretKey) {
cred, err := auth.CreateCredentials(env.Get(config.EnvAccessKey, ""), env.Get(config.EnvSecretKey, ""))
if err != nil {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate credentials inherited from the shell environment")
}
globalActiveCred = cred
globalConfigEncrypted = true
}
if env.IsSet(config.EnvAccessKeyOld) && env.IsSet(config.EnvSecretKeyOld) {
oldCred, err := auth.CreateCredentials(env.Get(config.EnvAccessKeyOld, ""), env.Get(config.EnvSecretKeyOld, ""))
if err != nil {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate the old credentials inherited from the shell environment")
}
globalOldCred = oldCred
os.Unsetenv(config.EnvAccessKeyOld)
os.Unsetenv(config.EnvSecretKeyOld)
}
}
func logStartupMessage(msg string) {
if globalConsoleSys != nil {
globalConsoleSys.Send(msg, string(logger.All))
}
logger.StartupMessage(msg)
}
func getTLSConfig() (x509Certs []*x509.Certificate, c *certs.Certs, secureConn bool, err error) {
if !(isFile(getPublicCertFile()) && isFile(getPrivateKeyFile())) {
return nil, nil, false, nil
}
if x509Certs, err = config.ParsePublicCertFile(getPublicCertFile()); err != nil {
return nil, nil, false, err
}
c, err = certs.New(getPublicCertFile(), getPrivateKeyFile(), config.LoadX509KeyPair)
if err != nil {
return nil, nil, false, err
}
secureConn = true
return x509Certs, c, secureConn, nil
}
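// A minimal consumption sketch (assuming certs.Certs exposes a
// GetCertificate callback, error handling elided) of how the returned
// values might be wired into a TLS listener:
//
//	_, c, secureConn, err := getTLSConfig()
//	if err == nil && secureConn {
//		tlsConfig := &tls.Config{GetCertificate: c.GetCertificate}
//		// hand tlsConfig to the HTTP server
//	}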

View file

@ -1,82 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"errors"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/hash"
)
var errConfigNotFound = errors.New("config file not found")
func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]byte, error) {
var buffer bytes.Buffer
// Read entire content by setting size to -1
if err := objAPI.GetObject(ctx, minioMetaBucket, configFile, 0, -1, &buffer, "", ObjectOptions{}); err != nil {
// Treat object not found as config not found.
if isErrObjectNotFound(err) {
return nil, errConfigNotFound
}
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return nil, err
}
// Return config not found on empty content.
if buffer.Len() == 0 {
return nil, errConfigNotFound
}
return buffer.Bytes(), nil
}
func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
err := objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
if err != nil && isErrObjectNotFound(err) {
return errConfigNotFound
}
return err
}
func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
if err != nil {
return err
}
_, err = objAPI.PutObject(ctx, minioMetaBucket, configFile, NewPutObjReader(hashReader, nil, nil), ObjectOptions{})
return err
}
func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
// Treat object not found as config not found.
if isErrObjectNotFound(err) {
return errConfigNotFound
}
logger.GetReqInfo(ctx).AppendTags("configFile", configFile)
logger.LogIf(ctx, err)
return err
}
return nil
}
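// A minimal sketch (config path hypothetical, mutation elided) of the
// read-modify-write cycle these helpers support on the minioMetaBucket:
func exampleConfigRoundTrip(ctx context.Context, objAPI ObjectLayer) error {
	data, err := readConfig(ctx, objAPI, "config/example.json")
	if err == errConfigNotFound {
		data = []byte(`{}`) // start from an empty document
	} else if err != nil {
		return err
	}
	// ...mutate data as needed...
	return saveConfig(ctx, objAPI, "config/example.json", data)
}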

View file

@ -1,629 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"fmt"
"strings"
"sync"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/legacy/config/api"
"github.com/minio/minio/legacy/config/cache"
"github.com/minio/minio/legacy/config/compress"
"github.com/minio/minio/legacy/config/etcd"
"github.com/minio/minio/legacy/config/etcd/dns"
xldap "github.com/minio/minio/legacy/config/identity/ldap"
"github.com/minio/minio/legacy/config/identity/openid"
"github.com/minio/minio/legacy/config/notify"
"github.com/minio/minio/legacy/config/policy/opa"
"github.com/minio/minio/legacy/config/storageclass"
"github.com/minio/minio/legacy/crypto"
xhttp "github.com/minio/minio/legacy/http"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/legacy/logger/target/http"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)
func initHelp() {
var kvs = map[string]config.KVS{
config.EtcdSubSys: etcd.DefaultKVS,
config.CacheSubSys: cache.DefaultKVS,
config.CompressionSubSys: compress.DefaultKVS,
config.IdentityLDAPSubSys: xldap.DefaultKVS,
config.IdentityOpenIDSubSys: openid.DefaultKVS,
config.PolicyOPASubSys: opa.DefaultKVS,
config.RegionSubSys: config.DefaultRegionKVS,
config.APISubSys: api.DefaultKVS,
config.CredentialsSubSys: config.DefaultCredentialKVS,
config.KmsVaultSubSys: crypto.DefaultVaultKVS,
config.KmsKesSubSys: crypto.DefaultKesKVS,
config.LoggerWebhookSubSys: logger.DefaultKVS,
config.AuditWebhookSubSys: logger.DefaultAuditKVS,
}
for k, v := range notify.DefaultNotificationKVS {
kvs[k] = v
}
if globalIsXL {
kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
}
config.RegisterDefaultKVS(kvs)
// Captures help for each sub-system
var helpSubSys = config.HelpKVS{
config.HelpKV{
Key: config.RegionSubSys,
Description: "label the location of the server",
},
config.HelpKV{
Key: config.CacheSubSys,
Description: "add caching storage tier",
},
config.HelpKV{
Key: config.CompressionSubSys,
Description: "enable server side compression of objects",
},
config.HelpKV{
Key: config.EtcdSubSys,
Description: "federate multiple clusters for IAM and Bucket DNS",
},
config.HelpKV{
Key: config.IdentityOpenIDSubSys,
Description: "enable OpenID SSO support",
},
config.HelpKV{
Key: config.IdentityLDAPSubSys,
Description: "enable LDAP SSO support",
},
config.HelpKV{
Key: config.PolicyOPASubSys,
Description: "enable external OPA for policy enforcement",
},
config.HelpKV{
Key: config.KmsVaultSubSys,
Description: "enable external HashiCorp Vault key management service",
},
config.HelpKV{
Key: config.KmsKesSubSys,
Description: "enable external MinIO key encryption service",
},
config.HelpKV{
Key: config.APISubSys,
Description: "manage global HTTP API call specific features, such as throttling, authentication types, etc.",
},
config.HelpKV{
Key: config.LoggerWebhookSubSys,
Description: "send server logs to webhook endpoints",
MultipleTargets: true,
},
config.HelpKV{
Key: config.AuditWebhookSubSys,
Description: "send audit logs to webhook endpoints",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyWebhookSubSys,
Description: "publish bucket notifications to webhook endpoints",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyAMQPSubSys,
Description: "publish bucket notifications to AMQP endpoints",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyKafkaSubSys,
Description: "publish bucket notifications to Kafka endpoints",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyMQTTSubSys,
Description: "publish bucket notifications to MQTT endpoints",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyNATSSubSys,
Description: "publish bucket notifications to NATS endpoints",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyNSQSubSys,
Description: "publish bucket notifications to NSQ endpoints",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyMySQLSubSys,
Description: "publish bucket notifications to MySQL databases",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyPostgresSubSys,
Description: "publish bucket notifications to Postgres databases",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyESSubSys,
Description: "publish bucket notifications to Elasticsearch endpoints",
MultipleTargets: true,
},
config.HelpKV{
Key: config.NotifyRedisSubSys,
Description: "publish bucket notifications to Redis datastores",
MultipleTargets: true,
},
}
if globalIsXL {
helpSubSys = append(helpSubSys, config.HelpKV{})
copy(helpSubSys[2:], helpSubSys[1:])
helpSubSys[1] = config.HelpKV{
Key: config.StorageClassSubSys,
Description: "define object level redundancy",
}
}
var helpMap = map[string]config.HelpKVS{
"": helpSubSys, // Help for all sub-systems.
config.RegionSubSys: config.RegionHelp,
config.APISubSys: api.Help,
config.StorageClassSubSys: storageclass.Help,
config.EtcdSubSys: etcd.Help,
config.CacheSubSys: cache.Help,
config.CompressionSubSys: compress.Help,
config.IdentityOpenIDSubSys: openid.Help,
config.IdentityLDAPSubSys: xldap.Help,
config.PolicyOPASubSys: opa.Help,
config.KmsVaultSubSys: crypto.HelpVault,
config.KmsKesSubSys: crypto.HelpKes,
config.LoggerWebhookSubSys: logger.Help,
config.AuditWebhookSubSys: logger.HelpAudit,
config.NotifyAMQPSubSys: notify.HelpAMQP,
config.NotifyKafkaSubSys: notify.HelpKafka,
config.NotifyMQTTSubSys: notify.HelpMQTT,
config.NotifyNATSSubSys: notify.HelpNATS,
config.NotifyNSQSubSys: notify.HelpNSQ,
config.NotifyMySQLSubSys: notify.HelpMySQL,
config.NotifyPostgresSubSys: notify.HelpPostgres,
config.NotifyRedisSubSys: notify.HelpRedis,
config.NotifyWebhookSubSys: notify.HelpWebhook,
config.NotifyESSubSys: notify.HelpES,
}
config.RegisterHelpSubSys(helpMap)
}
var (
// globalServerConfig server config.
globalServerConfig config.Config
globalServerConfigMu sync.RWMutex
)
func validateConfig(s config.Config) error {
// Disable merging env values with config for validation.
env.SetEnvOff()
// Enable env values to validate KMS.
defer env.SetEnvOn()
if _, err := config.LookupCreds(s[config.CredentialsSubSys][config.Default]); err != nil {
return err
}
if _, err := config.LookupRegion(s[config.RegionSubSys][config.Default]); err != nil {
return err
}
if _, err := api.LookupConfig(s[config.APISubSys][config.Default]); err != nil {
return err
}
if globalIsXL {
if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount); err != nil {
return err
}
}
if _, err := cache.LookupConfig(s[config.CacheSubSys][config.Default]); err != nil {
return err
}
if _, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default]); err != nil {
return err
}
{
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil {
return err
}
if etcdCfg.Enabled {
etcdClnt, err := etcd.New(etcdCfg)
if err != nil {
return err
}
etcdClnt.Close()
}
}
{
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
return err
}
// Set env to enable master key validation.
// this is needed only for KMS.
env.SetEnvOn()
if _, err = crypto.NewKMS(kmsCfg); err != nil {
return err
}
// Disable merging env values for the rest.
env.SetEnvOff()
}
if _, err := openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}
{
cfg, err := xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs)
if err != nil {
return err
}
if cfg.Enabled {
conn, cerr := cfg.Connect()
if cerr != nil {
return cerr
}
conn.Close()
}
}
if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil {
return err
}
if _, err := logger.LookupConfig(s); err != nil {
return err
}
return notify.TestNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport(),
globalNotificationSys.ConfiguredTargetIDs())
}
func lookupConfigs(s config.Config) {
ctx := GlobalContext
var err error
if !globalActiveCred.IsValid() {
// Env doesn't seem to be set, we fall back to looking up creds from the config.
globalActiveCred, err = config.LookupCreds(s[config.CredentialsSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid credentials configuration: %w", err))
}
}
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil {
if globalIsGateway {
logger.FatalIf(err, "Unable to initialize etcd config")
} else {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}
}
if etcdCfg.Enabled {
if globalEtcdClient == nil {
globalEtcdClient, err = etcd.New(etcdCfg)
if err != nil {
if globalIsGateway {
logger.FatalIf(err, "Unable to initialize etcd config")
} else {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}
}
}
if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil && globalDNSConfig == nil {
globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
dns.DomainNames(globalDomainNames),
dns.DomainIPs(globalDomainIPs),
dns.DomainPort(globalMinioPort),
dns.CoreDNSPath(etcdCfg.CoreDNSPath),
)
if err != nil {
if globalIsGateway {
logger.FatalIf(err, "Unable to initialize DNS config")
} else {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
globalDomainNames, err))
}
}
}
}
// Bucket federation is 'true' only when IAM assets are not namespaced
// per tenant and all tenants are interested in globally available users;
// if a namespace was requested, such as by specifying etcdPathPrefix, then
// we assume that users are interested in global bucket support
// but not federation.
globalBucketFederation = etcdCfg.PathPrefix == "" && etcdCfg.Enabled
globalServerRegion, err = config.LookupRegion(s[config.RegionSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid region configuration: %w", err))
}
apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}
globalAPIConfig.init(apiConfig)
if globalIsXL {
globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
globalXLSetDriveCount)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
}
}
globalCacheConfig, err = cache.LookupConfig(s[config.CacheSubSys][config.Default])
if err != nil {
if globalIsGateway {
logger.FatalIf(err, "Unable to setup cache")
} else {
logger.LogIf(ctx, fmt.Errorf("Unable to setup cache: %w", err))
}
}
if globalCacheConfig.Enabled {
if cacheEncKey := env.Get(cache.EnvCacheEncryptionMasterKey, ""); cacheEncKey != "" {
globalCacheKMS, err = crypto.ParseMasterKey(cacheEncKey)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup encryption cache: %w", err))
}
}
}
kmsCfg, err := crypto.LookupConfig(s, globalCertsCADir.Get(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS config: %w", err))
}
GlobalKMS, err = crypto.NewKMS(kmsCfg)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup KMS with current KMS config: %w", err))
}
// Enable auto-encryption if enabled
globalAutoEncryption = kmsCfg.AutoEncryption
globalCompressConfig, err = compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to setup Compression: %w", err))
}
globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
}
opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
NewGatewayHTTPTransport(), xhttp.DrainBody)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize OPA: %w", err))
}
globalOpenIDValidators = getOpenIDValidators(globalOpenIDConfig)
globalPolicyOPA = opa.New(opaCfg)
globalLDAPConfig, err = xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
globalRootCAs)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to parse LDAP configuration: %w", err))
}
// Load logger targets based on user's configuration
loggerUserAgent := getUserAgent(getMinioMode())
loggerCfg, err := logger.LookupConfig(s)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize logger: %w", err))
}
for _, l := range loggerCfg.HTTP {
if l.Enabled {
// Enable http logging
logger.AddTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
http.WithTransport(NewGatewayHTTPTransport()),
),
)
}
}
for _, l := range loggerCfg.Audit {
if l.Enabled {
// Enable http audit logging
logger.AddAuditTarget(
http.New(http.WithEndpoint(l.Endpoint),
http.WithAuthToken(l.AuthToken),
http.WithUserAgent(loggerUserAgent),
http.WithLogKind(string(logger.All)),
http.WithTransport(NewGatewayHTTPTransport()),
),
)
}
}
globalConfigTargetList, err = notify.GetNotificationTargets(s, GlobalContext.Done(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}
globalEnvTargetList, err = notify.GetNotificationTargets(newServerConfig(), GlobalContext.Done(), NewGatewayHTTPTransport())
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}
}
// Help - return sub-system level help
type Help struct {
SubSys string `json:"subSys"`
Description string `json:"description"`
MultipleTargets bool `json:"multipleTargets"`
KeysHelp config.HelpKVS `json:"keysHelp"`
}
// GetHelp - returns help for sub-sys, a key for a sub-system or all the help.
func GetHelp(subSys, key string, envOnly bool) (Help, error) {
if len(subSys) == 0 {
return Help{KeysHelp: config.HelpSubSysMap[subSys]}, nil
}
subSystemValue := strings.SplitN(subSys, config.SubSystemSeparator, 2)
if len(subSystemValue) == 0 {
return Help{}, config.Errorf("invalid number of arguments %s", subSys)
}
subSys = subSystemValue[0]
subSysHelp, ok := config.HelpSubSysMap[""].Lookup(subSys)
if !ok {
return Help{}, config.Errorf("unknown sub-system %s", subSys)
}
h, ok := config.HelpSubSysMap[subSys]
if !ok {
return Help{}, config.Errorf("unknown sub-system %s", subSys)
}
if key != "" {
value, ok := h.Lookup(key)
if !ok {
return Help{}, config.Errorf("unknown key %s for sub-system %s",
key, subSys)
}
h = config.HelpKVS{value}
}
envHelp := config.HelpKVS{}
if envOnly {
// Only for multiple targets, make sure
// to list the ENV; for a regular k/v the EnableKey is
// implicit, but for ENVs we cannot make it implicit.
if subSysHelp.MultipleTargets {
envK := config.EnvPrefix + strings.Join([]string{
strings.ToTitle(subSys), strings.ToTitle(madmin.EnableKey),
}, config.EnvWordDelimiter)
envHelp = append(envHelp, config.HelpKV{
Key: envK,
Description: fmt.Sprintf("enable %s target, default is 'off'", subSys),
Optional: false,
Type: "on|off",
})
}
for _, hkv := range h {
envK := config.EnvPrefix + strings.Join([]string{
strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
}, config.EnvWordDelimiter)
envHelp = append(envHelp, config.HelpKV{
Key: envK,
Description: hkv.Description,
Optional: hkv.Optional,
Type: hkv.Type,
})
}
h = envHelp
}
return Help{
SubSys: subSys,
Description: subSysHelp.Description,
MultipleTargets: subSysHelp.MultipleTargets,
KeysHelp: h,
}, nil
}
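// A minimal usage sketch: asking for the ENV-only help of a multi-target
// sub-system such as the webhook logger (output shape per the Help struct
// above):
//
//	h, err := GetHelp(config.LoggerWebhookSubSys, "", true)
//	// h.KeysHelp now lists keys of the form MINIO_LOGGER_WEBHOOK_*.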
func newServerConfig() config.Config {
return config.New()
}
// newSrvConfig - initialize a new server config, saves env parameters if
// found, otherwise use default parameters
func newSrvConfig(objAPI ObjectLayer) error {
// Initialize server config.
srvCfg := newServerConfig()
// Override any values from ENVs.
lookupConfigs(srvCfg)
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
globalServerConfigMu.Unlock()
// Save config into file.
return saveServerConfig(GlobalContext, objAPI, globalServerConfig)
}
func getValidConfig(objAPI ObjectLayer) (config.Config, error) {
return readServerConfig(GlobalContext, objAPI)
}
// loadConfig - loads a new config from disk, overrides params
// from env if found and valid
func loadConfig(objAPI ObjectLayer) error {
srvCfg, err := getValidConfig(objAPI)
if err != nil {
return err
}
// Override any values from ENVs.
lookupConfigs(srvCfg)
// hold the mutex lock before a new config is assigned.
globalServerConfigMu.Lock()
globalServerConfig = srvCfg
globalServerConfigMu.Unlock()
return nil
}
// getOpenIDValidators - returns a ValidatorList which contains
// enabled providers in the server config.
// A new authentication provider is added as follows:
// * Add a new provider in the pkg/iam/openid package.
func getOpenIDValidators(cfg openid.Config) *openid.Validators {
validators := openid.NewValidators()
if cfg.JWKS.URL != nil {
validators.Add(openid.NewJWT(cfg))
}
return validators
}

View file

@ -1,60 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"context"
"os"
"testing"
"github.com/minio/minio/legacy/config"
)
func TestServerConfig(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
if err = newTestConfig(globalMinioDefaultRegion, objLayer); err != nil {
t.Fatalf("Init Test config failed")
}
if globalServerRegion != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", globalServerRegion)
}
// Set new region and verify.
config.SetRegion(globalServerConfig, "us-west-1")
region, err := config.LookupRegion(globalServerConfig[config.RegionSubSys][config.Default])
if err != nil {
t.Fatal(err)
}
if region != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", globalServerRegion)
}
if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
t.Fatalf("Unable to save updated config file %s", err)
}
// Initialize server config.
if err := loadConfig(objLayer); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}

View file

@ -1,107 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"os"
"path/filepath"
homedir "github.com/mitchellh/go-homedir"
)
const (
// Default minio configuration directory where below configuration files/directories are stored.
defaultMinioConfigDir = ".minio"
// Directory contains below files/directories for HTTPS configuration.
certsDir = "certs"
// Directory contains all CA certificates other than system defaults for HTTPS.
certsCADir = "CAs"
// Public certificate file for HTTPS.
publicCertFile = "public.crt"
// Private key file for HTTPS.
privateKeyFile = "private.key"
)
// ConfigDir - points to a user set directory.
type ConfigDir struct {
path string
}
func getDefaultConfigDir() string {
homeDir, err := homedir.Dir()
if err != nil {
return ""
}
return filepath.Join(homeDir, defaultMinioConfigDir)
}
func getDefaultCertsDir() string {
return filepath.Join(getDefaultConfigDir(), certsDir)
}
func getDefaultCertsCADir() string {
return filepath.Join(getDefaultCertsDir(), certsCADir)
}
var (
// Default config, certs and CA directories.
defaultConfigDir = &ConfigDir{path: getDefaultConfigDir()}
defaultCertsDir = &ConfigDir{path: getDefaultCertsDir()}
defaultCertsCADir = &ConfigDir{path: getDefaultCertsCADir()}
// Points to current configuration directory -- deprecated, to be removed in future.
globalConfigDir = defaultConfigDir
// Points to current certs directory set by user with --certs-dir
globalCertsDir = defaultCertsDir
// Points to relative path to certs directory and is <value-of-certs-dir>/CAs
globalCertsCADir = defaultCertsCADir
)
// Get - returns current directory.
func (dir *ConfigDir) Get() string {
return dir.path
}
// Attempts to create all directories, ignores any permission denied errors.
func mkdirAllIgnorePerm(path string) error {
err := os.MkdirAll(path, 0700)
if err != nil {
// It is possible in Kubernetes-like deployments that this directory
// is already mounted and is not writable; ignore any write errors.
if os.IsPermission(err) {
err = nil
}
}
return err
}
func getConfigFile() string {
return filepath.Join(globalConfigDir.Get(), minioConfigFile)
}
func getPublicCertFile() string {
return filepath.Join(globalCertsDir.Get(), publicCertFile)
}
func getPrivateKeyFile() string {
return filepath.Join(globalCertsDir.Get(), privateKeyFile)
}

View file

@ -1,305 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"errors"
"fmt"
"time"
"unicode/utf8"
etcd "github.com/coreos/etcd/clientv3"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/madmin"
)
func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
if !server {
return nil
}
encrypted, err := checkBackendEncrypted(objAPI)
if err != nil {
return fmt.Errorf("Unable to encrypt config %w", err)
}
if encrypted {
// The backend is encrypted but credentials are not specified;
// we shall fail right here. Otherwise proceed forward.
if !globalConfigEncrypted || !globalActiveCred.IsValid() {
return config.ErrMissingCredentialsBackendEncrypted(nil)
}
} else {
// The backend is not yet encrypted; check whether encryption of
// the backend is requested. If not, return nil and proceed
// forward.
if !globalConfigEncrypted {
return nil
}
if !globalActiveCred.IsValid() {
return config.ErrMissingCredentialsBackendEncrypted(nil)
}
}
// Migrate IAM configuration
if err = migrateConfigPrefixToEncrypted(objAPI, globalOldCred, encrypted); err != nil {
return fmt.Errorf("Unable to migrate all config at .minio.sys/config/: %w", err)
}
return nil
}
const (
backendEncryptedFile = "backend-encrypted"
)
var (
backendEncryptedMigrationIncomplete = []byte("incomplete")
backendEncryptedMigrationComplete = []byte("encrypted")
)
func checkBackendEtcdEncrypted(ctx context.Context, client *etcd.Client) (bool, error) {
data, err := readKeyEtcd(ctx, client, backendEncryptedFile)
if err != nil && err != errConfigNotFound {
return false, err
}
return err == nil && bytes.Equal(data, backendEncryptedMigrationComplete), nil
}
func checkBackendEncrypted(objAPI ObjectLayer) (bool, error) {
data, err := readConfig(GlobalContext, objAPI, backendEncryptedFile)
if err != nil && err != errConfigNotFound {
return false, err
}
return err == nil && bytes.Equal(data, backendEncryptedMigrationComplete), nil
}
// decryptData - decrypts input data with more than one credential.
func decryptData(edata []byte, creds ...auth.Credentials) ([]byte, error) {
var err error
var data []byte
for _, cred := range creds {
data, err = madmin.DecryptData(cred.String(), bytes.NewReader(edata))
if err != nil {
if err == madmin.ErrMaliciousData {
continue
}
return nil, err
}
break
}
return data, err
}
func migrateIAMConfigsEtcdToEncrypted(ctx context.Context, client *etcd.Client) error {
encrypted, err := checkBackendEtcdEncrypted(ctx, client)
if err != nil {
return err
}
if encrypted {
// The backend is encrypted but credentials are not specified;
// we shall fail right here. Otherwise proceed forward.
if !globalConfigEncrypted || !globalActiveCred.IsValid() {
return config.ErrMissingCredentialsBackendEncrypted(nil)
}
} else {
// The backend is not yet encrypted; check whether encryption of
// the backend is requested. If not, return nil and proceed
// forward.
if !globalConfigEncrypted {
return nil
}
if !globalActiveCred.IsValid() {
return errInvalidArgument
}
}
if encrypted {
// No key rotation requested, and backend is
// already encrypted. We proceed without migration.
if !globalOldCred.IsValid() {
return nil
}
// No real reason to rotate if old and new creds are same.
if globalOldCred.Equal(globalActiveCred) {
return nil
}
logger.Info("Attempting rotation of encrypted IAM users and policies on etcd with newly supplied credentials")
} else {
logger.Info("Attempting encryption of all IAM users and policies on etcd")
}
listCtx, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
r, err := client.Get(listCtx, minioConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
if err != nil {
return err
}
if err = saveKeyEtcd(ctx, client, backendEncryptedFile, backendEncryptedMigrationIncomplete); err != nil {
return err
}
for _, kv := range r.Kvs {
var (
cdata []byte
cencdata []byte
)
cdata, err = readKeyEtcd(ctx, client, string(kv.Key))
if err != nil {
switch err {
case errConfigNotFound:
// Perhaps not present or someone deleted it.
continue
}
return err
}
var data []byte
// Is rotation of creds requested?
if globalOldCred.IsValid() {
data, err = decryptData(cdata, globalOldCred, globalActiveCred)
if err != nil {
if err == madmin.ErrMaliciousData {
return config.ErrInvalidRotatingCredentialsBackendEncrypted(nil)
}
return err
}
} else {
data = cdata
}
if !utf8.Valid(data) {
_, err = decryptData(data, globalActiveCred)
if err == nil {
// Config is already encrypted with right keys
continue
}
return errors.New("config data not in plain-text form or encrypted")
}
cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)
if err != nil {
return err
}
if err = saveKeyEtcd(ctx, client, string(kv.Key), cencdata); err != nil {
return err
}
}
if encrypted && globalActiveCred.IsValid() && globalOldCred.IsValid() {
logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
}
return saveKeyEtcd(ctx, client, backendEncryptedFile, backendEncryptedMigrationComplete)
}
func migrateConfigPrefixToEncrypted(objAPI ObjectLayer, activeCredOld auth.Credentials, encrypted bool) error {
if encrypted {
// No key rotation requested, and backend is
// already encrypted. We proceed without migration.
if !activeCredOld.IsValid() {
return nil
}
// No real reason to rotate if old and new creds are same.
if activeCredOld.Equal(globalActiveCred) {
return nil
}
logger.Info("Attempting rotation of encrypted config, IAM users and policies on MinIO with newly supplied credentials")
} else {
logger.Info("Attempting encryption of all config, IAM users and policies on MinIO backend")
}
err := saveConfig(GlobalContext, objAPI, backendEncryptedFile, backendEncryptedMigrationIncomplete)
if err != nil {
return err
}
var marker string
for {
res, err := objAPI.ListObjects(GlobalContext, minioMetaBucket,
minioConfigPrefix, marker, "", maxObjectList)
if err != nil {
return err
}
for _, obj := range res.Objects {
var (
cdata []byte
cencdata []byte
)
cdata, err = readConfig(GlobalContext, objAPI, obj.Name)
if err != nil {
return err
}
var data []byte
// Is rotation of creds requested?
if activeCredOld.IsValid() {
data, err = decryptData(cdata, activeCredOld, globalActiveCred)
if err != nil {
if err == madmin.ErrMaliciousData {
return config.ErrInvalidRotatingCredentialsBackendEncrypted(nil)
}
return err
}
} else {
data = cdata
}
if !utf8.Valid(data) {
_, err = decryptData(data, globalActiveCred)
if err == nil {
// Config is already encrypted with right keys
continue
}
return errors.New("config data not in plain-text form or encrypted")
}
cencdata, err = madmin.EncryptData(globalActiveCred.String(), data)
if err != nil {
return err
}
if err = saveConfig(GlobalContext, objAPI, obj.Name, cencdata); err != nil {
return err
}
}
if !res.IsTruncated {
break
}
marker = res.NextMarker
}
if encrypted && globalActiveCred.IsValid() && activeCredOld.IsValid() {
logger.Info("Rotation complete, please make sure to unset MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD envs")
}
return saveConfig(GlobalContext, objAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
}
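
Both functions bracket the re-encryption loop with the backendEncryptedMigrationIncomplete / backendEncryptedMigrationComplete markers, so a run interrupted mid-way is detectable on the next startup. A minimal sketch of that pattern, with saveMarker and migrateOne as hypothetical stand-ins for saveConfig/saveKeyEtcd and the per-object body:

package main

import "log"

// migrateAll shows the crash-safe marker pattern in isolation: flag the
// migration as in-flight, re-encrypt every object, then flip the marker
// to complete only after the loop finishes.
func migrateAll(objects []string, saveMarker func(string) error, migrateOne func(string) error) error {
	if err := saveMarker("incomplete"); err != nil {
		return err
	}
	for _, obj := range objects {
		if err := migrateOne(obj); err != nil {
			// Marker stays "incomplete"; the next startup retries.
			return err
		}
	}
	return saveMarker("complete")
}

func main() {
	err := migrateAll([]string{"config/iam/policy1", "config/iam/user1"},
		func(state string) error { log.Println("marker:", state); return nil },
		func(obj string) error { log.Println("re-encrypted:", obj); return nil })
	if err != nil {
		log.Fatal(err)
	}
}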

View file

@ -1,75 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"testing"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/madmin"
)
func TestDecryptData(t *testing.T) {
cred1 := auth.Credentials{
AccessKey: "minio",
SecretKey: "minio123",
}
cred2 := auth.Credentials{
AccessKey: "minio",
SecretKey: "minio1234",
}
data := []byte(`config data`)
edata1, err := madmin.EncryptData(cred1.String(), data)
if err != nil {
t.Fatal(err)
}
edata2, err := madmin.EncryptData(cred2.String(), data)
if err != nil {
t.Fatal(err)
}
tests := []struct {
edata []byte
creds []auth.Credentials
success bool
}{
{edata1, []auth.Credentials{cred1, cred2}, true},
{edata2, []auth.Credentials{cred1, cred2}, true},
{data, []auth.Credentials{cred1, cred2}, false},
}
for _, test := range tests {
t.Run("", func(t *testing.T) {
ddata, err := decryptData(test.edata, test.creds...)
if err != nil && test.success {
t.Errorf("Expected success, saw failure %v", err)
}
if err == nil && !test.success {
t.Error("Expected failure, saw success")
}
if test.success {
if !bytes.Equal(ddata, data) {
t.Errorf("Expected %s, got %s", string(data), string(ddata))
}
}
})
}
}

File diff suppressed because it is too large

View file

@ -1,364 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/minio/minio/legacy/config"
)
// Test if config v1 is purged
func TestServerConfigMigrateV1(t *testing.T) {
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
err = newTestConfig(globalMinioDefaultRegion, objLayer)
if err != nil {
t.Fatalf("Init Test config failed")
}
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
globalObjLayerMutex.Lock()
globalObjectAPI = objLayer
globalObjLayerMutex.Unlock()
// Create a V1 config json file and store it
configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
configPath := rootPath + "/fsUsers.json"
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Fire a migrateConfig()
if err := migrateConfig(); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Check if config v1 is removed from filesystem
if _, err := os.Stat(configPath); err == nil || !os.IsNotExist(err) {
t.Fatal("Config V1 file is not purged")
}
// Initialize server config and check again if everything is fine
if err := loadConfig(objLayer); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
}
// Test if all migrate code returns nil when config file does not
// exist
func TestServerConfigMigrateInexistentConfig(t *testing.T) {
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
if err := migrateV2ToV3(); err != nil {
t.Fatal("migrate v2 to v3 should succeed when no config file is found")
}
if err := migrateV3ToV4(); err != nil {
t.Fatal("migrate v3 to v4 should succeed when no config file is found")
}
if err := migrateV4ToV5(); err != nil {
t.Fatal("migrate v4 to v5 should succeed when no config file is found")
}
if err := migrateV5ToV6(); err != nil {
t.Fatal("migrate v5 to v6 should succeed when no config file is found")
}
if err := migrateV6ToV7(); err != nil {
t.Fatal("migrate v6 to v7 should succeed when no config file is found")
}
if err := migrateV7ToV8(); err != nil {
t.Fatal("migrate v7 to v8 should succeed when no config file is found")
}
if err := migrateV8ToV9(); err != nil {
t.Fatal("migrate v8 to v9 should succeed when no config file is found")
}
if err := migrateV9ToV10(); err != nil {
t.Fatal("migrate v9 to v10 should succeed when no config file is found")
}
if err := migrateV10ToV11(); err != nil {
t.Fatal("migrate v10 to v11 should succeed when no config file is found")
}
if err := migrateV11ToV12(); err != nil {
t.Fatal("migrate v11 to v12 should succeed when no config file is found")
}
if err := migrateV12ToV13(); err != nil {
t.Fatal("migrate v12 to v13 should succeed when no config file is found")
}
if err := migrateV13ToV14(); err != nil {
t.Fatal("migrate v13 to v14 should succeed when no config file is found")
}
if err := migrateV14ToV15(); err != nil {
t.Fatal("migrate v14 to v15 should succeed when no config file is found")
}
if err := migrateV15ToV16(); err != nil {
t.Fatal("migrate v15 to v16 should succeed when no config file is found")
}
if err := migrateV16ToV17(); err != nil {
t.Fatal("migrate v16 to v17 should succeed when no config file is found")
}
if err := migrateV17ToV18(); err != nil {
t.Fatal("migrate v17 to v18 should succeed when no config file is found")
}
if err := migrateV18ToV19(); err != nil {
t.Fatal("migrate v18 to v19 should succeed when no config file is found")
}
if err := migrateV19ToV20(); err != nil {
t.Fatal("migrate v19 to v20 should succeed when no config file is found")
}
if err := migrateV20ToV21(); err != nil {
t.Fatal("migrate v20 to v21 should succeed when no config file is found")
}
if err := migrateV21ToV22(); err != nil {
t.Fatal("migrate v21 to v22 should succeed when no config file is found")
}
if err := migrateV22ToV23(); err != nil {
t.Fatal("migrate v22 to v23 should succeed when no config file is found")
}
if err := migrateV23ToV24(); err != nil {
t.Fatal("migrate v23 to v24 should succeed when no config file is found")
}
if err := migrateV24ToV25(); err != nil {
t.Fatal("migrate v24 to v25 should succeed when no config file is found")
}
if err := migrateV25ToV26(); err != nil {
t.Fatal("migrate v25 to v26 should succeed when no config file is found")
}
if err := migrateV26ToV27(); err != nil {
t.Fatal("migrate v26 to v27 should succeed when no config file is found")
}
if err := migrateV27ToV28(); err != nil {
t.Fatal("migrate v27 to v28 should succeed when no config file is found")
}
}
// Test that a config migration from v2 to v33 completes successfully
func TestServerConfigMigrateV2toV33(t *testing.T) {
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
objLayer, fsDir, err := prepareFS()
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(fsDir)
configPath := rootPath + SlashSeparator + minioConfigFile
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Fire a migrateConfig()
if err := migrateConfig(); err == nil {
t.Fatal("migration should fail with corrupted config file")
}
accessKey := "accessfoo"
secretKey := "secretfoo"
// Create a V2 config json file and store it
configJSON := "{ \"version\":\"2\", \"credentials\": {\"accessKeyId\":\"" + accessKey + "\", \"secretAccessKey\":\"" + secretKey + "\", \"region\":\"us-east-1\"}, \"mongoLogger\":{\"addr\":\"127.0.0.1:3543\", \"db\":\"foodb\", \"collection\":\"foo\"}, \"syslogLogger\":{\"network\":\"127.0.0.1:543\", \"addr\":\"addr\"}, \"fileLogger\":{\"filename\":\"log.out\"}}"
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Fire a migrateConfig()
if err := migrateConfig(); err != nil {
t.Fatal("Unexpected error: ", err)
}
if err := migrateConfigToMinioSys(objLayer); err != nil {
t.Fatal("Unexpected error: ", err)
}
if err := migrateMinioSysConfig(objLayer); err != nil {
t.Fatal("Unexpected error: ", err)
}
if err := migrateMinioSysConfigToKV(objLayer); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Initialize server config and check again if everything is fine
if err := loadConfig(objLayer); err != nil {
t.Fatalf("Unable to initialize from updated config file %s", err)
}
// Check if accessKey and secretKey are not altered during migration
caccessKey := globalServerConfig[config.CredentialsSubSys][config.Default].Get(config.AccessKey)
if caccessKey != accessKey {
t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, caccessKey)
}
csecretKey := globalServerConfig[config.CredentialsSubSys][config.Default].Get(config.SecretKey)
if csecretKey != secretKey {
t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, csecretKey)
}
}
// Test if all migrate code returns error with corrupted config files
func TestServerConfigMigrateFaultyConfig(t *testing.T) {
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
configPath := rootPath + SlashSeparator + minioConfigFile
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Test different migrate versions and make sure they return an error
if err := migrateV2ToV3(); err == nil {
t.Fatal("migrateConfigV2ToV3() should fail with a corrupted json")
}
if err := migrateV3ToV4(); err == nil {
t.Fatal("migrateConfigV3ToV4() should fail with a corrupted json")
}
if err := migrateV4ToV5(); err == nil {
t.Fatal("migrateConfigV4ToV5() should fail with a corrupted json")
}
if err := migrateV5ToV6(); err == nil {
t.Fatal("migrateConfigV5ToV6() should fail with a corrupted json")
}
if err := migrateV6ToV7(); err == nil {
t.Fatal("migrateConfigV6ToV7() should fail with a corrupted json")
}
if err := migrateV7ToV8(); err == nil {
t.Fatal("migrateConfigV7ToV8() should fail with a corrupted json")
}
if err := migrateV8ToV9(); err == nil {
t.Fatal("migrateConfigV8ToV9() should fail with a corrupted json")
}
if err := migrateV9ToV10(); err == nil {
t.Fatal("migrateConfigV9ToV10() should fail with a corrupted json")
}
if err := migrateV10ToV11(); err == nil {
t.Fatal("migrateConfigV10ToV11() should fail with a corrupted json")
}
if err := migrateV11ToV12(); err == nil {
t.Fatal("migrateConfigV11ToV12() should fail with a corrupted json")
}
if err := migrateV12ToV13(); err == nil {
t.Fatal("migrateConfigV12ToV13() should fail with a corrupted json")
}
if err := migrateV13ToV14(); err == nil {
t.Fatal("migrateConfigV13ToV14() should fail with a corrupted json")
}
if err := migrateV14ToV15(); err == nil {
t.Fatal("migrateConfigV14ToV15() should fail with a corrupted json")
}
if err := migrateV15ToV16(); err == nil {
t.Fatal("migrateConfigV15ToV16() should fail with a corrupted json")
}
if err := migrateV16ToV17(); err == nil {
t.Fatal("migrateConfigV16ToV17() should fail with a corrupted json")
}
if err := migrateV17ToV18(); err == nil {
t.Fatal("migrateConfigV17ToV18() should fail with a corrupted json")
}
if err := migrateV18ToV19(); err == nil {
t.Fatal("migrateConfigV18ToV19() should fail with a corrupted json")
}
if err := migrateV19ToV20(); err == nil {
t.Fatal("migrateConfigV19ToV20() should fail with a corrupted json")
}
if err := migrateV20ToV21(); err == nil {
t.Fatal("migrateConfigV20ToV21() should fail with a corrupted json")
}
if err := migrateV21ToV22(); err == nil {
t.Fatal("migrateConfigV21ToV22() should fail with a corrupted json")
}
if err := migrateV22ToV23(); err == nil {
t.Fatal("migrateConfigV22ToV23() should fail with a corrupted json")
}
if err := migrateV23ToV24(); err == nil {
t.Fatal("migrateConfigV23ToV24() should fail with a corrupted json")
}
if err := migrateV24ToV25(); err == nil {
t.Fatal("migrateConfigV24ToV25() should fail with a corrupted json")
}
if err := migrateV25ToV26(); err == nil {
t.Fatal("migrateConfigV25ToV26() should fail with a corrupted json")
}
if err := migrateV26ToV27(); err == nil {
t.Fatal("migrateConfigV26ToV27() should fail with a corrupted json")
}
if err := migrateV27ToV28(); err == nil {
t.Fatal("migrateConfigV27ToV28() should fail with a corrupted json")
}
}
// Test that migrateConfig returns an error for corrupted config files across versions
func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
rootPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(rootPath)
globalConfigDir = &ConfigDir{path: rootPath}
configPath := rootPath + SlashSeparator + minioConfigFile
for i := 3; i <= 17; i++ {
// Create a corrupted config file
if err = ioutil.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
0644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Test different migrate versions and make sure they return an error
if err = migrateConfig(); err == nil {
t.Fatal("migrateConfig() should fail with a corrupted json")
}
}
// Create a corrupted config file for version '2'.
if err = ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Test different migrate versions and make sure they return an error
if err = migrateConfig(); err == nil {
t.Fatal("migrateConfig() should fail with a corrupted json")
}
}

View file

@ -1,877 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2016, 2017, 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"sync"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/legacy/config/cache"
"github.com/minio/minio/legacy/config/compress"
xldap "github.com/minio/minio/legacy/config/identity/ldap"
"github.com/minio/minio/legacy/config/identity/openid"
"github.com/minio/minio/legacy/config/notify"
"github.com/minio/minio/legacy/config/policy/opa"
"github.com/minio/minio/legacy/config/storageclass"
"github.com/minio/minio/legacy/crypto"
"github.com/minio/minio/legacy/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event/target"
"github.com/minio/minio/pkg/quick"
)
/////////////////// Config V1 ///////////////////
type configV1 struct {
Version string `json:"version"`
AccessKey string `json:"accessKeyId"`
SecretKey string `json:"secretAccessKey"`
}
/////////////////// Config V2 ///////////////////
type configV2 struct {
Version string `json:"version"`
Credentials struct {
AccessKey string `json:"accessKeyId"`
SecretKey string `json:"secretAccessKey"`
Region string `json:"region"`
} `json:"credentials"`
MongoLogger struct {
Addr string `json:"addr"`
DB string `json:"db"`
Collection string `json:"collection"`
} `json:"mongoLogger"`
SyslogLogger struct {
Network string `json:"network"`
Addr string `json:"addr"`
} `json:"syslogLogger"`
FileLogger struct {
Filename string `json:"filename"`
} `json:"fileLogger"`
}
/////////////////// Config V3 ///////////////////
// backendV3 type.
type backendV3 struct {
Type string `json:"type"`
Disk string `json:"disk,omitempty"`
Disks []string `json:"disks,omitempty"`
}
// syslogLogger v3
type syslogLoggerV3 struct {
Enable bool `json:"enable"`
Addr string `json:"address"`
Level string `json:"level"`
}
// loggerV3 type.
type loggerV3 struct {
Console struct {
Enable bool `json:"enable"`
Level string `json:"level"`
}
File struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
}
Syslog struct {
Enable bool `json:"enable"`
Addr string `json:"address"`
Level string `json:"level"`
} `json:"syslog"`
// Add new loggers here.
}
// configV3 server configuration version '3'.
type configV3 struct {
Version string `json:"version"`
// Backend configuration.
Backend backendV3 `json:"backend"`
// http Server configuration.
Addr string `json:"address"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV3 `json:"logger"`
}
// logger type representing version '4' logger config.
type loggerV4 struct {
Console struct {
Enable bool `json:"enable"`
Level string `json:"level"`
} `json:"console"`
File struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
} `json:"file"`
Syslog struct {
Enable bool `json:"enable"`
Addr string `json:"address"`
Level string `json:"level"`
} `json:"syslog"`
}
// configV4 server configuration version '4'.
type configV4 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV4 `json:"logger"`
}
// logger type representing version '5' logger config.
type loggerV5 struct {
Console struct {
Enable bool `json:"enable"`
Level string `json:"level"`
} `json:"console"`
File struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
} `json:"file"`
Syslog struct {
Enable bool `json:"enable"`
Addr string `json:"address"`
Level string `json:"level"`
} `json:"syslog"`
AMQP struct {
Enable bool `json:"enable"`
Level string `json:"level"`
URL string `json:"url"`
Exchange string `json:"exchange"`
RoutingKey string `json:"routingKey"`
ExchangeType string `json:"exchangeType"`
Mandatory bool `json:"mandatory"`
Immediate bool `json:"immediate"`
Durable bool `json:"durable"`
Internal bool `json:"internal"`
NoWait bool `json:"noWait"`
AutoDeleted bool `json:"autoDeleted"`
} `json:"amqp"`
ElasticSearch struct {
Enable bool `json:"enable"`
Level string `json:"level"`
URL string `json:"url"`
Index string `json:"index"`
} `json:"elasticsearch"`
Redis struct {
Enable bool `json:"enable"`
Level string `json:"level"`
Addr string `json:"address"`
Password string `json:"password"`
Key string `json:"key"`
} `json:"redis"`
}
// configV5 server configuration version '5'.
type configV5 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV5 `json:"logger"`
}
// consoleLoggerV1 - default logger if no other logging is enabled.
type consoleLoggerV1 struct {
Enable bool `json:"enable"`
Level string `json:"level"`
}
type fileLoggerV1 struct {
Enable bool `json:"enable"`
Filename string `json:"fileName"`
Level string `json:"level"`
}
type loggerV6 struct {
Console consoleLoggerV1 `json:"console"`
File fileLoggerV1 `json:"file"`
Syslog syslogLoggerV3 `json:"syslog"`
}
// configV6 server configuration version '6'.
type configV6 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV6 `json:"logger"`
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}
// Notifier represents collection of supported notification queues in version
// 1 without NATS streaming.
type notifierV1 struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
NATS map[string]natsNotifyV1 `json:"nats"`
ElasticSearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Redis map[string]target.RedisArgs `json:"redis"`
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
}
// Notifier represents collection of supported notification queues in version 2
// with NATS streaming but without webhook.
type notifierV2 struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
NATS map[string]target.NATSArgs `json:"nats"`
ElasticSearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Redis map[string]target.RedisArgs `json:"redis"`
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
}
// configV7 server configuration version '7'.
type serverConfigV7 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV6 `json:"logger"`
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}
// serverConfigV8 server configuration version '8'. Adds NATS notify.Config
// configuration.
type serverConfigV8 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV6 `json:"logger"`
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}
// serverConfigV9 server configuration version '9'. Adds PostgreSQL
// notify.Config configuration.
type serverConfigV9 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV6 `json:"logger"`
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}
type loggerV7 struct {
sync.RWMutex
Console consoleLoggerV1 `json:"console"`
File fileLoggerV1 `json:"file"`
}
// serverConfigV10 server configuration version '10' which is like
// version '9' except it drops support for syslog config, and makes the
// RWMutex global (so it does not exist in this struct).
type serverConfigV10 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV7 `json:"logger"`
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}
// natsNotifyV1 - structure was valid until config V11
type natsNotifyV1 struct {
Enable bool `json:"enable"`
Address string `json:"address"`
Subject string `json:"subject"`
Username string `json:"username"`
Password string `json:"password"`
Token string `json:"token"`
Secure bool `json:"secure"`
PingInterval int64 `json:"pingInterval"`
}
// serverConfigV11 server configuration version '11' which is like
// version '10' except it adds support for Kafka notifications.
type serverConfigV11 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV7 `json:"logger"`
// Notification queue configuration.
Notify notifierV1 `json:"notify"`
}
// serverConfigV12 server configuration version '12' which is like
// version '11' except it adds support for NATS streaming notifications.
type serverConfigV12 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger loggerV7 `json:"logger"`
// Notification queue configuration.
Notify notifierV2 `json:"notify"`
}
type notifierV3 struct {
AMQP map[string]target.AMQPArgs `json:"amqp"`
Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"`
Kafka map[string]target.KafkaArgs `json:"kafka"`
MQTT map[string]target.MQTTArgs `json:"mqtt"`
MySQL map[string]target.MySQLArgs `json:"mysql"`
NATS map[string]target.NATSArgs `json:"nats"`
PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"`
Redis map[string]target.RedisArgs `json:"redis"`
Webhook map[string]target.WebhookArgs `json:"webhook"`
}
// serverConfigV13 server configuration version '13' which is like
// version '12' except it adds support for webhook notification.
type serverConfigV13 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
// Additional error logging configuration.
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV14 server configuration version '14' which is like
// version '13' except it adds support of browser param.
type serverConfigV14 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV15 server configuration version '15' which is like
// version '14' except it adds mysql support
type serverConfigV15 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggerV7 `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// FileLogger is introduced to work around the dependency on logrus
type FileLogger struct {
Enable bool `json:"enable"`
Filename string `json:"filename"`
}
// ConsoleLogger is introduced to work around the dependency on logrus
type ConsoleLogger struct {
Enable bool `json:"enable"`
}
// Loggers struct is defined with FileLogger and ConsoleLogger
// although they are removed from the logging logic. They are
// kept here only to work around the dependency the migration
// code/logic still has on them.
type loggers struct {
sync.RWMutex
Console ConsoleLogger `json:"console"`
File FileLogger `json:"file"`
}
// serverConfigV16 server configuration version '16' which is like
// version '15' except it makes a change to logging configuration.
type serverConfigV16 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV17 server configuration version '17' which is like
// version '16' except it adds support for "format" parameter in
// database event notification targets: PostgreSQL, MySQL, Redis and
// Elasticsearch.
type serverConfigV17 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV18 server configuration version '18' which is like
// version '17' except it adds support for "deliveryMode" parameter in
// the AMQP notification target.
type serverConfigV18 struct {
sync.RWMutex
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV19 server configuration version '19' which is like
// version '18' except it adds support for MQTT notifications.
type serverConfigV19 struct {
sync.RWMutex
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
// Additional error logging configuration.
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV20 server configuration version '20' which is like
// version '19' except it adds support for VirtualHostDomain
type serverConfigV20 struct {
sync.RWMutex
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`
// Additional error logging configuration.
Logger *loggers `json:"logger"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV21 is just like version '20' without the logger field
type serverConfigV21 struct {
sync.RWMutex
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`
// Notification queue configuration.
Notify *notifierV3 `json:"notify"`
}
// serverConfigV22 is just like version '21' with added support
// for StorageClass.
type serverConfigV22 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
// serverConfigV23 is just like version '22' with addition of cache field.
type serverConfigV23 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
// serverConfigV24 is just like version '23'; it reverts
// the changes made in 6fb06045028b7a57c37c60a612c8e50735279ab4
type serverConfigV24 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Domain string `json:"domain"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
// serverConfigV25 is just like version '24', additionally storing
// the worm variable.
type serverConfigV25 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Worm config.BoolFlag `json:"worm"`
Domain string `json:"domain"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
// serverConfigV26 is just like version '25', additionally storing
// the cache max-use value in 'cache.Config'.
type serverConfigV26 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Worm config.BoolFlag `json:"worm"`
Domain string `json:"domain"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
}
// serverConfigV27 is just like version '26', additionally storing
// the logger field
type serverConfigV27 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Browser config.BoolFlag `json:"browser"`
Worm config.BoolFlag `json:"worm"`
Domain string `json:"domain"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger logger.Config `json:"logger"`
}
// serverConfigV28 is just like version '27', additionally
// storing KMS config
type serverConfigV28 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm config.BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger logger.Config `json:"logger"`
}
// serverConfigV29 is just like version '28'.
type serverConfigV29 serverConfigV28
// serverConfigV30 is just like version '29', additionally storing
// extensions and mimetypes fields for compression.
type serverConfigV30 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm config.BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger logger.Config `json:"logger"`
// Compression configuration
Compression compress.Config `json:"compress"`
}
// serverConfigV31 is just like version '30', with OPA and OpenID configuration.
type serverConfigV31 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm config.BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notifierV3 `json:"notify"`
// Logger configuration
Logger logger.Config `json:"logger"`
// Compression configuration
Compression compress.Config `json:"compress"`
// OpenID configuration
OpenID openid.Config `json:"openid"`
// External policy enforcements.
Policy struct {
// OPA configuration.
OPA opa.Args `json:"opa"`
// Add new external policy enforcements here.
} `json:"policy"`
}
// serverConfigV32 is just like version '31' with an added NSQ notifier.
type serverConfigV32 struct {
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm config.BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notify.Config `json:"notify"`
// Logger configuration
Logger logger.Config `json:"logger"`
// Compression configuration
Compression compress.Config `json:"compress"`
// OpenID configuration
OpenID openid.Config `json:"openid"`
// External policy enforcements.
Policy struct {
// OPA configuration.
OPA opa.Args `json:"opa"`
// Add new external policy enforcements here.
} `json:"policy"`
}
// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit in all notification targets.
type serverConfigV33 struct {
quick.Config `json:"-"` // ignore interfaces
Version string `json:"version"`
// S3 API configuration.
Credential auth.Credentials `json:"credential"`
Region string `json:"region"`
Worm config.BoolFlag `json:"worm"`
// Storage class configuration
StorageClass storageclass.Config `json:"storageclass"`
// Cache configuration
Cache cache.Config `json:"cache"`
// KMS configuration
KMS crypto.KMSConfig `json:"kms"`
// Notification queue configuration.
Notify notify.Config `json:"notify"`
// Logger configuration
Logger logger.Config `json:"logger"`
// Compression configuration
Compression compress.Config `json:"compress"`
// OpenID configuration
OpenID openid.Config `json:"openid"`
// External policy enforcements.
Policy struct {
// OPA configuration.
OPA opa.Args `json:"opa"`
// Add new external policy enforcements here.
} `json:"policy"`
LDAPServerConfig xldap.Config `json:"ldapserverconfig"`
}
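
Each serverConfigVx above feeds one step of the migrate chain exercised by the tests earlier in this commit. A toy version of such a step, using miniature stand-in structs (configVn/configVnPlus1 are not the real types): decode the older version, carry its fields forward, bump the version string, and write the result back:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Miniature stand-ins for two adjacent serverConfigVx versions.
type configVn struct {
	Version string `json:"version"`
	Region  string `json:"region"`
}

type configVnPlus1 struct {
	Version string `json:"version"`
	Region  string `json:"region"`
	Domain  string `json:"domain"` // field introduced in the newer version
}

// migrateVnToVnPlus1 decodes the old version, copies fields forward,
// bumps the version, and persists the result.
func migrateVnToVnPlus1(path string) error {
	data, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	var old configVn
	if err = json.Unmarshal(data, &old); err != nil {
		return err
	}
	if old.Version != "n" {
		return nil // already migrated, nothing to do
	}
	cur := configVnPlus1{Version: "n+1", Region: old.Region}
	out, err := json.Marshal(cur)
	if err != nil {
		return err
	}
	return os.WriteFile(path, out, 0644)
}

func main() {
	_ = os.WriteFile("config.json", []byte(`{"version":"n","region":"us-east-1"}`), 0644)
	fmt.Println(migrateVnToVnPlus1("config.json"))
}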

View file

@ -1,250 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package legacy
import (
"bytes"
"context"
"encoding/json"
"path"
"sort"
"strings"
"time"
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/pkg/madmin"
)
const (
minioConfigPrefix = "config"
kvPrefix = ".kv"
// Captures all the previous SetKV operations and allows rollback.
minioConfigHistoryPrefix = minioConfigPrefix + "/history"
// MinIO configuration file.
minioConfigFile = "config.json"
)
func listServerConfigHistory(ctx context.Context, objAPI ObjectLayer, withData bool, count int) (
[]madmin.ConfigHistoryEntry, error) {
var configHistory []madmin.ConfigHistoryEntry
// List all kvs
marker := ""
for {
res, err := objAPI.ListObjects(ctx, minioMetaBucket, minioConfigHistoryPrefix, marker, "", maxObjectList)
if err != nil {
return nil, err
}
for _, obj := range res.Objects {
cfgEntry := madmin.ConfigHistoryEntry{
RestoreID: strings.TrimSuffix(path.Base(obj.Name), kvPrefix),
CreateTime: obj.ModTime, // ModTime is createTime for config history entries.
}
if withData {
data, err := readConfig(ctx, objAPI, obj.Name)
if err != nil {
return nil, err
}
if globalConfigEncrypted {
data, err = madmin.DecryptData(globalActiveCred.String(), bytes.NewReader(data))
if err != nil {
return nil, err
}
}
cfgEntry.Data = string(data)
}
configHistory = append(configHistory, cfgEntry)
count--
if count == 0 {
break
}
}
if !res.IsTruncated {
// We are done here
break
}
marker = res.NextMarker
}
sort.Slice(configHistory, func(i, j int) bool {
return configHistory[i].CreateTime.Before(configHistory[j].CreateTime)
})
return configHistory, nil
}
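
listServerConfigHistory pages through the backend with the usual marker loop: fetch a page, accumulate, and resume from NextMarker until IsTruncated goes false. The same skeleton in isolation, with listPage as a hypothetical stand-in for ObjectLayer.ListObjects:

package main

import "fmt"

// listAll drains a marker-paginated listing. listPage returns one page of
// names, whether the listing was truncated, and the marker to resume from.
func listAll(listPage func(marker string) (names []string, truncated bool, next string)) []string {
	var all []string
	marker := ""
	for {
		names, truncated, next := listPage(marker)
		all = append(all, names...)
		if !truncated {
			return all
		}
		marker = next
	}
}

func main() {
	pages := [][]string{{"a", "b"}, {"c"}}
	i := 0
	fmt.Println(listAll(func(string) ([]string, bool, string) {
		p := pages[i]
		i++
		return p, i < len(pages), p[len(p)-1]
	}))
}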
func delServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) error {
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix)
return objAPI.DeleteObject(ctx, minioMetaBucket, historyFile)
}
func readServerConfigHistory(ctx context.Context, objAPI ObjectLayer, uuidKV string) ([]byte, error) {
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV+kvPrefix)
data, err := readConfig(ctx, objAPI, historyFile)
if err != nil {
return nil, err
}
if globalConfigEncrypted {
data, err = madmin.DecryptData(globalActiveCred.String(), bytes.NewReader(data))
}
return data, err
}
func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte) error {
uuidKV := mustGetUUID() + kvPrefix
historyFile := pathJoin(minioConfigHistoryPrefix, uuidKV)
var err error
if globalConfigEncrypted {
kv, err = madmin.EncryptData(globalActiveCred.String(), kv)
if err != nil {
return err
}
}
// Save the new config KV settings into the history path.
return saveConfig(ctx, objAPI, historyFile, kv)
}
func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config interface{}) error {
data, err := json.Marshal(config)
if err != nil {
return err
}
if globalConfigEncrypted {
data, err = madmin.EncryptData(globalActiveCred.String(), data)
if err != nil {
return err
}
}
configFile := path.Join(minioConfigPrefix, minioConfigFile)
// Save the new config in the std config path
return saveConfig(ctx, objAPI, configFile, data)
}
func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, error) {
configFile := path.Join(minioConfigPrefix, minioConfigFile)
configData, err := readConfig(ctx, objAPI, configFile)
if err != nil {
// Config not found for some reason, allow things to continue
// by initializing a new fresh config in safe mode.
if err == errConfigNotFound && newObjectLayerFn() == nil {
return newServerConfig(), nil
}
return nil, err
}
if globalConfigEncrypted {
configData, err = madmin.DecryptData(globalActiveCred.String(), bytes.NewReader(configData))
if err != nil {
if err == madmin.ErrMaliciousData {
return nil, config.ErrInvalidCredentialsBackendEncrypted(nil)
}
return nil, err
}
}
var srvCfg = config.New()
var json = jsoniter.ConfigCompatibleWithStandardLibrary
if err = json.Unmarshal(configData, &srvCfg); err != nil {
return nil, err
}
// Add any missing entries
return srvCfg.Merge(), nil
}
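
saveServerConfig and readServerConfig are symmetric around the globalConfigEncrypted flag: madmin.EncryptData on the way in, madmin.DecryptData on the way out, both keyed by the active credential string. A self-contained round trip using the same madmin helpers (the secret here is a stand-in for globalActiveCred.String()):

package main

import (
	"bytes"
	"fmt"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	secret := "minio:minio123" // stand-in for globalActiveCred.String()
	plain := []byte(`{"version":"33"}`)

	// Seal the config the same way saveServerConfig does.
	enc, err := madmin.EncryptData(secret, plain)
	if err != nil {
		panic(err)
	}
	// Unseal it the same way readServerConfig does.
	dec, err := madmin.DecryptData(secret, bytes.NewReader(enc))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(plain, dec)) // true
}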
// ConfigSys - config system.
type ConfigSys struct{}
// Load - load config.json.
func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
return sys.Init(objAPI)
}
// WatchConfigNASDisk - watches the NAS disk on a periodic basis.
func (sys *ConfigSys) WatchConfigNASDisk(ctx context.Context, objAPI ObjectLayer) {
configInterval := globalRefreshIAMInterval
watchDisk := func() {
for {
select {
case <-ctx.Done():
return
case <-time.After(configInterval):
loadConfig(objAPI)
}
}
}
// Refresh configSys in background for NAS gateway.
go watchDisk()
}
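
WatchConfigNASDisk is the standard context-cancellable poll loop. A compressed, runnable version of the same select pattern with a short interval (watch and refresh are illustrative helpers, not the real API):

package main

import (
	"context"
	"fmt"
	"time"
)

// watch calls refresh every interval until the context is cancelled.
func watch(ctx context.Context, interval time.Duration, refresh func()) {
	for {
		select {
		case <-ctx.Done():
			return
		case <-time.After(interval):
			refresh()
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 35*time.Millisecond)
	defer cancel()
	watch(ctx, 10*time.Millisecond, func() { fmt.Println("reloaded config") })
}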
// Init - initializes config system from config.json.
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
if objAPI == nil {
return errInvalidArgument
}
return initConfig(objAPI)
}
// NewConfigSys - creates new config system object.
func NewConfigSys() *ConfigSys {
return &ConfigSys{}
}
// Initialize and load config from remote etcd or local config directory
func initConfig(objAPI ObjectLayer) error {
if objAPI == nil {
return errServerNotInitialized
}
if isFile(getConfigFile()) {
if err := migrateConfig(); err != nil {
return err
}
}
// Migrates ${HOME}/.minio/config.json or config.json.deprecated
// to '<export_path>/.minio.sys/config/config.json'
// ignore if the file doesn't exist.
// If etcd is set then migrates /config/config.json
// to '<export_path>/.minio.sys/config/config.json'
if err := migrateConfigToMinioSys(objAPI); err != nil {
return err
}
// Migrates backend '<export_path>/.minio.sys/config/config.json' to latest version.
if err := migrateMinioSysConfig(objAPI); err != nil {
return err
}
// Migrates backend '<export_path>/.minio.sys/config/config.json' to
// latest config format.
if err := migrateMinioSysConfigToKV(objAPI); err != nil {
return err
}
return loadConfig(objAPI)
}

View file

@ -1,117 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import (
"encoding/json"
"errors"
"strconv"
"strings"
"time"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/pkg/env"
)
// API sub-system constants
const (
apiRequestsMax = "requests_max"
apiRequestsDeadline = "requests_deadline"
apiReadyDeadline = "ready_deadline"
apiCorsAllowOrigin = "cors_allow_origin"
EnvAPIRequestsMax = "MINIO_API_REQUESTS_MAX"
EnvAPIRequestsDeadline = "MINIO_API_REQUESTS_DEADLINE"
EnvAPIReadyDeadline = "MINIO_API_READY_DEADLINE"
EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN"
)
// DefaultKVS - default API sub-system config
var (
DefaultKVS = config.KVS{
config.KV{
Key: apiRequestsMax,
Value: "0",
},
config.KV{
Key: apiRequestsDeadline,
Value: "10s",
},
config.KV{
Key: apiReadyDeadline,
Value: "10s",
},
config.KV{
Key: apiCorsAllowOrigin,
Value: "*",
},
}
)
// Config API sub-system configuration
type Config struct {
APIRequestsMax int `json:"requests_max"`
APIRequestsDeadline time.Duration `json:"requests_deadline"`
APIReadyDeadline time.Duration `json:"ready_deadline"`
APICorsAllowOrigin []string `json:"cors_allow_origin"`
}
// UnmarshalJSON - custom unmarshaler for the API sub-system config.
func (sCfg *Config) UnmarshalJSON(data []byte) error {
type Alias Config
aux := &struct {
*Alias
}{
Alias: (*Alias)(sCfg),
}
return json.Unmarshal(data, &aux)
}
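
The "type Alias Config" indirection above is the standard Go trick for custom unmarshalers: Alias shares Config's fields but has none of its methods, so the inner json.Unmarshal cannot re-enter Config.UnmarshalJSON and recurse forever. A minimal demonstration with a hypothetical one-field struct:

package main

import (
	"encoding/json"
	"fmt"
)

type Config struct {
	RequestsMax int `json:"requests_max"`
}

func (c *Config) UnmarshalJSON(data []byte) error {
	type Alias Config // method-free copy of Config, breaks the recursion
	aux := &struct{ *Alias }{Alias: (*Alias)(c)}
	return json.Unmarshal(data, aux)
}

func main() {
	var c Config
	_ = json.Unmarshal([]byte(`{"requests_max":1600}`), &c)
	fmt.Println(c.RequestsMax) // 1600
}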
// LookupConfig - lookup api config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS) (cfg Config, err error) {
if err = config.CheckValidKeys(config.APISubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
// Check environment variables parameters
requestsMax, err := strconv.Atoi(env.Get(EnvAPIRequestsMax, kvs.Get(apiRequestsMax)))
if err != nil {
return cfg, err
}
if requestsMax < 0 {
return cfg, errors.New("invalid API max requests value")
}
requestsDeadline, err := time.ParseDuration(env.Get(EnvAPIRequestsDeadline, kvs.Get(apiRequestsDeadline)))
if err != nil {
return cfg, err
}
readyDeadline, err := time.ParseDuration(env.Get(EnvAPIReadyDeadline, kvs.Get(apiReadyDeadline)))
if err != nil {
return cfg, err
}
corsAllowOrigin := strings.Split(env.Get(EnvAPICorsAllowOrigin, kvs.Get(apiCorsAllowOrigin)), ",")
return Config{
APIRequestsMax: requestsMax,
APIRequestsDeadline: requestsDeadline,
APIReadyDeadline: readyDeadline,
APICorsAllowOrigin: corsAllowOrigin,
}, nil
}
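
Every lookup above follows the same precedence rule via env.Get: the environment variable wins when set, otherwise the stored KV value is used. A minimal re-creation of that rule (get is a stand-in, not the real env.Get):

package main

import (
	"fmt"
	"os"
)

// get resolves a setting: environment variable if set, else the KV value.
func get(envKey, kvValue string) string {
	if v, ok := os.LookupEnv(envKey); ok {
		return v
	}
	return kvValue
}

func main() {
	os.Setenv("MINIO_API_REQUESTS_MAX", "1600")
	fmt.Println(get("MINIO_API_REQUESTS_MAX", "0"))      // 1600: env wins
	fmt.Println(get("MINIO_API_READY_DEADLINE", "10s")) // 10s: falls back to KV
}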

View file

@ -1,43 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api
import "github.com/minio/minio/legacy/config"
// Help template for the API sub-system.
var (
Help = config.HelpKVS{
config.HelpKV{
Key: apiRequestsMax,
Description: `set the maximum number of concurrent requests, e.g. "1600"`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: apiRequestsDeadline,
Description: `set the deadline for API requests waiting to be processed e.g. "1m"`,
Optional: true,
Type: "duration",
},
config.HelpKV{
Key: apiCorsAllowOrigin,
Description: `set comma separated list of origins allowed for CORS requests e.g. "https://example1.com,https://example2.com"`,
Optional: true,
Type: "csv",
},
}
)

View file

@ -1,90 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2017-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"encoding/json"
"fmt"
"strconv"
"strings"
)
// BoolFlag - wrapper bool type.
type BoolFlag bool
// String - returns string of BoolFlag.
func (bf BoolFlag) String() string {
if bf {
return "on"
}
return "off"
}
// MarshalJSON - converts BoolFlag into JSON data.
func (bf BoolFlag) MarshalJSON() ([]byte, error) {
return json.Marshal(bf.String())
}
// UnmarshalJSON - parses given data into BoolFlag.
func (bf *BoolFlag) UnmarshalJSON(data []byte) (err error) {
var s string
if err = json.Unmarshal(data, &s); err == nil {
b := BoolFlag(true)
if s == "" {
// Empty string is treated as valid.
*bf = b
} else if b, err = ParseBoolFlag(s); err == nil {
*bf = b
}
}
return err
}
// FormatBool prints stringified version of boolean.
func FormatBool(b bool) string {
if b {
return "on"
}
return "off"
}
// ParseBool returns the boolean value represented by the string.
// It accepts 1, t, T, TRUE, true, True, on, ON, On, enabled and
// 0, f, F, FALSE, false, False, off, OFF, Off, disabled.
// Any other value returns an error.
func ParseBool(str string) (bool, error) {
switch str {
case "1", "t", "T", "true", "TRUE", "True", "on", "ON", "On":
return true, nil
case "0", "f", "F", "false", "FALSE", "False", "off", "OFF", "Off":
return false, nil
}
if strings.EqualFold(str, "enabled") {
return true, nil
}
if strings.EqualFold(str, "disabled") {
return false, nil
}
return false, fmt.Errorf("ParseBool: parsing '%s': %s", str, strconv.ErrSyntax)
}
// ParseBoolFlag - parses string into BoolFlag.
func ParseBoolFlag(s string) (bf BoolFlag, err error) {
b, err := ParseBool(s)
return BoolFlag(b), err
}
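
A quick round trip showing BoolFlag's wire format, assuming the legacy import path that this commit removes: "on"/"off" strings in JSON, a plain bool in memory.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/minio/minio/legacy/config"
)

func main() {
	var bf config.BoolFlag
	_ = json.Unmarshal([]byte(`"on"`), &bf)
	fmt.Println(bool(bf)) // true

	out, _ := json.Marshal(bf)
	fmt.Println(string(out)) // "on"
}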

View file

@ -1,128 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2017-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"testing"
)
// Test BoolFlag.String()
func TestBoolFlagString(t *testing.T) {
var bf BoolFlag
testCases := []struct {
flag BoolFlag
expectedResult string
}{
{bf, "off"},
{BoolFlag(true), "on"},
{BoolFlag(false), "off"},
}
for _, testCase := range testCases {
str := testCase.flag.String()
if testCase.expectedResult != str {
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, str)
}
}
}
// Test BoolFlag.MarshalJSON()
func TestBoolFlagMarshalJSON(t *testing.T) {
var bf BoolFlag
testCases := []struct {
flag BoolFlag
expectedResult string
}{
{bf, `"off"`},
{BoolFlag(true), `"on"`},
{BoolFlag(false), `"off"`},
}
for _, testCase := range testCases {
data, _ := testCase.flag.MarshalJSON()
if testCase.expectedResult != string(data) {
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, string(data))
}
}
}
// Test BoolFlag.UnmarshalJSON()
func TestBoolFlagUnmarshalJSON(t *testing.T) {
testCases := []struct {
data []byte
expectedResult BoolFlag
expectedErr bool
}{
{[]byte(`{}`), BoolFlag(false), true},
{[]byte(`["on"]`), BoolFlag(false), true},
{[]byte(`"junk"`), BoolFlag(false), true},
{[]byte(`""`), BoolFlag(true), false},
{[]byte(`"on"`), BoolFlag(true), false},
{[]byte(`"off"`), BoolFlag(false), false},
{[]byte(`"true"`), BoolFlag(true), false},
{[]byte(`"false"`), BoolFlag(false), false},
{[]byte(`"ON"`), BoolFlag(true), false},
{[]byte(`"OFF"`), BoolFlag(false), false},
}
for _, testCase := range testCases {
var flag BoolFlag
err := (&flag).UnmarshalJSON(testCase.data)
if !testCase.expectedErr && err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
if testCase.expectedErr && err == nil {
t.Fatalf("error: expected error, got = <nil>")
}
if err == nil && testCase.expectedResult != flag {
t.Fatalf("result: expected: %v, got: %v", testCase.expectedResult, flag)
}
}
}
// Test ParseBoolFlag()
func TestParseBoolFlag(t *testing.T) {
testCases := []struct {
flagStr string
expectedResult BoolFlag
expectedErr bool
}{
{"", BoolFlag(false), true},
{"junk", BoolFlag(false), true},
{"true", BoolFlag(true), false},
{"false", BoolFlag(false), false},
{"ON", BoolFlag(true), false},
{"OFF", BoolFlag(false), false},
{"on", BoolFlag(true), false},
{"off", BoolFlag(false), false},
}
for _, testCase := range testCases {
bf, err := ParseBoolFlag(testCase.flagStr)
if !testCase.expectedErr && err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
if testCase.expectedErr && err == nil {
t.Fatalf("error: expected error, got = <nil>")
}
if err == nil && testCase.expectedResult != bf {
t.Fatalf("result: expected: %v, got: %v", testCase.expectedResult, bf)
}
}
}

View file

@ -1,154 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cache
import (
"encoding/json"
"errors"
"path/filepath"
"strings"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/pkg/ellipses"
)
// Config represents cache config settings
type Config struct {
Enabled bool `json:"-"`
Drives []string `json:"drives"`
Expiry int `json:"expiry"`
MaxUse int `json:"maxuse"`
Quota int `json:"quota"`
Exclude []string `json:"exclude"`
After int `json:"after"`
WatermarkLow int `json:"watermark_low"`
WatermarkHigh int `json:"watermark_high"`
Range bool `json:"range"`
}
// UnmarshalJSON - implements the JSON unmarshal interface for
// unmarshalling JSON entries for CacheConfig.
func (cfg *Config) UnmarshalJSON(data []byte) (err error) {
type Alias Config
var _cfg = &struct {
*Alias
}{
Alias: (*Alias)(cfg),
}
if err = json.Unmarshal(data, _cfg); err != nil {
return err
}
if _cfg.Expiry < 0 {
return errors.New("config expiry value should not be negative")
}
if _cfg.MaxUse < 0 {
return errors.New("config max use value should not be null or negative")
}
if _cfg.Quota < 0 {
return errors.New("config quota value should not be null or negative")
}
if _cfg.After < 0 {
return errors.New("cache after value should not be less than 0")
}
if _cfg.WatermarkLow < 0 || _cfg.WatermarkLow > 100 {
return errors.New("config low watermark value should be between 0 and 100")
}
if _cfg.WatermarkHigh < 0 || _cfg.WatermarkHigh > 100 {
return errors.New("config high watermark value should be between 0 and 100")
}
if _cfg.WatermarkLow > 0 && (_cfg.WatermarkLow >= _cfg.WatermarkHigh) {
return errors.New("config low watermark value should be less than high watermark")
}
return nil
}
// Parses given cacheDrivesEnv and returns a list of cache drives.
func parseCacheDrives(drives string) ([]string, error) {
var drivesSlice []string
if len(drives) == 0 {
return drivesSlice, nil
}
drivesSlice = strings.Split(drives, cacheDelimiterLegacy)
if len(drivesSlice) == 1 && drivesSlice[0] == drives {
drivesSlice = strings.Split(drives, cacheDelimiter)
}
var endpoints []string
for _, d := range drivesSlice {
if len(d) == 0 {
return nil, config.ErrInvalidCacheDrivesValue(nil).Msg("cache dir cannot be an empty path")
}
if ellipses.HasEllipses(d) {
s, err := parseCacheDrivePaths(d)
if err != nil {
return nil, err
}
endpoints = append(endpoints, s...)
} else {
endpoints = append(endpoints, d)
}
}
for _, d := range endpoints {
if !filepath.IsAbs(d) {
return nil, config.ErrInvalidCacheDrivesValue(nil).Msg("cache dir should be absolute path: %s", d)
}
}
return endpoints, nil
}
// Parses all arguments and returns a slice of drive paths following the ellipses pattern.
func parseCacheDrivePaths(arg string) (ep []string, err error) {
patterns, perr := ellipses.FindEllipsesPatterns(arg)
if perr != nil {
return []string{}, config.ErrInvalidCacheDrivesValue(nil).Msg(perr.Error())
}
for _, lbls := range patterns.Expand() {
ep = append(ep, strings.Join(lbls, ""))
}
return ep, nil
}
// Parses the given excludes string and returns a list of cache exclude patterns.
func parseCacheExcludes(excludes string) ([]string, error) {
var excludesSlice []string
if len(excludes) == 0 {
return excludesSlice, nil
}
excludesSlice = strings.Split(excludes, cacheDelimiterLegacy)
if len(excludesSlice) == 1 && excludesSlice[0] == excludes {
excludesSlice = strings.Split(excludes, cacheDelimiter)
}
for _, e := range excludesSlice {
if len(e) == 0 {
return nil, config.ErrInvalidCacheExcludesValue(nil).Msg("cache exclude path (%s) cannot be empty", e)
}
if strings.HasPrefix(e, "/") {
return nil, config.ErrInvalidCacheExcludesValue(nil).Msg("cache exclude pattern (%s) cannot start with / as prefix", e)
}
}
return excludesSlice, nil
}
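
A standalone sketch of the delimiter fallback used by parseCacheDrives and parseCacheExcludes above: split on the legacy ";" first, and re-split on "," only when the legacy separator did not occur in the input (";" and "," being the cacheDelimiterLegacy/cacheDelimiter values defined elsewhere in this package):

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// splitCacheList reproduces the fallback above: try the legacy ";"
// separator first, and fall back to "," when no ";" was present.
func splitCacheList(s string) []string {
	parts := strings.Split(s, ";")
	if len(parts) == 1 && parts[0] == s {
		parts = strings.Split(s, ",")
	}
	return parts
}

func main() {
	for _, in := range []string{"/mnt/a;/mnt/b", "/mnt/a,/mnt/b"} {
		for _, d := range splitCacheList(in) {
			// Like parseCacheDrives, a real caller would reject relative paths.
			fmt.Println(d, "absolute:", filepath.IsAbs(d))
		}
	}
}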


@ -1,127 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cache
import (
"reflect"
"runtime"
"testing"
)
// Tests cache drive parsing.
func TestParseCacheDrives(t *testing.T) {
testCases := []struct {
driveStr string
expectedPatterns []string
success bool
}{
// Invalid input
{"bucket1/*;*.png;images/trip/barcelona/*", []string{}, false},
{"bucket1", []string{}, false},
{";;;", []string{}, false},
{",;,;,;", []string{}, false},
}
// Valid inputs
if runtime.GOOS == "windows" {
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"C:/home/drive1;C:/home/drive2;C:/home/drive3", []string{"C:/home/drive1", "C:/home/drive2", "C:/home/drive3"}, true})
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"C:/home/drive{1...3}", []string{"C:/home/drive1", "C:/home/drive2", "C:/home/drive3"}, true})
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"C:/home/drive{1..3}", []string{}, false})
} else {
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"/home/drive1;/home/drive2;/home/drive3", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"/home/drive1,/home/drive2,/home/drive3", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"/home/drive{1...3}", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
testCases = append(testCases, struct {
driveStr string
expectedPatterns []string
success bool
}{"/home/drive{1..3}", []string{}, false})
}
for i, testCase := range testCases {
drives, err := parseCacheDrives(testCase.driveStr)
if err != nil && testCase.success {
t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
}
if err == nil && !testCase.success {
t.Errorf("Test %d: Expected failure but passed instead", i+1)
}
if err == nil {
if !reflect.DeepEqual(drives, testCase.expectedPatterns) {
t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.expectedPatterns, drives)
}
}
}
}
// Tests cache exclude parsing.
func TestParseCacheExclude(t *testing.T) {
testCases := []struct {
excludeStr string
expectedPatterns []string
success bool
}{
// Invalid input
{"/home/drive1;/home/drive2;/home/drive3", []string{}, false},
{"/", []string{}, false},
{";;;", []string{}, false},
// valid input
{"bucket1/*;*.png;images/trip/barcelona/*", []string{"bucket1/*", "*.png", "images/trip/barcelona/*"}, true},
{"bucket1/*,*.png,images/trip/barcelona/*", []string{"bucket1/*", "*.png", "images/trip/barcelona/*"}, true},
{"bucket1", []string{"bucket1"}, true},
}
for i, testCase := range testCases {
excludes, err := parseCacheExcludes(testCase.excludeStr)
if err != nil && testCase.success {
t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
}
if err == nil && !testCase.success {
t.Errorf("Test %d: Expected failure but passed instead", i+1)
}
if err == nil {
if !reflect.DeepEqual(excludes, testCase.expectedPatterns) {
t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.expectedPatterns, excludes)
}
}
}
}


@ -1,78 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cache
import "github.com/minio/minio/legacy/config"
// Help template for caching feature.
var (
Help = config.HelpKVS{
config.HelpKV{
Key: Drives,
Description: `comma separated mountpoints e.g. "/optane1,/optane2"`,
Type: "csv",
},
config.HelpKV{
Key: Expiry,
Description: `cache expiry duration in days e.g. "90"`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: Quota,
Description: `limit cache drive usage in percentage e.g. "90"`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: Exclude,
Description: `comma separated wildcard exclusion patterns e.g. "bucket/*.tmp,*.exe"`,
Optional: true,
Type: "csv",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
config.HelpKV{
Key: After,
Description: `minimum accesses before caching an object`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: WatermarkLow,
Description: `% of cache use at which to stop cache eviction`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: WatermarkHigh,
Description: `% of cache use at which to start cache eviction`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: Range,
Description: `set to "on" or "off" to enable or disable caching of independent range requests per object, defaults to "on"`,
Optional: true,
Type: "string",
},
}
)


@ -1,54 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cache
import (
"fmt"
"strings"
"github.com/minio/minio/legacy/config"
)
const (
cacheDelimiterLegacy = ";"
)
// SetCacheConfig - one-time migration code for moving the older cache config to the new format.
func SetCacheConfig(s config.Config, cfg Config) {
if len(cfg.Drives) == 0 {
// Do not save cache if no settings available.
return
}
s[config.CacheSubSys][config.Default] = config.KVS{
config.KV{
Key: Drives,
Value: strings.Join(cfg.Drives, cacheDelimiter),
},
config.KV{
Key: Exclude,
Value: strings.Join(cfg.Exclude, cacheDelimiter),
},
config.KV{
Key: Expiry,
Value: fmt.Sprintf("%d", cfg.Expiry),
},
config.KV{
Key: Quota,
Value: fmt.Sprintf("%d", cfg.MaxUse),
},
}
}


@ -1,215 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cache
import (
"errors"
"strconv"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/pkg/env"
)
// Cache ENVs
const (
Drives = "drives"
Exclude = "exclude"
Expiry = "expiry"
MaxUse = "maxuse"
Quota = "quota"
After = "after"
WatermarkLow = "watermark_low"
WatermarkHigh = "watermark_high"
Range = "range"
EnvCacheDrives = "MINIO_CACHE_DRIVES"
EnvCacheExclude = "MINIO_CACHE_EXCLUDE"
EnvCacheExpiry = "MINIO_CACHE_EXPIRY"
EnvCacheMaxUse = "MINIO_CACHE_MAXUSE"
EnvCacheQuota = "MINIO_CACHE_QUOTA"
EnvCacheAfter = "MINIO_CACHE_AFTER"
EnvCacheWatermarkLow = "MINIO_CACHE_WATERMARK_LOW"
EnvCacheWatermarkHigh = "MINIO_CACHE_WATERMARK_HIGH"
EnvCacheRange = "MINIO_CACHE_RANGE"
EnvCacheEncryptionMasterKey = "MINIO_CACHE_ENCRYPTION_MASTER_KEY"
DefaultExpiry = "90"
DefaultQuota = "80"
DefaultAfter = "0"
DefaultWaterMarkLow = "70"
DefaultWaterMarkHigh = "80"
)
// DefaultKVS - default KV settings for caching.
var (
DefaultKVS = config.KVS{
config.KV{
Key: Drives,
Value: "",
},
config.KV{
Key: Exclude,
Value: "",
},
config.KV{
Key: Expiry,
Value: DefaultExpiry,
},
config.KV{
Key: Quota,
Value: DefaultQuota,
},
config.KV{
Key: After,
Value: DefaultAfter,
},
config.KV{
Key: WatermarkLow,
Value: DefaultWaterMarkLow,
},
config.KV{
Key: WatermarkHigh,
Value: DefaultWaterMarkHigh,
},
config.KV{
Key: Range,
Value: config.EnableOn,
},
}
)
const (
cacheDelimiter = ","
)
// Enabled returns if cache is enabled.
func Enabled(kvs config.KVS) bool {
drives := kvs.Get(Drives)
return drives != ""
}
// LookupConfig - extracts cache configuration provided by environment
// variables and merge them with provided CacheConfiguration.
func LookupConfig(kvs config.KVS) (Config, error) {
cfg := Config{}
if err := config.CheckValidKeys(config.CacheSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
drives := env.Get(EnvCacheDrives, kvs.Get(Drives))
if len(drives) == 0 {
return cfg, nil
}
var err error
cfg.Drives, err = parseCacheDrives(drives)
if err != nil {
return cfg, err
}
cfg.Enabled = true
if excludes := env.Get(EnvCacheExclude, kvs.Get(Exclude)); excludes != "" {
cfg.Exclude, err = parseCacheExcludes(excludes)
if err != nil {
return cfg, err
}
}
if expiryStr := env.Get(EnvCacheExpiry, kvs.Get(Expiry)); expiryStr != "" {
cfg.Expiry, err = strconv.Atoi(expiryStr)
if err != nil {
return cfg, config.ErrInvalidCacheExpiryValue(err)
}
}
if maxUseStr := env.Get(EnvCacheMaxUse, kvs.Get(MaxUse)); maxUseStr != "" {
cfg.MaxUse, err = strconv.Atoi(maxUseStr)
if err != nil {
return cfg, config.ErrInvalidCacheQuota(err)
}
// maxUse should be a valid percentage.
if cfg.MaxUse < 0 || cfg.MaxUse > 100 {
err := errors.New("config max use value should not be null or negative")
return cfg, config.ErrInvalidCacheQuota(err)
}
cfg.Quota = cfg.MaxUse
} else if quotaStr := env.Get(EnvCacheQuota, kvs.Get(Quota)); quotaStr != "" {
cfg.Quota, err = strconv.Atoi(quotaStr)
if err != nil {
return cfg, config.ErrInvalidCacheQuota(err)
}
// quota should be a valid percentage.
if cfg.Quota < 0 || cfg.Quota > 100 {
err := errors.New("config quota value should not be null or negative")
return cfg, config.ErrInvalidCacheQuota(err)
}
cfg.MaxUse = cfg.Quota
}
if afterStr := env.Get(EnvCacheAfter, kvs.Get(After)); afterStr != "" {
cfg.After, err = strconv.Atoi(afterStr)
if err != nil {
return cfg, config.ErrInvalidCacheAfter(err)
}
// after should be a valid value >= 0.
if cfg.After < 0 {
err := errors.New("cache after value cannot be less than 0")
return cfg, config.ErrInvalidCacheAfter(err)
}
}
if lowWMStr := env.Get(EnvCacheWatermarkLow, kvs.Get(WatermarkLow)); lowWMStr != "" {
cfg.WatermarkLow, err = strconv.Atoi(lowWMStr)
if err != nil {
return cfg, config.ErrInvalidCacheWatermarkLow(err)
}
// WatermarkLow should be a valid percentage.
if cfg.WatermarkLow < 0 || cfg.WatermarkLow > 100 {
err := errors.New("config min watermark value should be between 0 and 100")
return cfg, config.ErrInvalidCacheWatermarkLow(err)
}
}
if highWMStr := env.Get(EnvCacheWatermarkHigh, kvs.Get(WatermarkHigh)); highWMStr != "" {
cfg.WatermarkHigh, err = strconv.Atoi(highWMStr)
if err != nil {
return cfg, config.ErrInvalidCacheWatermarkHigh(err)
}
// WatermarkHigh should be a valid percentage.
if cfg.WatermarkHigh < 0 || cfg.WatermarkHigh > 100 {
err := errors.New("config high watermark value should be between 0 and 100")
return cfg, config.ErrInvalidCacheWatermarkHigh(err)
}
}
if cfg.WatermarkLow > cfg.WatermarkHigh {
err := errors.New("config high watermark value should be greater than low watermark value")
return cfg, config.ErrInvalidCacheWatermarkHigh(err)
}
cfg.Range = true // by default range caching is enabled.
if rangeStr := env.Get(EnvCacheRange, kvs.Get(Range)); rangeStr != "" {
rng, err := config.ParseBool(rangeStr)
if err != nil {
return cfg, config.ErrInvalidCacheRange(err)
}
cfg.Range = rng
}
return cfg, nil
}
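
LookupConfig above consistently prefers the environment over stored config via env.Get(envKey, kvs.Get(key)). A minimal sketch of that precedence rule using only the standard library (the lookup helper is illustrative, not the pkg/env API):

package main

import (
	"fmt"
	"os"
)

// lookup returns the environment value when the variable is set,
// otherwise the value recorded in the config KVS -- the same
// precedence LookupConfig applies for every cache setting.
func lookup(envKey, configValue string) string {
	if v, ok := os.LookupEnv(envKey); ok {
		return v
	}
	return configValue
}

func main() {
	os.Setenv("MINIO_CACHE_EXPIRY", "30")
	fmt.Println(lookup("MINIO_CACHE_EXPIRY", "90")) // env wins: 30
	fmt.Println(lookup("MINIO_CACHE_QUOTA", "80"))  // unset env falls back: 80
}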


@ -1,149 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/tls"
"crypto/x509"
"encoding/pem"
"io/ioutil"
"os"
"path"
"github.com/minio/minio/pkg/env"
)
// EnvCertPassword is the environment variable which contains the password used
// to decrypt the TLS private key. It must be set if the TLS private key is
// password protected.
const EnvCertPassword = "MINIO_CERT_PASSWD"
// ParsePublicCertFile - parses public cert into its *x509.Certificate equivalent.
func ParsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err error) {
// Read certificate file.
var data []byte
if data, err = ioutil.ReadFile(certFile); err != nil {
return nil, err
}
// Trim leading and trailing white space.
data = bytes.TrimSpace(data)
// Parse all certs in the chain.
current := data
for len(current) > 0 {
var pemBlock *pem.Block
if pemBlock, current = pem.Decode(current); pemBlock == nil {
return nil, ErrSSLUnexpectedData(nil).Msg("Could not read PEM block from file %s", certFile)
}
var x509Cert *x509.Certificate
if x509Cert, err = x509.ParseCertificate(pemBlock.Bytes); err != nil {
return nil, ErrSSLUnexpectedData(err)
}
x509Certs = append(x509Certs, x509Cert)
}
if len(x509Certs) == 0 {
return nil, ErrSSLUnexpectedData(nil).Msg("Empty public certificate file %s", certFile)
}
return x509Certs, nil
}
// GetRootCAs - loads all the root CAs found in the given
// certsCAsDir into a certificate pool.
func GetRootCAs(certsCAsDir string) (*x509.CertPool, error) {
rootCAs, _ := x509.SystemCertPool()
if rootCAs == nil {
// In some systems (like Windows) system cert pool is
// not supported or no certificates are present on the
// system - so we create a new cert pool.
rootCAs = x509.NewCertPool()
}
fis, err := ioutil.ReadDir(certsCAsDir)
if err != nil {
if os.IsNotExist(err) || os.IsPermission(err) {
// Return success if the CAs directory is missing or permission is denied.
err = nil
}
return rootCAs, err
}
// Load all custom CA files.
for _, fi := range fis {
caCert, err := ioutil.ReadFile(path.Join(certsCAsDir, fi.Name()))
if err != nil {
// ignore files which are not readable.
continue
}
rootCAs.AppendCertsFromPEM(caCert)
}
return rootCAs, nil
}
// LoadX509KeyPair - load an X509 key pair (private key, certificate)
// from the provided paths. The private key may be encrypted; it is
// decrypted using the environment variable MINIO_CERT_PASSWD.
func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
certPEMBlock, err := ioutil.ReadFile(certFile)
if err != nil {
return tls.Certificate{}, ErrSSLUnexpectedError(err)
}
keyPEMBlock, err := ioutil.ReadFile(keyFile)
if err != nil {
return tls.Certificate{}, ErrSSLUnexpectedError(err)
}
key, rest := pem.Decode(keyPEMBlock)
if len(rest) > 0 {
return tls.Certificate{}, ErrSSLUnexpectedData(nil).Msg("The private key contains additional data")
}
if x509.IsEncryptedPEMBlock(key) {
password := env.Get(EnvCertPassword, "")
if len(password) == 0 {
return tls.Certificate{}, ErrSSLNoPassword(nil)
}
decryptedKey, decErr := x509.DecryptPEMBlock(key, []byte(password))
if decErr != nil {
return tls.Certificate{}, ErrSSLWrongPassword(decErr)
}
keyPEMBlock = pem.EncodeToMemory(&pem.Block{Type: key.Type, Bytes: decryptedKey})
}
cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
if err != nil {
return tls.Certificate{}, ErrSSLUnexpectedData(nil).Msg(err.Error())
}
// Ensure that the private key is not a P-384 or P-521 EC key.
// The Go TLS stack does not provide constant-time implementations of P-384 and P-521.
if priv, ok := cert.PrivateKey.(crypto.Signer); ok {
if pub, ok := priv.Public().(*ecdsa.PublicKey); ok {
switch pub.Params().Name {
case "P-384":
fallthrough
case "P-521":
// unfortunately there is no cleaner way to check
return tls.Certificate{}, ErrSSLUnexpectedData(nil).Msg("tls: the ECDSA curve '%s' is not supported", pub.Params().Name)
}
}
}
return cert, nil
}
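
A self-contained sketch of the pem.Decode loop that ParsePublicCertFile is built around: trim the input, then decode PEM blocks one by one until nothing is left, parsing each as a certificate (reads the PEM path from the command line; illustrative only, without the config error types):

package main

import (
	"bytes"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"
	"os"
)

// parseCertChain mirrors ParsePublicCertFile's decoding loop.
func parseCertChain(data []byte) ([]*x509.Certificate, error) {
	var certs []*x509.Certificate
	current := bytes.TrimSpace(data)
	for len(current) > 0 {
		block, rest := pem.Decode(current)
		if block == nil {
			return nil, fmt.Errorf("could not read PEM block")
		}
		cert, err := x509.ParseCertificate(block.Bytes)
		if err != nil {
			return nil, err
		}
		certs = append(certs, cert)
		current = rest
	}
	if len(certs) == 0 {
		return nil, fmt.Errorf("empty public certificate input")
	}
	return certs, nil
}

func main() {
	data, err := ioutil.ReadFile(os.Args[1]) // path to a PEM certificate file
	if err != nil {
		log.Fatal(err)
	}
	certs, err := parseCertChain(data)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("certificates in chain:", len(certs))
}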


@ -1,498 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
)
func createTempFile(prefix, content string) (tempFile string, err error) {
var tmpfile *os.File
if tmpfile, err = ioutil.TempFile("", prefix); err != nil {
return tempFile, err
}
if _, err = tmpfile.Write([]byte(content)); err != nil {
return tempFile, err
}
if err = tmpfile.Close(); err != nil {
return tempFile, err
}
tempFile = tmpfile.Name()
return tempFile, err
}
func TestParsePublicCertFile(t *testing.T) {
tempFile1, err := createTempFile("public-cert-file", "")
if err != nil {
t.Fatalf("Unable to create temporary file. %v", err)
}
defer os.Remove(tempFile1)
tempFile2, err := createTempFile("public-cert-file",
`-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
AQkBFg50ZXN0c0BtaW5pby5pbzAeFw0xNjEwMTQxMTM0MjJaFw0xNzEwMTQxMTM0
MjJaMH8xCzAJBgNVBAYTAlpZMQ4wDAYDVQQIEwVNaW5pbzERMA8GA1UEBxMISW50
ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF
TWluaW8xHTAbBgkqhkiG9w0BCQEWDnRlc3RzQG1pbmlvLmlvMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQDwNUYB/Sj79WsUE8qnXzzh2glSzWxUE79sCOpQYK83
HWkrl5WxlG8ZxDR1IQV9Ex/lzigJu8G+KXahon6a+3n5GhNrYRe5kIXHQHz0qvv4
aMulqlnYpvSfC83aaO9GVBtwXS/O4Nykd7QBg4nZlazVmsGk7POOjhpjGShRsqpU
JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----`)
if err != nil {
t.Fatalf("Unable to create temporary file. %v", err)
}
defer os.Remove(tempFile2)
tempFile3, err := createTempFile("public-cert-file",
`-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
AQkBFg50ZXN0c0BtaW5pby5pbzAeFw0xNjEwMTQxMTM0MjJaFw0xNzEwMTQxMTM0
MjJaMH8xCzAJBgNVBAYTAlpZMQ4wDAYDVQQIEwVNaW5pbzERMA8GA1UEBxMISW50
ZXJuZXQxDjAMBgNVBAabababababaQ4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF
TWluaW8xHTAbBgkqhkiG9w0BCQEWDnRlc3RzQG1pbmlvLmlvMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQDwNUYB/Sj79WsUE8qnXzzh2glSzWxUE79sCOpQYK83
HWkrl5WxlG8ZxDR1IQV9Ex/lzigJu8G+KXahon6a+3n5GhNrYRe5kIXHQHz0qvv4
aMulqlnYpvSfC83aaO9GVBtwXS/O4Nykd7QBg4nZlazVmsGk7POOjhpjGShRsqpU
JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----`)
if err != nil {
t.Fatalf("Unable to create temporary file. %v", err)
}
defer os.Remove(tempFile3)
tempFile4, err := createTempFile("public-cert-file",
`-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
AQkBFg50ZXN0c0BtaW5pby5pbzAeFw0xNjEwMTQxMTM0MjJaFw0xNzEwMTQxMTM0
MjJaMH8xCzAJBgNVBAYTAlpZMQ4wDAYDVQQIEwVNaW5pbzERMA8GA1UEBxMISW50
ZXJuZXQxDjAMBgNVBAoTBU1pbmlvMQ4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF
TWluaW8xHTAbBgkqhkiG9w0BCQEWDnRlc3RzQG1pbmlvLmlvMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQDwNUYB/Sj79WsUE8qnXzzh2glSzWxUE79sCOpQYK83
HWkrl5WxlG8ZxDR1IQV9Ex/lzigJu8G+KXahon6a+3n5GhNrYRe5kIXHQHz0qvv4
aMulqlnYpvSfC83aaO9GVBtwXS/O4Nykd7QBg4nZlazVmsGk7POOjhpjGShRsqpU
JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----`)
if err != nil {
t.Fatalf("Unable to create temporary file. %v", err)
}
defer os.Remove(tempFile4)
tempFile5, err := createTempFile("public-cert-file",
`-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
AQkBFg50ZXN0c0BtaW5pby5pbzAeFw0xNjEwMTQxMTM0MjJaFw0xNzEwMTQxMTM0
MjJaMH8xCzAJBgNVBAYTAlpZMQ4wDAYDVQQIEwVNaW5pbzERMA8GA1UEBxMISW50
ZXJuZXQxDjAMBgNVBAoTBU1pbmlvMQ4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF
TWluaW8xHTAbBgkqhkiG9w0BCQEWDnRlc3RzQG1pbmlvLmlvMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQDwNUYB/Sj79WsUE8qnXzzh2glSzWxUE79sCOpQYK83
HWkrl5WxlG8ZxDR1IQV9Ex/lzigJu8G+KXahon6a+3n5GhNrYRe5kIXHQHz0qvv4
aMulqlnYpvSfC83aaO9GVBtwXS/O4Nykd7QBg4nZlazVmsGk7POOjhpjGShRsqpU
JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIICdTCCAd4CCQCO5G/W1xcE9TANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJa
WTEOMAwGA1UECBMFTWluaW8xETAPBgNVBAcTCEludGVybmV0MQ4wDAYDVQQKEwVN
aW5pbzEOMAwGA1UECxMFTWluaW8xDjAMBgNVBAMTBU1pbmlvMR0wGwYJKoZIhvcN
AQkBFg50ZXN0c0BtaW5pby5pbzAeFw0xNjEwMTQxMTM0MjJaFw0xNzEwMTQxMTM0
MjJaMH8xCzAJBgNVBAYTAlpZMQ4wDAYDVQQIEwVNaW5pbzERMA8GA1UEBxMISW50
ZXJuZXQxDjAMBgNVBAoTBU1pbmlvMQ4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF
TWluaW8xHTAbBgkqhkiG9w0BCQEWDnRlc3RzQG1pbmlvLmlvMIGfMA0GCSqGSIb3
DQEBAQUAA4GNADCBiQKBgQDwNUYB/Sj79WsUE8qnXzzh2glSzWxUE79sCOpQYK83
HWkrl5WxlG8ZxDR1IQV9Ex/lzigJu8G+KXahon6a+3n5GhNrYRe5kIXHQHz0qvv4
aMulqlnYpvSfC83aaO9GVBtwXS/O4Nykd7QBg4nZlazVmsGk7POOjhpjGShRsqpU
JwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBALqjOA6bD8BEl7hkQ8XwX/owSAL0URDe
nUfCOsXgIIAqgw4uTCLOfCJVZNKmRT+KguvPAQ6Z80vau2UxPX5Q2Q+OHXDRrEnK
FjqSBgLP06Qw7a++bshlWGTt5bHWOneW3EQikedckVuIKPkOCib9yGi4VmBBjdFE
M9ofSEt/bdRD
-----END CERTIFICATE-----`)
if err != nil {
t.Fatalf("Unable to create temporary file. %v", err)
}
defer os.Remove(tempFile5)
nonexistentErr := fmt.Errorf("open nonexistent-file: no such file or directory")
if runtime.GOOS == "windows" {
// Below concatenation is done to get rid of the golint error
// "error strings should not be capitalized or end with punctuation or a newline"
nonexistentErr = fmt.Errorf("open nonexistent-file:" + " The system cannot find the file specified.")
}
testCases := []struct {
certFile string
expectedResultLen int
expectedErr error
}{
{"nonexistent-file", 0, nonexistentErr},
{tempFile1, 0, fmt.Errorf("Empty public certificate file %s", tempFile1)},
{tempFile2, 0, fmt.Errorf("Could not read PEM block from file %s", tempFile2)},
{tempFile3, 0, fmt.Errorf("asn1: structure error: sequence tag mismatch")},
{tempFile4, 1, nil},
{tempFile5, 2, nil},
}
for _, testCase := range testCases {
certs, err := ParsePublicCertFile(testCase.certFile)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
if len(certs) != testCase.expectedResultLen {
t.Fatalf("certs: expected = %v, got = %v", testCase.expectedResultLen, len(certs))
}
}
}
func TestGetRootCAs(t *testing.T) {
emptydir, err := ioutil.TempDir("", "test-get-root-cas")
if err != nil {
t.Fatalf("Unable create temp directory. %v", emptydir)
}
defer os.RemoveAll(emptydir)
dir1, err := ioutil.TempDir("", "test-get-root-cas")
if err != nil {
t.Fatalf("Unable create temp directory. %v", dir1)
}
defer os.RemoveAll(dir1)
if err = os.Mkdir(filepath.Join(dir1, "empty-dir"), 0755); err != nil {
t.Fatalf("Unable create empty dir. %v", err)
}
dir2, err := ioutil.TempDir("", "test-get-root-cas")
if err != nil {
t.Fatalf("Unable create temp directory. %v", dir2)
}
defer os.RemoveAll(dir2)
if err = ioutil.WriteFile(filepath.Join(dir2, "empty-file"), []byte{}, 0644); err != nil {
t.Fatalf("Unable create test file. %v", err)
}
testCases := []struct {
certCAsDir string
expectedErr error
}{
// ignores non-existent directories.
{"nonexistent-dir", nil},
// Ignores directories.
{dir1, nil},
// Ignore empty directory.
{emptydir, nil},
// Loads the cert properly.
{dir2, nil},
}
for _, testCase := range testCases {
_, err := GetRootCAs(testCase.certCAsDir)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
}
}
func TestLoadX509KeyPair(t *testing.T) {
for i, testCase := range loadX509KeyPairTests {
privateKey, err := createTempFile("private.key", testCase.privateKey)
if err != nil {
t.Fatalf("Test %d: failed to create tmp private key file: %v", i, err)
}
certificate, err := createTempFile("public.crt", testCase.certificate)
if err != nil {
os.Remove(privateKey)
t.Fatalf("Test %d: failed to create tmp certificate file: %v", i, err)
}
os.Unsetenv(EnvCertPassword)
if testCase.password != "" {
os.Setenv(EnvCertPassword, testCase.password)
}
_, err = LoadX509KeyPair(certificate, privateKey)
if err != nil && !testCase.shouldFail {
t.Errorf("Test %d: test should succeed but it failed: %v", i, err)
}
if err == nil && testCase.shouldFail {
t.Errorf("Test %d: test should fail but it succeed", i)
}
os.Remove(privateKey)
os.Remove(certificate)
}
}
var loadX509KeyPairTests = []struct {
password string
privateKey, certificate string
shouldFail bool
}{
{
password: "foobar",
privateKey: `-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,CC483BF11678C35F9F02A1AD85DAE285
nMDFd+Qxk1f+S7LwMitmMofNXYNbCY4L1QEqPOOx5wnjNF1wSxmEkL7+h8W4Y/vb
AQt/7TCcUSuSqEMl45nUIcCbhBos5wz+ShvFiez3qKwmR5HSURvqyN6PIJeAbU+h
uw/cvAQsCH1Cq+gYkDJqjrizPhGqg7mSkqyeST3PbOl+ZXc0wynIjA34JSwO3c5j
cF7XKHETtNGj1+AiLruX4wYZAJwQnK375fCoNVMO992zC6K83d8kvGMUgmJjkiIj
q3s4ymFGfoo0S/XNDQXgE5A5QjAKRKUyW2i7pHIIhTyOpeJQeFHDi2/zaZRxoCog
lD2/HKLi5xJtRelZaaGyEJ20c05VzaSZ+EtRIN33foNdyQQL6iAUU3hJ6JlcmRIB
bRfX4XPH1w9UfFU5ZKwUciCoDcL65bsyv/y56ItljBp7Ok+UUKl0H4myFNOSfsuU
IIj4neslnAvwQ8SN4XUpug+7pGF+2m/5UDwRzSUN1H2RfgWN95kqR+tYqCq/E+KO
i0svzFrljSHswsFoPBqKngI7hHwc9QTt5q4frXwj9I4F6HHrTKZnC5M4ef26sbJ1
r7JRmkt0h/GfcS355b0uoBTtF1R8tSJo85Zh47wE+ucdjEvy9/pjnzKqIoJo9bNZ
ri+ue7GhH5EUca1Kd10bH8FqTF+8AHh4yW6xMxSkSgFGp7KtraAVpdp+6kosymqh
dz9VMjA8i28btfkS2isRaCpyumaFYJ3DJMFYhmeyt6gqYovmRLX0qrBf8nrkFTAA
ZmykWsc8ErsCudxlDmKVemuyFL7jtm9IRPq+Jh+IrmixLJFx8PKkNAM6g+A8irx8
piw+yhRsVy5Jk2QeIqvbpxN6BfCNcix4sWkusiCJrAqQFuSm26Mhh53Ig1DXG4d3
6QY1T8tW80Q6JHUtDR+iOPqW6EmrNiEopzirvhGv9FicXZ0Lo2yKJueeeihWhFLL
GmlnCjWVMO4hoo8lWCHv95JkPxGMcecCacKKUbHlXzCGyw3+eeTEHMWMEhziLeBy
HZJ1/GReI3Sx7XlUCkG4468Yz3PpmbNIk/U5XKE7TGuxKmfcWQpu022iF/9DrKTz
KVhKimCBXJX345bCFe1rN2z5CV6sv87FkMs5Y+OjPw6qYFZPVKO2TdUUBcpXbQMg
UW+Kuaax9W7214Stlil727MjRCiH1+0yODg4nWj4pTSocA5R3pn5cwqrjMu97OmL
ESx4DHmy4keeSy3+AIAehCZlwgeLb70/xCSRhJMIMS9Q6bz8CPkEWN8bBZt95oeo
37LqZ7lNmq61fs1x1tq0VUnI9HwLFEnsiubp6RG0Yu8l/uImjjjXa/ytW2GXrfUi
zM22dOntu6u23iBxRBJRWdFTVUz7qrdu+PHavr+Y7TbCeiBwiypmz5llf823UIVx
btamI6ziAq2gKZhObIhut7sjaLkAyTLlNVkNN1WNaplAXpW25UFVk93MHbvZ27bx
9iLGs/qB2kDTUjffSQoHTLY1GoLxv83RgVspUGQjslztEEpWfYvGfVLcgYLv933B
aRW9BRoNZ0czKx7Lhuwjreyb5IcWDarhC8q29ZkkWsQQonaPb0kTEFJul80Yqk0k
-----END RSA PRIVATE KEY-----`,
certificate: `-----BEGIN CERTIFICATE-----
MIIDiTCCAnGgAwIBAgIJAK5m5S7EE46kMA0GCSqGSIb3DQEBCwUAMFsxCzAJBgNV
BAYTAlVTMQ4wDAYDVQQIDAVzdGF0ZTERMA8GA1UEBwwIbG9jYXRpb24xFTATBgNV
BAoMDG9yZ2FuaXphdGlvbjESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTE3MTIxODE4
MDUyOFoXDTI3MTIxNjE4MDUyOFowWzELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBXN0
YXRlMREwDwYDVQQHDAhsb2NhdGlvbjEVMBMGA1UECgwMb3JnYW5pemF0aW9uMRIw
EAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDPJfYY5Dhsntrqwyu7ZgKM/zrlKEjCwGHhWJBdZdeZCHQlY8ISrtDxxp2XMmI6
HsszalEhNF9fk3vSXWclTuomG03fgGzP4R6QpcwGUCxhRF1J+0b64Yi8pw2uEGsR
GuMwLhGorcWalNoihgHc0BQ4vO8aaTNTX7iD06olesP6vGNu/S8h0VomE+0v9qYc
VF66Zaiv/6OmxAtDpElJjVd0mY7G85BlDlFrVwzd7zhRiuJZ4iDg749Xt9GuuKla
Dvr14glHhP4dQgUbhluJmIHMdx2ZPjk+5FxaDK6I9IUpxczFDe4agDE6lKzU1eLd
cCXRWFOf6q9lTB1hUZfmWfTxAgMBAAGjUDBOMB0GA1UdDgQWBBTQh7lDTq+8salD
0HBNILochiiNaDAfBgNVHSMEGDAWgBTQh7lDTq+8salD0HBNILochiiNaDAMBgNV
HRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAqi9LycxcXKNSDXaPkCKvw7RQy
iMBDGm1kIY++p3tzbUGuaeu85TsswKnqd50AullEU+aQxRRJGfR8eSKzQJMBXLMQ
b4ptYCc5OrZtRHT8NaZ/df2tc6I88kN8dBu6ybcNGsevXA/iNX3kKLW7naxdr5jj
KUudWSuqDCjCmQa5bYb9H6DreLH2lUItSWBa/YmeZ3VSezDCd+XYO53QKwZVj8Jb
bulZmoo7e7HO1qecEzWKL10UYyEbG3UDPtw+NZc142ZYeEhXQ0dsstGAO5hf3hEl
kQyKGUTpDbKLuyYMFsoH73YLjBqNe+UEhPwE+FWpcky1Sp9RTx/oMLpiZaPR
-----END CERTIFICATE-----`,
shouldFail: false,
},
{
password: "password",
privateKey: `-----BEGIN RSA PRIVATE KEY-----
Proc-Type: 4,ENCRYPTED
DEK-Info: AES-128-CBC,CC483BF11678C35F9F02A1AD85DAE285
nMDFd+Qxk1f+S7LwMitmMofNXYNbCY4L1QEqPOOx5wnjNF1wSxmEkL7+h8W4Y/vb
AQt/7TCcUSuSqEMl45nUIcCbhBos5wz+ShvFiez3qKwmR5HSURvqyN6PIJeAbU+h
uw/cvAQsCH1Cq+gYkDJqjrizPhGqg7mSkqyeST3PbOl+ZXc0wynIjA34JSwO3c5j
cF7XKHETtNGj1+AiLruX4wYZAJwQnK375fCoNVMO992zC6K83d8kvGMUgmJjkiIj
q3s4ymFGfoo0S/XNDQXgE5A5QjAKRKUyW2i7pHIIhTyOpeJQeFHDi2/zaZRxoCog
lD2/HKLi5xJtRelZaaGyEJ20c05VzaSZ+EtRIN33foNdyQQL6iAUU3hJ6JlcmRIB
bRfX4XPH1w9UfFU5ZKwUciCoDcL65bsyv/y56ItljBp7Ok+UUKl0H4myFNOSfsuU
IIj4neslnAvwQ8SN4XUpug+7pGF+2m/5UDwRzSUN1H2RfgWN95kqR+tYqCq/E+KO
i0svzFrljSHswsFoPBqKngI7hHwc9QTt5q4frXwj9I4F6HHrTKZnC5M4ef26sbJ1
r7JRmkt0h/GfcS355b0uoBTtF1R8tSJo85Zh47wE+ucdjEvy9/pjnzKqIoJo9bNZ
ri+ue7GhH5EUca1Kd10bH8FqTF+8AHh4yW6xMxSkSgFGp7KtraAVpdp+6kosymqh
dz9VMjA8i28btfkS2isRaCpyumaFYJ3DJMFYhmeyt6gqYovmRLX0qrBf8nrkFTAA
ZmykWsc8ErsCudxlDmKVemuyFL7jtm9IRPq+Jh+IrmixLJFx8PKkNAM6g+A8irx8
piw+yhRsVy5Jk2QeIqvbpxN6BfCNcix4sWkusiCJrAqQFuSm26Mhh53Ig1DXG4d3
6QY1T8tW80Q6JHUtDR+iOPqW6EmrNiEopzirvhGv9FicXZ0Lo2yKJueeeihWhFLL
GmlnCjWVMO4hoo8lWCHv95JkPxGMcecCacKKUbHlXzCGyw3+eeTEHMWMEhziLeBy
HZJ1/GReI3Sx7XlUCkG4468Yz3PpmbNIk/U5XKE7TGuxKmfcWQpu022iF/9DrKTz
KVhKimCBXJX345bCFe1rN2z5CV6sv87FkMs5Y+OjPw6qYFZPVKO2TdUUBcpXbQMg
UW+Kuaax9W7214Stlil727MjRCiH1+0yODg4nWj4pTSocA5R3pn5cwqrjMu97OmL
ESx4DHmy4keeSy3+AIAehCZlwgeLb70/xCSRhJMIMS9Q6bz8CPkEWN8bBZt95oeo
37LqZ7lNmq61fs1x1tq0VUnI9HwLFEnsiubp6RG0Yu8l/uImjjjXa/ytW2GXrfUi
zM22dOntu6u23iBxRBJRWdFTVUz7qrdu+PHavr+Y7TbCeiBwiypmz5llf823UIVx
btamI6ziAq2gKZhObIhut7sjaLkAyTLlNVkNN1WNaplAXpW25UFVk93MHbvZ27bx
9iLGs/qB2kDTUjffSQoHTLY1GoLxv83RgVspUGQjslztEEpWfYvGfVLcgYLv933B
aRW9BRoNZ0czKx7Lhuwjreyb5IcWDarhC8q29ZkkWsQQonaPb0kTEFJul80Yqk0k
-----END RSA PRIVATE KEY-----`,
certificate: `-----BEGIN CERTIFICATE-----
MIIDiTCCAnGgAwIBAgIJAK5m5S7EE46kMA0GCSqGSIb3DQEBCwUAMFsxCzAJBgNV
BAYTAlVTMQ4wDAYDVQQIDAVzdGF0ZTERMA8GA1UEBwwIbG9jYXRpb24xFTATBgNV
BAoMDG9yZ2FuaXphdGlvbjESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTE3MTIxODE4
MDUyOFoXDTI3MTIxNjE4MDUyOFowWzELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBXN0
YXRlMREwDwYDVQQHDAhsb2NhdGlvbjEVMBMGA1UECgwMb3JnYW5pemF0aW9uMRIw
EAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDPJfYY5Dhsntrqwyu7ZgKM/zrlKEjCwGHhWJBdZdeZCHQlY8ISrtDxxp2XMmI6
HsszalEhNF9fk3vSXWclTuomG03fgGzP4R6QpcwGUCxhRF1J+0b64Yi8pw2uEGsR
GuMwLhGorcWalNoihgHc0BQ4vO8aaTNTX7iD06olesP6vGNu/S8h0VomE+0v9qYc
VF66Zaiv/6OmxAtDpElJjVd0mY7G85BlDlFrVwzd7zhRiuJZ4iDg749Xt9GuuKla
Dvr14glHhP4dQgUbhluJmIHMdx2ZPjk+5FxaDK6I9IUpxczFDe4agDE6lKzU1eLd
cCXRWFOf6q9lTB1hUZfmWfTxAgMBAAGjUDBOMB0GA1UdDgQWBBTQh7lDTq+8salD
0HBNILochiiNaDAfBgNVHSMEGDAWgBTQh7lDTq+8salD0HBNILochiiNaDAMBgNV
HRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAqi9LycxcXKNSDXaPkCKvw7RQy
iMBDGm1kIY++p3tzbUGuaeu85TsswKnqd50AullEU+aQxRRJGfR8eSKzQJMBXLMQ
b4ptYCc5OrZtRHT8NaZ/df2tc6I88kN8dBu6ybcNGsevXA/iNX3kKLW7naxdr5jj
KUudWSuqDCjCmQa5bYb9H6DreLH2lUItSWBa/YmeZ3VSezDCd+XYO53QKwZVj8Jb
bulZmoo7e7HO1qecEzWKL10UYyEbG3UDPtw+NZc142ZYeEhXQ0dsstGAO5hf3hEl
kQyKGUTpDbKLuyYMFsoH73YLjBqNe+UEhPwE+FWpcky1Sp9RTx/oMLpiZaPR
-----END CERTIFICATE-----`,
shouldFail: true,
},
{
password: "",
privateKey: `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA4K9Qq7vMY2bGkrdFAYpBYNLlCgnnFU+0pi+N+3bjuWmfX/kw
WXBa3SDqKD08PWWzwvBSLPCCUV2IuUd7tBa1pJ2wXkdoDeI5InYHJKrXbSZonni6
Bex7sgnqV/9o8xFkSOleoQWZgyeKGxtt0J/Z+zhpH+zXahwM4wOL3yzLSQt+NCKM
6N96zXYi16DEa89fYwRxPwE1XTRc7Ddggqx+4iRHvYG0fyTNcPB/+UiFw59EE1Sg
QIyTVntVqpsb6s8XdkFxURoLxefhcMVf2kU0T04OWI3gmeavKfTcj8Z2/bjPSsqP
mgkADv9Ru6VnSK/96TW/NwxWJ32PBz6Sbl9LdwIDAQABAoIBABVh+d5uH/RxyoIZ
+PI9kx1A1NVQvfI0RK/wJKYC2YdCuw0qLOTGIY+b20z7DumU7TenIVrvhKdzrFhd
qjMoWh8RdsByMT/pAKD79JATxi64EgrK2IFJ0TfPY8L+JqHDTPT3aK8QVly5/ZW4
1YmePOOAqdiE9Lc/diaApuYVYD9SL/X7fYs1ezOB4oGXoz0rthX77zHMxcEurpK3
VgSnaq7FYTVY7GrFB+ASiAlDIyLwztz08Ijn8aG0QAZ8GFuPGSmPMXWjLwFhRZsa
Gfy5BYiA0bVSnQSPHzAnHu9HyGlsdouVPPvJB3SrvMl+BFhZiUuR8OGSob7z7hfI
hMyHbNECgYEA/gyG7sHAb5mPkhq9JkTv+LrMY5NDZKYcSlbvBlM3kd6Ib3Hxl+6T
FMq2TWIrh2+mT1C14htziHd05dF6St995Tby6CJxTj6a/2Odnfm+JcOou/ula4Sz
92nIGlGPTJXstDbHGnRCpk6AomXK02stydTyrCisOw1H+LyTG6aT0q8CgYEA4mkO
hfLJkgmJzWIhxHR901uWHz/LId0gC6FQCeaqWmRup6Bl97f0U6xokw4tw8DJOncF
yZpYRXUXhdv/FXCjtXvAhKIX5+e+3dlzPHIdekSfcY00ip/ifAS1OyVviJia+cna
eJgq8WLHxJZim9Ah93NlPyiqGPwtasub90qjZbkCgYEA35WK02o1wII3dvCNc7bM
M+3CoAglEdmXoF1uM/TdPUXKcbqoU3ymeXAGjYhOov3CMp/n0z0xqvLnMLPxmx+i
ny6DDYXyjlhO9WFogHYhwP636+mHJl8+PAsfDvqk0VRJZDmpdUDIv7DrSQGpRfRX
8f+2K4oIOlhv9RuRpI4wHwUCgYB8OjaMyn1NEsy4k2qBt4U+jhcdyEv1pbWqi/U1
qYm5FTgd44VvWVDHBGdQoMv9h28iFCJpzrU2Txv8B4y7v9Ujg+ZLIAFL7j0szt5K
wTZpWvO9Q0Qb98Q2VgL2lADRiyIlglrMJnoRfiisNfOfGKE6e+eGsxI5qUxmN5e5
JQvoiQKBgQCqgyuUBIu/Qsb3qUED/o0S5wCel43Yh/Rl+mxDinOUvJfKJSW2SyEk
+jDo0xw3Opg6ZC5Lj2V809LA/XteaIuyhRuqOopjhHIvIvrYGe+2O8q9/Mv40BYW
0BhJ/Gdseps0C6Z5mTT5Fee4YVlGZuyuNKmKTd4JmqInfBV3ncMWQg==
-----END RSA PRIVATE KEY-----`,
certificate: `-----BEGIN CERTIFICATE-----
MIIDiTCCAnGgAwIBAgIJAIb84Z5Mh31iMA0GCSqGSIb3DQEBCwUAMFsxCzAJBgNV
BAYTAlVTMQ4wDAYDVQQIDAVzdGF0ZTERMA8GA1UEBwwIbG9jYXRpb24xFTATBgNV
BAoMDG9yZ2FuaXphdGlvbjESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTE3MTIxODE4
NTcyM1oXDTI3MTIxNjE4NTcyM1owWzELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBXN0
YXRlMREwDwYDVQQHDAhsb2NhdGlvbjEVMBMGA1UECgwMb3JnYW5pemF0aW9uMRIw
EAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDgr1Cru8xjZsaSt0UBikFg0uUKCecVT7SmL437duO5aZ9f+TBZcFrdIOooPTw9
ZbPC8FIs8IJRXYi5R3u0FrWknbBeR2gN4jkidgckqtdtJmieeLoF7HuyCepX/2jz
EWRI6V6hBZmDJ4obG23Qn9n7OGkf7NdqHAzjA4vfLMtJC340Iozo33rNdiLXoMRr
z19jBHE/ATVdNFzsN2CCrH7iJEe9gbR/JM1w8H/5SIXDn0QTVKBAjJNWe1Wqmxvq
zxd2QXFRGgvF5+FwxV/aRTRPTg5YjeCZ5q8p9NyPxnb9uM9Kyo+aCQAO/1G7pWdI
r/3pNb83DFYnfY8HPpJuX0t3AgMBAAGjUDBOMB0GA1UdDgQWBBQ2/bSCHscnoV+0
d+YJxLu4XLSNIDAfBgNVHSMEGDAWgBQ2/bSCHscnoV+0d+YJxLu4XLSNIDAMBgNV
HRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC6p4gPwmkoDtRsP1c8IWgXFka+
Q59oe79ZK1RqDE6ZZu0rgw07rPzKr4ofW4hTxnx7PUgKOhWLq9VvwEC/9tDbD0Gw
SKknRZZOiEE3qUZbwNtHMd4UBzpzChTRC6RcwC5zT1/WICMUHxa4b8E2umJuf3Qd
5Y23sXEESx5evr49z6DLcVe2i70o2wJeWs2kaXqhCJt0X7z0rnYqjfFdvxd8dyzt
1DXmE45cLadpWHDg26DMsdchamgnqEo79YUxkH6G/Cb8ZX4igQ/CsxCDOKvccjHO
OncDtuIpK8O7OyfHP3+MBpUFG4P6Ctn7RVcZe9fQweTpfAy18G+loVzuUeOD
-----END CERTIFICATE-----`,
shouldFail: false,
},
{
password: "foobar",
privateKey: `-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEA4K9Qq7vMY2bGkrdFAYpBYNLlCgnnFU+0pi+N+3bjuWmfX/kw
WXBa3SDqKD08PWWzwvBSLPCCUV2IuUd7tBa1pJ2wXkdoDeI5InYHJKrXbSZonni6
Bex7sgnqV/9o8xFkSOleoQWZgyeKGxtt0J/Z+zhpH+zXahwM4wOL3yzLSQt+NCKM
6N96zXYi16DEa89fYwRxPwE1XTRc7Ddggqx+4iRHvYG0fyTNcPB/+UiFw59EE1Sg
QIyTVntVqpsb6s8XdkFxURoLxefhcMVf2kU0T04OWI3gmeavKfTcj8Z2/bjPSsqP
mgkADv9Ru6VnSK/96TW/NwxWJ32PBz6Sbl9LdwIDAQABAoIBABVh+d5uH/RxyoIZ
+PI9kx1A1NVQvfI0RK/wJKYC2YdCuw0qLOTGIY+b20z7DumU7TenIVrvhKdzrFhd
qjMoWh8RdsByMT/pAKD79JATxi64EgrK2IFJ0TfPY8L+JqHDTPT3aK8QVly5/ZW4
1YmePOOAqdiE9Lc/diaApuYVYD9SL/X7fYs1ezOB4oGXoz0rthX77zHMxcEurpK3
VgSnaq7FYTVY7GrFB+ASiAlDIyLwztz08Ijn8aG0QAZ8GFuPGSmPMXWjLwFhRZsa
Gfy5BYiA0bVSnQSPHzAnHu9HyGlsdouVPPvJB3SrvMl+BFhZiUuR8OGSob7z7hfI
hMyHbNECgYEA/gyG7sHAb5mPkhq9JkTv+LrMY5NDZKYcSlbvBlM3kd6Ib3Hxl+6T
FMq2TWIrh2+mT1C14htziHd05dF6St995Tby6CJxTj6a/2Odnfm+JcOou/ula4Sz
92nIGlGPTJXstDbHGnRCpk6AomXK02stydTyrCisOw1H+LyTG6aT0q8CgYEA4mkO
hfLJkgmJzWIhxHR901uWHz/LId0gC6FQCeaqWmRup6Bl97f0U6xokw4tw8DJOncF
yZpYRXUXhdv/FXCjtXvAhKIX5+e+3dlzPHIdekSfcY00ip/ifAS1OyVviJia+cna
eJgq8WLHxJZim9Ah93NlPyiqGPwtasub90qjZbkCgYEA35WK02o1wII3dvCNc7bM
M+3CoAglEdmXoF1uM/TdPUXKcbqoU3ymeXAGjYhOov3CMp/n0z0xqvLnMLPxmx+i
ny6DDYXyjlhO9WFogHYhwP636+mHJl8+PAsfDvqk0VRJZDmpdUDIv7DrSQGpRfRX
8f+2K4oIOlhv9RuRpI4wHwUCgYB8OjaMyn1NEsy4k2qBt4U+jhcdyEv1pbWqi/U1
qYm5FTgd44VvWVDHBGdQoMv9h28iFCJpzrU2Txv8B4y7v9Ujg+ZLIAFL7j0szt5K
wTZpWvO9Q0Qb98Q2VgL2lADRiyIlglrMJnoRfiisNfOfGKE6e+eGsxI5qUxmN5e5
JQvoiQKBgQCqgyuUBIu/Qsb3qUED/o0S5wCel43Yh/Rl+mxDinOUvJfKJSW2SyEk
+jDo0xw3Opg6ZC5Lj2V809LA/XteaIuyhRuqOopjhHIvIvrYGe+2O8q9/Mv40BYW
0BhJ/Gdseps0C6Z5mTT5Fee4YVlGZuyuNKmKTd4JmqInfBV3ncMWQg==
-----END RSA PRIVATE KEY-----`,
certificate: `-----BEGIN CERTIFICATE-----
MIIDiTCCAnGgAwIBAgIJAIb84Z5Mh31iMA0GCSqGSIb3DQEBCwUAMFsxCzAJBgNV
BAYTAlVTMQ4wDAYDVQQIDAVzdGF0ZTERMA8GA1UEBwwIbG9jYXRpb24xFTATBgNV
BAoMDG9yZ2FuaXphdGlvbjESMBAGA1UEAwwJbG9jYWxob3N0MB4XDTE3MTIxODE4
NTcyM1oXDTI3MTIxNjE4NTcyM1owWzELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBXN0
YXRlMREwDwYDVQQHDAhsb2NhdGlvbjEVMBMGA1UECgwMb3JnYW5pemF0aW9uMRIw
EAYDVQQDDAlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
AQDgr1Cru8xjZsaSt0UBikFg0uUKCecVT7SmL437duO5aZ9f+TBZcFrdIOooPTw9
ZbPC8FIs8IJRXYi5R3u0FrWknbBeR2gN4jkidgckqtdtJmieeLoF7HuyCepX/2jz
EWRI6V6hBZmDJ4obG23Qn9n7OGkf7NdqHAzjA4vfLMtJC340Iozo33rNdiLXoMRr
z19jBHE/ATVdNFzsN2CCrH7iJEe9gbR/JM1w8H/5SIXDn0QTVKBAjJNWe1Wqmxvq
zxd2QXFRGgvF5+FwxV/aRTRPTg5YjeCZ5q8p9NyPxnb9uM9Kyo+aCQAO/1G7pWdI
r/3pNb83DFYnfY8HPpJuX0t3AgMBAAGjUDBOMB0GA1UdDgQWBBQ2/bSCHscnoV+0
d+YJxLu4XLSNIDAfBgNVHSMEGDAWgBQ2/bSCHscnoV+0d+YJxLu4XLSNIDAMBgNV
HRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQC6p4gPwmkoDtRsP1c8IWgXFka+
Q59oe79ZK1RqDE6ZZu0rgw07rPzKr4ofW4hTxnx7PUgKOhWLq9VvwEC/9tDbD0Gw
SKknRZZOiEE3qUZbwNtHMd4UBzpzChTRC6RcwC5zT1/WICMUHxa4b8E2umJuf3Qd
5Y23sXEESx5evr49z6DLcVe2i70o2wJeWs2kaXqhCJt0X7z0rnYqjfFdvxd8dyzt
1DXmE45cLadpWHDg26DMsdchamgnqEo79YUxkH6G/Cb8ZX4igQ/CsxCDOKvccjHO
OncDtuIpK8O7OyfHP3+MBpUFG4P6Ctn7RVcZe9fQweTpfAy18G+loVzuUeOD
-----END CERTIFICATE-----`,
shouldFail: false,
},
}


@ -1,92 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"crypto/x509"
"crypto/x509/pkix"
"fmt"
"net/http"
"strings"
color "github.com/minio/minio/pkg/color"
)
// Extra ASN1 OIDs that we may need to handle
var (
oidEmailAddress = []int{1, 2, 840, 113549, 1, 9, 1}
)
// printName prints the fields of a distinguished name, which include such
// things as its common name and locality.
func printName(names []pkix.AttributeTypeAndValue, buf *strings.Builder) []string {
values := []string{}
for _, name := range names {
oid := name.Type
if len(oid) == 4 && oid[0] == 2 && oid[1] == 5 && oid[2] == 4 {
switch oid[3] {
case 3:
values = append(values, fmt.Sprintf("CN=%s", name.Value))
case 6:
values = append(values, fmt.Sprintf("C=%s", name.Value))
case 8:
values = append(values, fmt.Sprintf("ST=%s", name.Value))
case 10:
values = append(values, fmt.Sprintf("O=%s", name.Value))
case 11:
values = append(values, fmt.Sprintf("OU=%s", name.Value))
default:
values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String()))
}
} else if oid.Equal(oidEmailAddress) {
values = append(values, fmt.Sprintf("emailAddress=%s", name.Value))
} else {
values = append(values, fmt.Sprintf("UnknownOID=%s", name.Type.String()))
}
}
if len(values) > 0 {
buf.WriteString(values[0])
for i := 1; i < len(values); i++ {
buf.WriteString(", " + values[i])
}
buf.WriteString("\n")
}
return values
}
// CertificateText returns a human-readable string representation
// of the certificate cert. The format is similar to the OpenSSL
// way of printing certificates (not identical).
func CertificateText(cert *x509.Certificate) string {
var buf strings.Builder
buf.WriteString(color.Blue("\nCertificate:\n"))
if cert.SignatureAlgorithm != x509.UnknownSignatureAlgorithm {
buf.WriteString(color.Blue("%4sSignature Algorithm: ", "") + color.Bold(fmt.Sprintf("%s\n", cert.SignatureAlgorithm)))
}
// Issuer information
buf.WriteString(color.Blue("%4sIssuer: ", ""))
printName(cert.Issuer.Names, &buf)
// Validity information
buf.WriteString(color.Blue("%4sValidity\n", ""))
buf.WriteString(color.Bold(fmt.Sprintf("%8sNot Before: %s\n", "", cert.NotBefore.Format(http.TimeFormat))))
buf.WriteString(color.Bold(fmt.Sprintf("%8sNot After : %s\n", "", cert.NotAfter.Format(http.TimeFormat))))
return buf.String()
}
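
A trimmed-down sketch of the same idea without the color helpers: parse one PEM certificate and print its signature algorithm, issuer, and validity in the OpenSSL-like layout CertificateText produces (pkix.Name's own String method stands in for printName; illustrative only):

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
)

func main() {
	data, err := ioutil.ReadFile(os.Args[1]) // path to a PEM certificate file
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(data)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("Certificate:")
	fmt.Printf("    Signature Algorithm: %s\n", cert.SignatureAlgorithm)
	// pkix.Name's String method renders CN=, O=, OU=, ... much like printName above.
	fmt.Printf("    Issuer: %s\n", cert.Issuer)
	fmt.Println("    Validity")
	fmt.Printf("        Not Before: %s\n", cert.NotBefore.Format(http.TimeFormat))
	fmt.Printf("        Not After : %s\n", cert.NotAfter.Format(http.TimeFormat))
}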


@ -1,132 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package compress
import (
"fmt"
"strings"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/pkg/env"
)
// Config represents the compression settings.
type Config struct {
Enabled bool `json:"enabled"`
Extensions []string `json:"extensions"`
MimeTypes []string `json:"mime-types"`
}
// Compression environment variables
const (
Extensions = "extensions"
MimeTypes = "mime_types"
EnvCompressState = "MINIO_COMPRESS_ENABLE"
EnvCompressExtensions = "MINIO_COMPRESS_EXTENSIONS"
EnvCompressMimeTypes = "MINIO_COMPRESS_MIME_TYPES"
// Include-list for compression.
DefaultExtensions = ".txt,.log,.csv,.json,.tar,.xml,.bin"
DefaultMimeTypes = "text/*,application/json,application/xml"
)
// DefaultKVS - default KV config for compression settings
var (
DefaultKVS = config.KVS{
config.KV{
Key: config.Enable,
Value: config.EnableOff,
},
config.KV{
Key: Extensions,
Value: DefaultExtensions,
},
config.KV{
Key: MimeTypes,
Value: DefaultMimeTypes,
},
}
)
// Parses the given compression include list `extensions` or `content-types`.
func parseCompressIncludes(include string) ([]string, error) {
includes := strings.Split(include, config.ValueSeparator)
for _, e := range includes {
if len(e) == 0 {
return nil, config.ErrInvalidCompressionIncludesValue(nil).Msg("extension/mime-type cannot be empty")
}
if e == "/" {
return nil, config.ErrInvalidCompressionIncludesValue(nil).Msg("extension/mime-type cannot be '/'")
}
}
return includes, nil
}
// LookupConfig - lookup compression config.
func LookupConfig(kvs config.KVS) (Config, error) {
var err error
cfg := Config{}
if err = config.CheckValidKeys(config.CompressionSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
compress := env.Get(EnvCompress, "")
if compress == "" {
compress = env.Get(EnvCompressState, kvs.Get(config.Enable))
}
cfg.Enabled, err = config.ParseBool(compress)
if err != nil {
// Parsing failures can happen due to an empty KVS; ignore them.
if kvs.Empty() {
return cfg, nil
}
return cfg, err
}
if !cfg.Enabled {
return cfg, nil
}
compressExtensions := env.Get(EnvCompressExtensions, kvs.Get(Extensions))
compressMimeTypes := env.Get(EnvCompressMimeTypes, kvs.Get(MimeTypes))
compressMimeTypesLegacy := env.Get(EnvCompressMimeTypesLegacy, kvs.Get(MimeTypes))
if compressExtensions != "" || compressMimeTypes != "" || compressMimeTypesLegacy != "" {
if compressExtensions != "" {
extensions, err := parseCompressIncludes(compressExtensions)
if err != nil {
return cfg, fmt.Errorf("%s: Invalid MINIO_COMPRESS_EXTENSIONS value (`%s`)", err, extensions)
}
cfg.Extensions = extensions
}
if compressMimeTypes != "" {
mimeTypes, err := parseCompressIncludes(compressMimeTypes)
if err != nil {
return cfg, fmt.Errorf("%s: Invalid MINIO_COMPRESS_MIME_TYPES value (`%s`)", err, mimeTypes)
}
cfg.MimeTypes = mimeTypes
}
if compressMimeTypesLegacy != "" {
mimeTypes, err := parseCompressIncludes(compressMimeTypesLegacy)
if err != nil {
return cfg, fmt.Errorf("%s: Invalid MINIO_COMPRESS_MIME_TYPES value (`%s`)", err, mimeTypes)
}
cfg.MimeTypes = mimeTypes
}
}
return cfg, nil
}
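
A minimal sketch of the include-list validation above: split on the value separator and reject empty entries and a bare "/", as parseCompressIncludes does (assumes config.ValueSeparator is ","):

package main

import (
	"fmt"
	"strings"
)

// parseIncludes mirrors parseCompressIncludes: every comma-separated
// entry must be non-empty and must not be the bare "/".
func parseIncludes(include string) ([]string, error) {
	includes := strings.Split(include, ",")
	for _, e := range includes {
		if len(e) == 0 {
			return nil, fmt.Errorf("extension/mime-type cannot be empty")
		}
		if e == "/" {
			return nil, fmt.Errorf("extension/mime-type cannot be '/'")
		}
	}
	return includes, nil
}

func main() {
	fmt.Println(parseIncludes(".txt,.log"))               // ok
	fmt.Println(parseIncludes("text/*,application/json")) // ok
	fmt.Println(parseIncludes("text/*,/"))                // rejected
}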


@ -1,57 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package compress
import (
"reflect"
"testing"
)
func TestParseCompressIncludes(t *testing.T) {
testCases := []struct {
str string
expectedPatterns []string
success bool
}{
// invalid input
{",,,", []string{}, false},
{"", []string{}, false},
{",", []string{}, false},
{"/", []string{}, false},
{"text/*,/", []string{}, false},
// valid input
{".txt,.log", []string{".txt", ".log"}, true},
{"text/*,application/json", []string{"text/*", "application/json"}, true},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.str, func(t *testing.T) {
gotPatterns, err := parseCompressIncludes(testCase.str)
if !testCase.success && err == nil {
t.Error("expected failure but success instead")
}
if testCase.success && err != nil {
t.Errorf("expected success but failed instead %s", err)
}
if testCase.success && !reflect.DeepEqual(testCase.expectedPatterns, gotPatterns) {
t.Errorf("expected patterns %s but got %s", testCase.expectedPatterns, gotPatterns)
}
})
}
}


@ -1,43 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package compress
import "github.com/minio/minio/legacy/config"
// Help template for compress feature.
var (
Help = config.HelpKVS{
config.HelpKV{
Key: Extensions,
Description: `comma separated file extensions e.g. ".txt,.log,.csv"`,
Optional: true,
Type: "csv",
},
config.HelpKV{
Key: MimeTypes,
Description: `comma separated wildcard mime-types e.g. "text/*,application/json,application/xml"`,
Optional: true,
Type: "csv",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)


@ -1,51 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package compress
import (
"strings"
"github.com/minio/minio/legacy/config"
)
// Legacy envs.
const (
EnvCompress = "MINIO_COMPRESS"
EnvCompressMimeTypesLegacy = "MINIO_COMPRESS_MIMETYPES"
)
// SetCompressionConfig - one-time migration code for moving the older compression config to the new format.
func SetCompressionConfig(s config.Config, cfg Config) {
if !cfg.Enabled {
// No need to save disabled settings in new config.
return
}
s[config.CompressionSubSys][config.Default] = config.KVS{
config.KV{
Key: config.Enable,
Value: config.EnableOn,
},
config.KV{
Key: Extensions,
Value: strings.Join(cfg.Extensions, config.ValueSeparator),
},
config.KV{
Key: MimeTypes,
Value: strings.Join(cfg.MimeTypes, config.ValueSeparator),
},
}
}


@ -1,712 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package config
import (
"bufio"
"fmt"
"io"
"regexp"
"strings"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)
// Error config error type
type Error struct {
Err string
}
// Errorf - formats according to a format specifier and returns
// the string as a value that satisfies error of type config.Error
func Errorf(format string, a ...interface{}) error {
return Error{Err: fmt.Sprintf(format, a...)}
}
func (e Error) Error() string {
return e.Err
}
// Default keys
const (
Default = madmin.Default
Enable = madmin.EnableKey
Comment = madmin.CommentKey
// Enable values
EnableOn = madmin.EnableOn
EnableOff = madmin.EnableOff
RegionName = "name"
AccessKey = "access_key"
SecretKey = "secret_key"
)
// Top level config constants.
const (
CredentialsSubSys = "credentials"
PolicyOPASubSys = "policy_opa"
IdentityOpenIDSubSys = "identity_openid"
IdentityLDAPSubSys = "identity_ldap"
CacheSubSys = "cache"
RegionSubSys = "region"
EtcdSubSys = "etcd"
StorageClassSubSys = "storage_class"
APISubSys = "api"
CompressionSubSys = "compression"
KmsVaultSubSys = "kms_vault"
KmsKesSubSys = "kms_kes"
LoggerWebhookSubSys = "logger_webhook"
AuditWebhookSubSys = "audit_webhook"
// Add new constants here if you add new fields to config.
)
// Notification config constants.
const (
NotifyKafkaSubSys = "notify_kafka"
NotifyMQTTSubSys = "notify_mqtt"
NotifyMySQLSubSys = "notify_mysql"
NotifyNATSSubSys = "notify_nats"
NotifyNSQSubSys = "notify_nsq"
NotifyESSubSys = "notify_elasticsearch"
NotifyAMQPSubSys = "notify_amqp"
NotifyPostgresSubSys = "notify_postgres"
NotifyRedisSubSys = "notify_redis"
NotifyWebhookSubSys = "notify_webhook"
// Add new constants here if you add new fields to config.
)
// SubSystems - all supported sub-systems
var SubSystems = set.CreateStringSet([]string{
CredentialsSubSys,
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
KmsVaultSubSys,
KmsKesSubSys,
LoggerWebhookSubSys,
AuditWebhookSubSys,
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
NotifyAMQPSubSys,
NotifyESSubSys,
NotifyKafkaSubSys,
NotifyMQTTSubSys,
NotifyMySQLSubSys,
NotifyNATSSubSys,
NotifyNSQSubSys,
NotifyPostgresSubSys,
NotifyRedisSubSys,
NotifyWebhookSubSys,
}...)
// SubSystemsSingleTargets - subsystems which only support a single target.
var SubSystemsSingleTargets = set.CreateStringSet([]string{
CredentialsSubSys,
RegionSubSys,
EtcdSubSys,
CacheSubSys,
APISubSys,
StorageClassSubSys,
CompressionSubSys,
KmsVaultSubSys,
KmsKesSubSys,
PolicyOPASubSys,
IdentityLDAPSubSys,
IdentityOpenIDSubSys,
}...)
// Constant separators
const (
SubSystemSeparator = madmin.SubSystemSeparator
KvSeparator = madmin.KvSeparator
KvSpaceSeparator = madmin.KvSpaceSeparator
KvComment = madmin.KvComment
KvNewline = madmin.KvNewline
KvDoubleQuote = madmin.KvDoubleQuote
KvSingleQuote = madmin.KvSingleQuote
// Env prefix used for all envs in MinIO
EnvPrefix = "MINIO_"
EnvWordDelimiter = `_`
)
// DefaultKVS - default kvs for all sub-systems
var DefaultKVS map[string]KVS
// RegisterDefaultKVS - saves the input kvsMap globally;
// it should be called only once, preferably
// during `init()`.
func RegisterDefaultKVS(kvsMap map[string]KVS) {
DefaultKVS = map[string]KVS{}
for subSys, kvs := range kvsMap {
DefaultKVS[subSys] = kvs
}
}
// HelpSubSysMap - help for all individual KVS for each sub-system;
// also carries a special empty sub-system which dumps
// help for each sub-system key.
var HelpSubSysMap map[string]HelpKVS
// RegisterHelpSubSys - saves the
// input help KVS for each sub-system globally;
// it should be called only once,
// preferably during `init()`.
func RegisterHelpSubSys(helpKVSMap map[string]HelpKVS) {
HelpSubSysMap = map[string]HelpKVS{}
for subSys, hkvs := range helpKVSMap {
HelpSubSysMap[subSys] = hkvs
}
}
// KV - is a shorthand of each key value.
type KV struct {
Key string `json:"key"`
Value string `json:"value"`
}
// KVS - is a shorthand for some wrapper functions
// to operate on a list of key values.
type KVS []KV
// Empty - reports whether the KVS is empty.
func (kvs KVS) Empty() bool {
return len(kvs) == 0
}
// Keys returns the list of keys for the current KVS
func (kvs KVS) Keys() []string {
var keys = make([]string, len(kvs))
var foundComment bool
for i := range kvs {
if kvs[i].Key == madmin.CommentKey {
foundComment = true
}
keys[i] = kvs[i].Key
}
// Comment KV not found, add it explicitly.
if !foundComment {
keys = append(keys, madmin.CommentKey)
}
return keys
}
func (kvs KVS) String() string {
var s strings.Builder
for _, kv := range kvs {
// No need to print the enable key when the state is on.
if kv.Key == Enable && kv.Value == EnableOn {
continue
}
s.WriteString(kv.Key)
s.WriteString(KvSeparator)
spc := madmin.HasSpace(kv.Value)
if spc {
s.WriteString(KvDoubleQuote)
}
s.WriteString(kv.Value)
if spc {
s.WriteString(KvDoubleQuote)
}
s.WriteString(KvSpaceSeparator)
}
return s.String()
}
// Set - updates the value of an existing key, or appends a new KV if the key is not present.
func (kvs *KVS) Set(key, value string) {
for i, kv := range *kvs {
if kv.Key == key {
(*kvs)[i] = KV{
Key: key,
Value: value,
}
return
}
}
*kvs = append(*kvs, KV{
Key: key,
Value: value,
})
}
// Get - returns the value of a key; returns an empty string if the key is not found.
func (kvs KVS) Get(key string) string {
v, ok := kvs.Lookup(key)
if ok {
return v
}
return ""
}
// Lookup - looks up a key in the KVS list.
func (kvs KVS) Lookup(key string) (string, bool) {
for _, kv := range kvs {
if kv.Key == key {
return kv.Value, true
}
}
return "", false
}
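// Illustrative sketch, not part of the original file: Set overwrites an
// existing key in place or appends a new one; Get returns "" for missing
// keys, while Lookup also reports presence.
func exampleKVSUsage() (string, bool) {
	kvs := KVS{}
	kvs.Set("endpoint", "http://localhost:9000")
	kvs.Set("endpoint", "http://localhost:9001") // overwritten in place
	_, found := kvs.Lookup("token")              // found == false
	return kvs.Get("endpoint"), found            // "http://localhost:9001", false
}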
// Config - MinIO server config structure.
type Config map[string]map[string]KVS
// DelFrom - deletes all keys listed in the input reader.
func (c Config) DelFrom(r io.Reader) error {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
// Skip any empty or comment-like lines
text := scanner.Text()
if text == "" || strings.HasPrefix(text, KvComment) {
continue
}
if err := c.DelKVS(text); err != nil {
return err
}
}
if err := scanner.Err(); err != nil {
return err
}
return nil
}
// ReadFrom - implements io.ReaderFrom interface
func (c Config) ReadFrom(r io.Reader) (int64, error) {
var n int
scanner := bufio.NewScanner(r)
for scanner.Scan() {
// Skip any empty or comment-like lines
text := scanner.Text()
if text == "" || strings.HasPrefix(text, KvComment) {
continue
}
if err := c.SetKVS(text, DefaultKVS); err != nil {
return 0, err
}
n += len(text)
}
if err := scanner.Err(); err != nil {
return 0, err
}
return int64(n), nil
}
type configWriteTo struct {
Config
filterByKey string
}
// NewConfigWriteTo - returns a struct which
// allows for serializing the config/kv struct
// to an io.WriterTo.
func NewConfigWriteTo(cfg Config, key string) io.WriterTo {
return &configWriteTo{Config: cfg, filterByKey: key}
}
// WriteTo - implements io.WriterTo interface implementation for config.
func (c *configWriteTo) WriteTo(w io.Writer) (int64, error) {
kvsTargets, err := c.GetKVS(c.filterByKey, DefaultKVS)
if err != nil {
return 0, err
}
var n int
for _, target := range kvsTargets {
m1, _ := w.Write([]byte(target.SubSystem))
m2, _ := w.Write([]byte(KvSpaceSeparator))
m3, _ := w.Write([]byte(target.KVS.String()))
if len(kvsTargets) > 1 {
m4, _ := w.Write([]byte(KvNewline))
n += m1 + m2 + m3 + m4
} else {
n += m1 + m2 + m3
}
}
return int64(n), nil
}
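// Illustrative sketch, not part of the original file: streaming one
// sub-system's effective (defaults-filled) configuration to a writer.
// Assumes `os` is imported; any io.Writer works in its place.
func exampleConfigWriteTo(cfg Config) error {
	_, err := NewConfigWriteTo(cfg, RegionSubSys).WriteTo(os.Stdout)
	return err
}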
// Default KV configs for credentials and region
var (
DefaultCredentialKVS = KVS{
KV{
Key: AccessKey,
Value: auth.DefaultAccessKey,
},
KV{
Key: SecretKey,
Value: auth.DefaultSecretKey,
},
}
DefaultRegionKVS = KVS{
KV{
Key: RegionName,
Value: "",
},
}
)
// LookupCreds - lookup credentials from config.
func LookupCreds(kv KVS) (auth.Credentials, error) {
if err := CheckValidKeys(CredentialsSubSys, kv, DefaultCredentialKVS); err != nil {
return auth.Credentials{}, err
}
accessKey := kv.Get(AccessKey)
secretKey := kv.Get(SecretKey)
if accessKey == "" || secretKey == "" {
accessKey = auth.DefaultAccessKey
secretKey = auth.DefaultSecretKey
}
return auth.CreateCredentials(accessKey, secretKey)
}
var validRegionRegex = regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9_-]+$")
// LookupRegion - get current region.
func LookupRegion(kv KVS) (string, error) {
if err := CheckValidKeys(RegionSubSys, kv, DefaultRegionKVS); err != nil {
return "", err
}
region := env.Get(EnvRegion, "")
if region == "" {
region = env.Get(EnvRegionName, kv.Get(RegionName))
}
if region != "" {
if validRegionRegex.MatchString(region) {
return region, nil
}
return "", Errorf(
"region '%s' is invalid, expected simple characters such as [us-east-1, myregion...]",
region)
}
return "", nil
}
// CheckValidKeys - checks if the input KVS has only the necessary keys;
// returns an error if it finds extra or superfluous keys.
func CheckValidKeys(subSys string, kv KVS, validKVS KVS) error {
nkv := KVS{}
for _, kv := range kv {
// Comment is a valid key and it is fully optional;
// ignore it since it is a valid key for all
// sub-systems.
if kv.Key == Comment {
continue
}
if _, ok := validKVS.Lookup(kv.Key); !ok {
nkv = append(nkv, kv)
}
}
if len(nkv) > 0 {
return Errorf(
"found invalid keys (%s) for '%s' sub-system, use 'mc admin config reset myminio %s' to fix invalid keys", nkv.String(), subSys, subSys)
}
return nil
}
// LookupWorm - check if worm is enabled
func LookupWorm() (bool, error) {
return ParseBool(env.Get(EnvWorm, EnableOff))
}
// Merge - merges the current config with the
// defaults, filling in any missing values, and
// returns the merged config.
func (c Config) Merge() Config {
cp := New()
for subSys, tgtKV := range c {
for tgt := range tgtKV {
ckvs := c[subSys][tgt]
for _, kv := range cp[subSys][Default] {
_, ok := c[subSys][tgt].Lookup(kv.Key)
if !ok {
ckvs.Set(kv.Key, kv.Value)
}
}
cp[subSys][tgt] = ckvs
}
}
return cp
}
// New - initialize a new server config.
func New() Config {
srvCfg := make(Config)
for _, k := range SubSystems.ToSlice() {
srvCfg[k] = map[string]KVS{}
srvCfg[k][Default] = DefaultKVS[k]
}
return srvCfg
}
// Target signifies an individual target
type Target struct {
SubSystem string
KVS KVS
}
// Targets - a list of sub-system targets
type Targets []Target
// GetKVS - get kvs from a specific sub-system.
func (c Config) GetKVS(s string, defaultKVS map[string]KVS) (Targets, error) {
if len(s) == 0 {
return nil, Errorf("input cannot be empty")
}
inputs := strings.Fields(s)
if len(inputs) > 1 {
return nil, Errorf("invalid number of arguments %s", s)
}
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
if len(subSystemValue) == 0 {
return nil, Errorf("invalid number of arguments %s", s)
}
found := SubSystems.Contains(subSystemValue[0])
if !found {
// Check for a sub-system prefix only if the input is a
// single value; this rejects any invalid inputs.
found = !SubSystems.FuncMatch(strings.HasPrefix, subSystemValue[0]).IsEmpty() && len(subSystemValue) == 1
}
if !found {
return nil, Errorf("unknown sub-system %s", s)
}
targets := Targets{}
subSysPrefix := subSystemValue[0]
if len(subSystemValue) == 2 {
if len(subSystemValue[1]) == 0 {
return nil, Errorf("sub-system target '%s' cannot be empty", s)
}
kvs, ok := c[subSysPrefix][subSystemValue[1]]
if !ok {
return nil, Errorf("sub-system target '%s' doesn't exist", s)
}
for _, kv := range defaultKVS[subSysPrefix] {
_, ok = kvs.Lookup(kv.Key)
if !ok {
kvs.Set(kv.Key, kv.Value)
}
}
targets = append(targets, Target{
SubSystem: inputs[0],
KVS: kvs,
})
} else {
hkvs := HelpSubSysMap[""]
// Use help for sub-system to preserve the order.
for _, hkv := range hkvs {
if !strings.HasPrefix(hkv.Key, subSysPrefix) {
continue
}
if c[hkv.Key][Default].Empty() {
targets = append(targets, Target{
SubSystem: hkv.Key,
KVS: defaultKVS[hkv.Key],
})
}
for k, kvs := range c[hkv.Key] {
for _, dkv := range defaultKVS[hkv.Key] {
_, ok := kvs.Lookup(dkv.Key)
if !ok {
kvs.Set(dkv.Key, dkv.Value)
}
}
if k != Default {
targets = append(targets, Target{
SubSystem: hkv.Key + SubSystemSeparator + k,
KVS: kvs,
})
} else {
targets = append(targets, Target{
SubSystem: hkv.Key,
KVS: kvs,
})
}
}
}
}
return targets, nil
}
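// Illustrative sketch, not part of the original file, of the query
// formats GetKVS accepts, assuming ':' is the sub-system separator:
// a bare sub-system name, a "sub-system:target" pair, or a name
// prefix such as "notify" which expands to all notify_* sub-systems.
func exampleGetKVS(c Config) (Targets, error) {
	if _, err := c.GetKVS("notify_webhook:1", DefaultKVS); err != nil {
		return nil, err // unknown sub-system or missing target
	}
	return c.GetKVS("notify", DefaultKVS)
}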
// DelKVS - deletes the KVS for a given sub-system target.
func (c Config) DelKVS(s string) error {
if len(s) == 0 {
return Errorf("input arguments cannot be empty")
}
inputs := strings.Fields(s)
if len(inputs) > 1 {
return Errorf("invalid number of arguments %s", s)
}
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
if len(subSystemValue) == 0 {
return Errorf("invalid number of arguments %s", s)
}
if !SubSystems.Contains(subSystemValue[0]) {
// Unknown sub-system found, try to remove it anyway.
delete(c, subSystemValue[0])
return nil
}
tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
if len(subSystemValue[1]) == 0 {
return Errorf("sub-system target '%s' cannot be empty", s)
}
tgt = subSystemValue[1]
}
_, ok := c[subSys][tgt]
if !ok {
return Errorf("sub-system %s already deleted", s)
}
delete(c[subSys], tgt)
return nil
}
// Clone - clones a config map entirely.
func (c Config) Clone() Config {
cp := New()
for subSys, tgtKV := range c {
cp[subSys] = make(map[string]KVS)
for tgt, kv := range tgtKV {
cp[subSys][tgt] = append(cp[subSys][tgt], kv...)
}
}
return cp
}
// SetKVS - set specific key values per sub-system.
func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
if len(s) == 0 {
return Errorf("input arguments cannot be empty")
}
inputs := strings.SplitN(s, KvSpaceSeparator, 2)
if len(inputs) <= 1 {
return Errorf("invalid number of arguments '%s'", s)
}
subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
if len(subSystemValue) == 0 {
return Errorf("invalid number of arguments %s", s)
}
if !SubSystems.Contains(subSystemValue[0]) {
return Errorf("unknown sub-system %s", s)
}
if SubSystemsSingleTargets.Contains(subSystemValue[0]) && len(subSystemValue) == 2 {
return Errorf("sub-system '%s' only supports single target", subSystemValue[0])
}
tgt := Default
subSys := subSystemValue[0]
if len(subSystemValue) == 2 {
tgt = subSystemValue[1]
}
fields := madmin.KvFields(inputs[1], defaultKVS[subSys].Keys())
if len(fields) == 0 {
return Errorf("sub-system '%s' cannot have empty keys", subSys)
}
var kvs = KVS{}
var prevK string
for _, v := range fields {
kv := strings.SplitN(v, KvSeparator, 2)
if len(kv) == 0 {
continue
}
if len(kv) == 1 && prevK != "" {
value := strings.Join([]string{
kvs.Get(prevK),
madmin.SanitizeValue(kv[0]),
}, KvSpaceSeparator)
kvs.Set(prevK, value)
continue
}
if len(kv) == 2 {
prevK = kv[0]
kvs.Set(prevK, madmin.SanitizeValue(kv[1]))
continue
}
return Errorf("key '%s', cannot have empty value", kv[0])
}
_, ok := kvs.Lookup(Enable)
// Check if state is required
_, enableRequired := defaultKVS[subSys].Lookup(Enable)
if !ok && enableRequired {
// implicit state "on" if not specified.
kvs.Set(Enable, EnableOn)
}
currKVS, ok := c[subSys][tgt]
if !ok {
currKVS = defaultKVS[subSys]
} else {
for _, kv := range defaultKVS[subSys] {
if _, ok = currKVS.Lookup(kv.Key); !ok {
currKVS.Set(kv.Key, kv.Value)
}
}
}
for _, kv := range kvs {
if kv.Key == Comment {
// Skip comment and add it later.
continue
}
currKVS.Set(kv.Key, kv.Value)
}
v, ok := kvs.Lookup(Comment)
if ok {
currKVS.Set(Comment, v)
}
hkvs := HelpSubSysMap[subSys]
for _, hkv := range hkvs {
var enabled bool
if enableRequired {
enabled = currKVS.Get(Enable) == EnableOn
} else {
// when the enable arg is not required,
// it is implicitly on for the sub-system.
enabled = true
}
v, _ := currKVS.Lookup(hkv.Key)
if v == "" && !hkv.Optional && enabled {
// Return error only if the
// key is enabled, for state=off
// let it be empty.
return Errorf(
"'%s' is not optional for '%s' sub-system, please check '%s' documentation",
hkv.Key, subSys, subSys)
}
}
c[subSys][tgt] = currKVS
return nil
}

View file

@ -1,133 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package config
import (
"testing"
"github.com/minio/minio/pkg/madmin"
)
func TestKVFields(t *testing.T) {
tests := []struct {
input string
keys []string
expectedFields map[string]struct{}
}{
// No keys present
{
input: "",
keys: []string{"comment"},
expectedFields: map[string]struct{}{},
},
// No keys requested for tokenizing
{
input: `comment="Hi this is my comment ="`,
keys: []string{},
expectedFields: map[string]struct{}{},
},
// Single key requested and present
{
input: `comment="Hi this is my comment ="`,
keys: []string{"comment"},
expectedFields: map[string]struct{}{`comment="Hi this is my comment ="`: {}},
},
// Keys and input order of k=v are the same.
{
input: `connection_string="host=localhost port=2832" comment="really long comment"`,
keys: []string{"connection_string", "comment"},
expectedFields: map[string]struct{}{
`connection_string="host=localhost port=2832"`: {},
`comment="really long comment"`: {},
},
},
// Keys with spaces in between
{
input: `enable=on format=namespace connection_string=" host=localhost port=5432 dbname = cesnietor sslmode=disable" table=holicrayoli`,
keys: []string{"enable", "connection_string", "comment", "format", "table"},
expectedFields: map[string]struct{}{
`enable=on`: {},
`format=namespace`: {},
`connection_string=" host=localhost port=5432 dbname = cesnietor sslmode=disable"`: {},
`table=holicrayoli`: {},
},
},
// One of the keys is not present and order of input has changed.
{
input: `comment="really long comment" connection_string="host=localhost port=2832"`,
keys: []string{"connection_string", "comment", "format"},
expectedFields: map[string]struct{}{
`connection_string="host=localhost port=2832"`: {},
`comment="really long comment"`: {},
},
},
// Incorrect delimiter, expected fields should be empty.
{
input: `comment:"really long comment" connection_string:"host=localhost port=2832"`,
keys: []string{"connection_string", "comment"},
expectedFields: map[string]struct{}{},
},
// Incorrect type of input v/s required keys.
{
input: `comme="really long comment" connection_str="host=localhost port=2832"`,
keys: []string{"connection_string", "comment"},
expectedFields: map[string]struct{}{},
},
}
for _, test := range tests {
test := test
t.Run("", func(t *testing.T) {
gotFields := madmin.KvFields(test.input, test.keys)
if len(gotFields) != len(test.expectedFields) {
t.Errorf("Expected keys %d, found %d", len(test.expectedFields), len(gotFields))
}
found := true
for _, field := range gotFields {
_, ok := test.expectedFields[field]
found = found && ok
}
if !found {
t.Errorf("Expected %s, got %s", test.expectedFields, gotFields)
}
})
}
}
func TestValidRegion(t *testing.T) {
tests := []struct {
name string
success bool
}{
{name: "us-east-1", success: true},
{name: "us_east", success: true},
{name: "helloWorld", success: true},
{name: "-fdslka", success: false},
{name: "^00[", success: false},
{name: "my region", success: false},
{name: "%%$#!", success: false},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
ok := validRegionRegex.MatchString(test.name)
if test.success != ok {
t.Errorf("Expected %t, got %t", test.success, ok)
}
})
}
}

View file

@ -1,41 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
// Config value separator
const (
ValueSeparator = ","
)
// Top level common ENVs
const (
EnvAccessKey = "MINIO_ACCESS_KEY"
EnvSecretKey = "MINIO_SECRET_KEY"
EnvAccessKeyOld = "MINIO_ACCESS_KEY_OLD"
EnvSecretKeyOld = "MINIO_SECRET_KEY_OLD"
EnvBrowser = "MINIO_BROWSER"
EnvDomain = "MINIO_DOMAIN"
EnvRegionName = "MINIO_REGION_NAME"
EnvPublicIPs = "MINIO_PUBLIC_IPS"
EnvEndpoints = "MINIO_ENDPOINTS"
EnvFSOSync = "MINIO_FS_OSYNC"
EnvUpdate = "MINIO_UPDATE"
EnvWorm = "MINIO_WORM" // legacy
EnvRegion = "MINIO_REGION" // legacy
)

View file

@ -1,154 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
import (
"errors"
"fmt"
"io"
"net"
"syscall"
"github.com/minio/minio/pkg/color"
)
// Err is a structure which contains all information
// to print a fatal error message in JSON or pretty mode.
// Err implements error so we can use it anywhere.
type Err struct {
msg string
detail string
action string
hint string
}
// Clone returns a new Err struct with the same information
func (u Err) Clone() Err {
return Err{
msg: u.msg,
detail: u.detail,
action: u.action,
hint: u.hint,
}
}
// Error returns the error message
func (u Err) Error() string {
if u.detail == "" {
if u.msg != "" {
return u.msg
}
return "<nil>"
}
return u.detail
}
// Msg - Replace the current error's message
func (u Err) Msg(m string, args ...interface{}) Err {
e := u.Clone()
e.msg = fmt.Sprintf(m, args...)
return e
}
// Hint - Replace the current error's hint
func (u Err) Hint(m string, args ...interface{}) Err {
e := u.Clone()
e.hint = fmt.Sprintf(m, args...)
return e
}
// ErrFn function wrapper
type ErrFn func(err error) Err
// newErrFn creates a UI error generator; this simplifies
// updating the detailed error message in several places
// in MinIO code.
func newErrFn(msg, action, hint string) ErrFn {
return func(err error) Err {
u := Err{
msg: msg,
action: action,
hint: hint,
}
if err != nil {
u.detail = err.Error()
}
return u
}
}
// ErrorToErr inspects the passed error and transforms it
// to the appropriate UI error.
func ErrorToErr(err error) Err {
if err == nil {
return Err{}
}
// If this is already an Err, do nothing
if e, ok := err.(Err); ok {
return e
}
// Show a generic message for known golang errors
if errors.Is(err, syscall.EADDRINUSE) {
return ErrPortAlreadyInUse(err).Msg("Specified port is already in use")
} else if errors.Is(err, syscall.EACCES) || errors.Is(err, syscall.EPERM) {
switch err.(type) {
case *net.OpError:
return ErrPortAccess(err).Msg("Insufficient permissions to use specified port")
}
return ErrNoPermissionsToAccessDirFiles(err).Msg("Insufficient permissions to access path")
} else if errors.Is(err, io.ErrUnexpectedEOF) {
return ErrUnexpectedDataContent(err)
} else {
// Failed to identify what type of error this is, return a simple UI error
return Err{msg: err.Error()}
}
}
// FmtError converts a fatal error message to a clearer error
// message using some colors
func FmtError(introMsg string, err error, jsonFlag bool) string {
renderedTxt := ""
uiErr := ErrorToErr(err)
// JSON print
if jsonFlag {
// Message text in json should be simple
if uiErr.detail != "" {
return uiErr.msg + ": " + uiErr.detail
}
return uiErr.msg
}
// Pretty print error message
introMsg += ": "
if uiErr.msg != "" {
introMsg += color.Bold(uiErr.msg)
} else {
introMsg += color.Bold(err.Error())
}
renderedTxt += color.Red(introMsg) + "\n"
// Add action message
if uiErr.action != "" {
renderedTxt += "> " + color.BgYellow(color.Black(uiErr.action)) + "\n"
}
// Add hint
if uiErr.hint != "" {
renderedTxt += color.Bold("HINT:") + "\n"
renderedTxt += " " + uiErr.hint
}
return renderedTxt
}
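// Illustrative sketch, not part of the original file: callers pick a
// generator from the UI error list, refine it, and render it.
// ErrPortAlreadyInUse is defined in this package's UI error list.
func exampleFmtError(err error) string {
	uiErr := ErrPortAlreadyInUse(err).Hint("Stop the conflicting process or pick another --address")
	return FmtError("Unable to start the server", uiErr, false)
}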

View file

@ -1,285 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
// UI errors
var (
ErrInvalidBrowserValue = newErrFn(
"Invalid browser value",
"Please check the passed value",
"Browser can only accept `on` and `off` values. To disable web browser access, set this value to `off`",
)
ErrInvalidFSOSyncValue = newErrFn(
"Invalid O_SYNC value",
"Please check the passed value",
"Can only accept `on` and `off` values. To enable O_SYNC for fs backend, set this value to `on`",
)
ErrInvalidDomainValue = newErrFn(
"Invalid domain value",
"Please check the passed value",
"Domain can only accept DNS compatible values",
)
ErrInvalidErasureSetSize = newErrFn(
"Invalid erasure set size",
"Please check the passed value",
"Erasure set can only accept any of [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] values",
)
ErrInvalidWormValue = newErrFn(
"Invalid WORM value",
"Please check the passed value",
"WORM can only accept `on` and `off` values. To enable WORM, set this value to `on`",
)
ErrInvalidCacheDrivesValue = newErrFn(
"Invalid cache drive value",
"Please check the value in this ENV variable",
"MINIO_CACHE_DRIVES: Mounted drives or directories are delimited by `,`",
)
ErrInvalidCacheExcludesValue = newErrFn(
"Invalid cache excludes value",
"Please check the passed value",
"MINIO_CACHE_EXCLUDE: Cache exclusion patterns are delimited by `,`",
)
ErrInvalidCacheExpiryValue = newErrFn(
"Invalid cache expiry value",
"Please check the passed value",
"MINIO_CACHE_EXPIRY: Valid cache expiry duration must be in days",
)
ErrInvalidCacheQuota = newErrFn(
"Invalid cache quota value",
"Please check the passed value",
"MINIO_CACHE_QUOTA: Valid cache quota value must be between 0-100",
)
ErrInvalidCacheAfter = newErrFn(
"Invalid cache after value",
"Please check the passed value",
"MINIO_CACHE_AFTER: Valid cache after value must be 0 or greater",
)
ErrInvalidCacheWatermarkLow = newErrFn(
"Invalid cache low watermark value",
"Please check the passed value",
"MINIO_CACHE_WATERMARK_LOW: Valid cache low watermark value must be between 0-100",
)
ErrInvalidCacheWatermarkHigh = newErrFn(
"Invalid cache high watermark value",
"Please check the passed value",
"MINIO_CACHE_WATERMARK_HIGH: Valid cache high watermark value must be between 0-100",
)
ErrInvalidCacheEncryptionKey = newErrFn(
"Invalid cache encryption master key value",
"Please check the passed value",
"MINIO_CACHE_ENCRYPTION_MASTER_KEY: For more information, please refer to https://docs.min.io/docs/minio-disk-cache-guide",
)
ErrInvalidCacheRange = newErrFn(
"Invalid cache range value",
"Please check the passed value",
"MINIO_CACHE_RANGE: Valid expected value is `on` or `off`",
)
ErrInvalidRotatingCredentialsBackendEncrypted = newErrFn(
"Invalid rotating credentials",
"Please set correct rotating credentials in the environment for decryption",
`Detected encrypted config backend, correct old access and secret keys should be specified via environment variables MINIO_ACCESS_KEY_OLD and MINIO_SECRET_KEY_OLD to be able to re-encrypt the MinIO config, user IAM and policies with new credentials`,
)
ErrInvalidCredentialsBackendEncrypted = newErrFn(
"Invalid credentials",
"Please set correct credentials in the environment for decryption",
`Detected encrypted config backend, correct access and secret keys should be specified via environment variables MINIO_ACCESS_KEY and MINIO_SECRET_KEY to be able to decrypt the MinIO config, user IAM and policies`,
)
ErrMissingCredentialsBackendEncrypted = newErrFn(
"Credentials missing",
"Please set your credentials in the environment",
`Detected encrypted config backend, access and secret keys should be specified via environment variables MINIO_ACCESS_KEY and MINIO_SECRET_KEY to be able to decrypt the MinIO config, user IAM and policies`,
)
ErrInvalidCredentials = newErrFn(
"Invalid credentials",
"Please provide correct credentials",
`Access key length should be at least 3, and secret key length at least 8 characters`,
)
ErrEnvCredentialsMissingGateway = newErrFn(
"Credentials missing",
"Please set your credentials in the environment",
`In Gateway mode, access and secret keys should be specified via environment variables MINIO_ACCESS_KEY and MINIO_SECRET_KEY respectively`,
)
ErrEnvCredentialsMissingDistributed = newErrFn(
"Credentials missing",
"Please set your credentials in the environment",
`In distributed server mode, access and secret keys should be specified via environment variables MINIO_ACCESS_KEY and MINIO_SECRET_KEY respectively`,
)
ErrInvalidErasureEndpoints = newErrFn(
"Invalid endpoint(s) in erasure mode",
"Please provide correct combination of local/remote paths",
"For more information, please refer to https://docs.min.io/docs/minio-erasure-code-quickstart-guide",
)
ErrInvalidNumberOfErasureEndpoints = newErrFn(
"Invalid total number of endpoints for erasure mode",
"Please provide an even number of endpoints greater or equal to 4",
"For more information, please refer to https://docs.min.io/docs/minio-erasure-code-quickstart-guide",
)
ErrStorageClassValue = newErrFn(
"Invalid storage class value",
"Please check the value",
`MINIO_STORAGE_CLASS_STANDARD: Format "EC:<Default_Parity_Standard_Class>" (e.g. "EC:3"). This sets the number of parity disks for MinIO server in Standard mode. Objects are stored in Standard mode, if storage class is not defined in Put request
MINIO_STORAGE_CLASS_RRS: Format "EC:<Default_Parity_Reduced_Redundancy_Class>" (e.g. "EC:3"). This sets the number of parity disks for MinIO server in Reduced Redundancy mode. Objects are stored in Reduced Redundancy mode, if Put request specifies RRS storage class
Refer to the link https://github.com/minio/minio/tree/master/docs/erasure/storage-class for more information`,
)
ErrUnexpectedBackendVersion = newErrFn(
"Backend version seems to be too recent",
"Please update to the latest MinIO version",
"",
)
ErrInvalidAddressFlag = newErrFn(
"--address input is invalid",
"Please check --address parameter",
`--address binds to a specific ADDRESS:PORT, ADDRESS can be an IPv4/IPv6 address or hostname (default port is ':9000')
Examples: --address ':443'
--address '172.16.34.31:9000'
--address '[fe80::da00:a6c8:e3ae:ddd7]:9000'`,
)
ErrInvalidFSEndpoint = newErrFn(
"Invalid endpoint for standalone FS mode",
"Please check the FS endpoint",
`FS mode requires only one writable disk path
Example 1:
$ minio server /data/minio/`,
)
ErrUnsupportedBackend = newErrFn(
"Unable to write to the backend",
"Please ensure your disk supports O_DIRECT",
"",
)
ErrCorruptedBackend = newErrFn(
"Unable to use the specified backend, pre-existing content detected",
"Please ensure your disk mount does not have any pre-existing content",
"",
)
ErrUnableToWriteInBackend = newErrFn(
"Unable to write to the backend",
"Please ensure MinIO binary has write permissions for the backend",
`Verify if MinIO binary is running as the same user who has write permissions for the backend`,
)
ErrPortAlreadyInUse = newErrFn(
"Port is already in use",
"Please ensure no other program uses the same address/port",
"",
)
ErrPortAccess = newErrFn(
"Unable to use specified port",
"Please ensure MinIO binary has 'cap_net_bind_service=+ep' permissions",
`Use 'sudo setcap cap_net_bind_service=+ep /path/to/minio' to provide sufficient permissions`,
)
ErrNoPermissionsToAccessDirFiles = newErrFn(
"Missing permissions to access the specified path",
"Please ensure the specified path can be accessed",
"",
)
ErrSSLUnexpectedError = newErrFn(
"Invalid TLS certificate",
"Please check the content of your certificate data",
`Only PEM (x.509) format is accepted as valid public & private certificates`,
)
ErrSSLUnexpectedData = newErrFn(
"Invalid TLS certificate",
"Please check your certificate",
"",
)
ErrSSLNoPassword = newErrFn(
"Missing TLS password",
"Please set the password to environment variable `MINIO_CERT_PASSWD` so that the private key can be decrypted",
"",
)
ErrNoCertsAndHTTPSEndpoints = newErrFn(
"HTTPS specified in endpoints, but no TLS certificate is found on the local machine",
"Please add TLS certificate or use HTTP endpoints only",
"Refer to https://docs.min.io/docs/how-to-secure-access-to-minio-server-with-tls for information about how to load a TLS certificate in your server",
)
ErrCertsAndHTTPEndpoints = newErrFn(
"HTTP specified in endpoints, but the server in the local machine is configured with a TLS certificate",
"Please remove the certificate in the configuration directory or switch to HTTPS",
"",
)
ErrSSLWrongPassword = newErrFn(
"Unable to decrypt the private key using the provided password",
"Please set the correct password in environment variable `MINIO_CERT_PASSWD`",
"",
)
ErrUnexpectedDataContent = newErrFn(
"Unexpected data content",
"Please contact MinIO at https://slack.min.io",
"",
)
ErrUnexpectedError = newErrFn(
"Unexpected error",
"Please contact MinIO at https://slack.min.io",
"",
)
ErrInvalidCompressionIncludesValue = newErrFn(
"Invalid compression include value",
"Please check the passed value",
"Compress extensions/mime-types are delimited by `,`. For eg, MINIO_COMPRESS_MIME_TYPES=\"A,B,C\"",
)
ErrInvalidGWSSEValue = newErrFn(
"Invalid gateway SSE value",
"Please check the passed value",
"MINIO_GATEWAY_SSE: Gateway SSE accepts only C and S3 as valid values. Delimit by `;` to set more than one value",
)
ErrInvalidGWSSEEnvValue = newErrFn(
"Invalid gateway SSE configuration",
"",
"Refer to https://docs.min.io/docs/minio-kms-quickstart-guide.html for setting up SSE",
)
)

View file

@ -1,293 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dns
import (
"context"
"encoding/json"
"errors"
"fmt"
"net"
"sort"
"strings"
"time"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/coredns/coredns/plugin/etcd/msg"
"github.com/coreos/etcd/clientv3"
)
// ErrNoEntriesFound - Indicates no entries were found for the given key (directory)
var ErrNoEntriesFound = errors.New("No entries found for this key")
const etcdPathSeparator = "/"
// create a new coredns service record for the bucket.
func newCoreDNSMsg(ip string, port string, ttl uint32, t time.Time) ([]byte, error) {
return json.Marshal(&SrvRecord{
Host: ip,
Port: json.Number(port),
TTL: ttl,
CreationDate: t,
})
}
// Close closes the internal etcd client; the CoreDNS client cannot be used after this call
func (c *CoreDNS) Close() {
c.etcdClient.Close()
}
// List - Retrieves list of DNS entries for the domain.
func (c *CoreDNS) List() (map[string][]SrvRecord, error) {
var srvRecords = map[string][]SrvRecord{}
for _, domainName := range c.domainNames {
key := msg.Path(fmt.Sprintf("%s.", domainName), c.prefixPath)
records, err := c.list(key)
if err != nil {
return nil, err
}
for _, record := range records {
if record.Key == "" {
continue
}
srvRecords[record.Key] = append(srvRecords[record.Key], record)
}
}
return srvRecords, nil
}
// Get - Retrieves DNS records for a bucket.
func (c *CoreDNS) Get(bucket string) ([]SrvRecord, error) {
var srvRecords []SrvRecord
for _, domainName := range c.domainNames {
key := msg.Path(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath)
records, err := c.list(key)
if err != nil {
return nil, err
}
// Keep only records whose Key is empty: this
// can only happen when the bucket entry is an
// exact prefix match. Any record.Key which does
// not match the prefix is skipped.
for _, record := range records {
if record.Key != "" {
continue
}
srvRecords = append(srvRecords, record)
}
}
if len(srvRecords) == 0 {
return nil, ErrNoEntriesFound
}
return srvRecords, nil
}
// msgUnPath converts an etcd path to a domain name.
func msgUnPath(s string) string {
ks := strings.Split(strings.Trim(s, etcdPathSeparator), etcdPathSeparator)
for i, j := 0, len(ks)-1; i < j; i, j = i+1, j-1 {
ks[i], ks[j] = ks[j], ks[i]
}
return strings.Join(ks, ".")
}
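// Illustrative sketch, not part of the original file: etcd stores the
// reversed labels, so "/com/example/bucket" maps back to the domain
// name "bucket.example.com".
func exampleMsgUnPath() bool {
	return msgUnPath("/com/example/bucket") == "bucket.example.com"
}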
// Retrieves the list of entries under the passed key.
// Note that this method fetches entries only up to two levels deep.
func (c *CoreDNS) list(key string) ([]SrvRecord, error) {
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
r, err := c.etcdClient.Get(ctx, key, clientv3.WithPrefix())
defer cancel()
if err != nil {
return nil, err
}
if r.Count == 0 {
key = strings.TrimSuffix(key, etcdPathSeparator)
r, err = c.etcdClient.Get(ctx, key)
if err != nil {
return nil, err
}
}
var srvRecords []SrvRecord
for _, n := range r.Kvs {
var srvRecord SrvRecord
if err = json.Unmarshal([]byte(n.Value), &srvRecord); err != nil {
return nil, err
}
srvRecord.Key = strings.TrimPrefix(string(n.Key), key)
srvRecord.Key = strings.TrimSuffix(srvRecord.Key, srvRecord.Host)
// Skip a non-bucket entry, such as the key
// /skydns/net/miniocloud/10.0.0.1 that may exist as
// a DNS entry for the server (rather than the bucket
// itself).
if srvRecord.Key == "" {
continue
}
srvRecord.Key = msgUnPath(srvRecord.Key)
srvRecords = append(srvRecords, srvRecord)
}
sort.Slice(srvRecords, func(i int, j int) bool {
return srvRecords[i].Key < srvRecords[j].Key
})
return srvRecords, nil
}
// Put - Adds DNS entries into etcd endpoint in CoreDNS etcd message format.
func (c *CoreDNS) Put(bucket string) error {
c.Delete(bucket) // delete any existing entries.
t := time.Now().UTC()
for ip := range c.domainIPs {
bucketMsg, err := newCoreDNSMsg(ip, c.domainPort, defaultTTL, t)
if err != nil {
return err
}
for _, domainName := range c.domainNames {
key := msg.Path(fmt.Sprintf("%s.%s", bucket, domainName), c.prefixPath)
key = key + etcdPathSeparator + ip
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
_, err = c.etcdClient.Put(ctx, key, string(bucketMsg))
defer cancel()
if err != nil {
ctx, cancel = context.WithTimeout(context.Background(), defaultContextTimeout)
c.etcdClient.Delete(ctx, key)
defer cancel()
return err
}
}
}
return nil
}
// Delete - Removes DNS entries added in Put().
func (c *CoreDNS) Delete(bucket string) error {
for _, domainName := range c.domainNames {
key := msg.Path(fmt.Sprintf("%s.%s.", bucket, domainName), c.prefixPath)
srvRecords, err := c.list(key)
if err != nil {
return err
}
for _, record := range srvRecords {
dctx, dcancel := context.WithTimeout(context.Background(), defaultContextTimeout)
if _, err = c.etcdClient.Delete(dctx, key+etcdPathSeparator+record.Host); err != nil {
dcancel()
return err
}
dcancel()
}
}
return nil
}
// DeleteRecord - Removes a specific DNS entry
func (c *CoreDNS) DeleteRecord(record SrvRecord) error {
for _, domainName := range c.domainNames {
key := msg.Path(fmt.Sprintf("%s.%s.", record.Key, domainName), c.prefixPath)
dctx, dcancel := context.WithTimeout(context.Background(), defaultContextTimeout)
if _, err := c.etcdClient.Delete(dctx, key+etcdPathSeparator+record.Host); err != nil {
dcancel()
return err
}
dcancel()
}
return nil
}
// CoreDNS - represents dns config for coredns server.
type CoreDNS struct {
domainNames []string
domainIPs set.StringSet
domainPort string
prefixPath string
etcdClient *clientv3.Client
}
// Option - functional option used to configure a CoreDNS client
type Option func(*CoreDNS)
// DomainNames sets the list of domain names used by this CoreDNS
// client; note the constructor fails if this is left empty.
func DomainNames(domainNames []string) Option {
return func(args *CoreDNS) {
args.domainNames = domainNames
}
}
// DomainIPs sets the list of custom domain IPs; note the
// constructor fails if this is left empty.
func DomainIPs(domainIPs set.StringSet) Option {
return func(args *CoreDNS) {
args.domainIPs = domainIPs
}
}
// DomainPort - is a string version of server port
func DomainPort(domainPort string) Option {
return func(args *CoreDNS) {
args.domainPort = domainPort
}
}
// CoreDNSPath - custom etcd prefix under which DNS
// service records are populated; optional, the
// conventional default is "/skydns".
func CoreDNSPath(prefix string) Option {
return func(args *CoreDNS) {
args.prefixPath = prefix
}
}
// NewCoreDNS - initializes a new CoreDNS client with the given options.
func NewCoreDNS(cfg clientv3.Config, setters ...Option) (*CoreDNS, error) {
etcdClient, err := clientv3.New(cfg)
if err != nil {
return nil, err
}
args := &CoreDNS{
etcdClient: etcdClient,
}
for _, setter := range setters {
setter(args)
}
if len(args.domainNames) == 0 || args.domainIPs.IsEmpty() {
return nil, errors.New("invalid argument")
}
// strip ports off of domainIPs
domainIPsWithoutPorts := args.domainIPs.ApplyFunc(func(ip string) string {
host, _, err := net.SplitHostPort(ip)
if err != nil {
if strings.Contains(err.Error(), "missing port in address") {
host = ip
}
}
return host
})
args.domainIPs = domainIPsWithoutPorts
return args, nil
}
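// Illustrative sketch, not part of the original file: wiring the
// functional options above. The etcd endpoint, domain, and IP are
// placeholders.
func exampleNewCoreDNS() (*CoreDNS, error) {
	return NewCoreDNS(
		clientv3.Config{Endpoints: []string{"http://localhost:2379"}},
		DomainNames([]string{"example.com"}),
		DomainIPs(set.CreateStringSet("10.0.0.1:9000")),
		DomainPort("9000"),
		CoreDNSPath("/skydns"),
	)
}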

View file

@ -1,56 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dns
import (
"encoding/json"
"time"
)
const (
defaultTTL = 30
defaultContextTimeout = 5 * time.Minute
)
// SrvRecord - represents a DNS service record
type SrvRecord struct {
Host string `json:"host,omitempty"`
Port json.Number `json:"port,omitempty"`
Priority int `json:"priority,omitempty"`
Weight int `json:"weight,omitempty"`
Text string `json:"text,omitempty"`
Mail bool `json:"mail,omitempty"` // Be an MX record. Priority becomes Preference.
TTL uint32 `json:"ttl,omitempty"`
// Holds info about when the entry was first created.
CreationDate time.Time `json:"creationDate"`
// When a SRV record with a "Host: IP-address" is added, we synthesize
// a srv.Target domain name. Normally we convert the full Key where
// the record lives to a DNS name and use this as the srv.Target. When
// TargetStrip > 0 we strip the left most TargetStrip labels from the
// DNS name.
TargetStrip int `json:"targetstrip,omitempty"`
// Group is used to group (or *not* to group) different services
// together. Services with an identical Group are returned in
// the same answer.
Group string `json:"group,omitempty"`
// Key carries the original key used during Put().
Key string `json:"-"`
}

View file

@ -1,167 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package etcd
import (
"crypto/tls"
"crypto/x509"
"strings"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/clientv3/namespace"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/pkg/env"
xnet "github.com/minio/minio/pkg/net"
)
const (
// Default values used while communicating with etcd.
defaultDialTimeout = 5 * time.Second
defaultDialKeepAlive = 30 * time.Second
)
// etcd environment values
const (
Endpoints = "endpoints"
PathPrefix = "path_prefix"
CoreDNSPath = "coredns_path"
ClientCert = "client_cert"
ClientCertKey = "client_cert_key"
EnvEtcdEndpoints = "MINIO_ETCD_ENDPOINTS"
EnvEtcdPathPrefix = "MINIO_ETCD_PATH_PREFIX"
EnvEtcdCoreDNSPath = "MINIO_ETCD_COREDNS_PATH"
EnvEtcdClientCert = "MINIO_ETCD_CLIENT_CERT"
EnvEtcdClientCertKey = "MINIO_ETCD_CLIENT_CERT_KEY"
)
// DefaultKVS - default KV settings for etcd.
var (
DefaultKVS = config.KVS{
config.KV{
Key: Endpoints,
Value: "",
},
config.KV{
Key: PathPrefix,
Value: "",
},
config.KV{
Key: CoreDNSPath,
Value: "/skydns",
},
config.KV{
Key: ClientCert,
Value: "",
},
config.KV{
Key: ClientCertKey,
Value: "",
},
}
)
// Config - server etcd config.
type Config struct {
Enabled bool `json:"enabled"`
PathPrefix string `json:"pathPrefix"`
CoreDNSPath string `json:"coreDNSPath"`
clientv3.Config
}
// New - initialize new etcd client.
func New(cfg Config) (*clientv3.Client, error) {
if !cfg.Enabled {
return nil, nil
}
cli, err := clientv3.New(cfg.Config)
if err != nil {
return nil, err
}
cli.KV = namespace.NewKV(cli.KV, cfg.PathPrefix)
cli.Watcher = namespace.NewWatcher(cli.Watcher, cfg.PathPrefix)
cli.Lease = namespace.NewLease(cli.Lease, cfg.PathPrefix)
return cli, nil
}
func parseEndpoints(endpoints string) ([]string, bool, error) {
etcdEndpoints := strings.Split(endpoints, config.ValueSeparator)
var etcdSecure bool
for _, endpoint := range etcdEndpoints {
u, err := xnet.ParseHTTPURL(endpoint)
if err != nil {
return nil, false, err
}
if etcdSecure && u.Scheme == "http" {
return nil, false, config.Errorf("all endpoints should be https or http: %s", endpoint)
}
// If one of the endpoints is https, we will use https directly.
etcdSecure = etcdSecure || u.Scheme == "https"
}
return etcdEndpoints, etcdSecure, nil
}
// Enabled returns if etcd is enabled.
func Enabled(kvs config.KVS) bool {
endpoints := kvs.Get(Endpoints)
return endpoints != ""
}
// LookupConfig - initializes a new etcd config.
func LookupConfig(kvs config.KVS, rootCAs *x509.CertPool) (Config, error) {
cfg := Config{}
if err := config.CheckValidKeys(config.EtcdSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}
endpoints := env.Get(EnvEtcdEndpoints, kvs.Get(Endpoints))
if endpoints == "" {
return cfg, nil
}
etcdEndpoints, etcdSecure, err := parseEndpoints(endpoints)
if err != nil {
return cfg, err
}
cfg.Enabled = true
cfg.DialTimeout = defaultDialTimeout
cfg.DialKeepAliveTime = defaultDialKeepAlive
cfg.Endpoints = etcdEndpoints
cfg.CoreDNSPath = env.Get(EnvEtcdCoreDNSPath, kvs.Get(CoreDNSPath))
// Default path prefix for all keys on etcd, other than CoreDNSPath.
cfg.PathPrefix = env.Get(EnvEtcdPathPrefix, kvs.Get(PathPrefix))
if etcdSecure {
cfg.TLS = &tls.Config{
RootCAs: rootCAs,
}
// This is only to support client side certificate authentication
// https://coreos.com/etcd/docs/latest/op-guide/security.html
etcdClientCertFile := env.Get(EnvEtcdClientCert, kvs.Get(ClientCert))
etcdClientCertKey := env.Get(EnvEtcdClientCertKey, kvs.Get(ClientCertKey))
if etcdClientCertFile != "" && etcdClientCertKey != "" {
cfg.TLS.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
cert, err := tls.LoadX509KeyPair(etcdClientCertFile, etcdClientCertKey)
return &cert, err
}
}
}
return cfg, nil
}
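// Illustrative sketch, not part of the original file: endpoints resolved
// from the environment override the stored KVS; os.Setenv here is only
// for demonstration and `os` is assumed to be imported. A nil root CA
// pool falls back to the host's certificate store.
func exampleEtcdLookup() (Config, error) {
	os.Setenv(EnvEtcdEndpoints, "https://etcd-1:2379,https://etcd-2:2379")
	return LookupConfig(DefaultKVS, nil)
}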

View file

@ -1,66 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package etcd
import (
"reflect"
"testing"
)
// TestParseEndpoints - tests parseEndpoints function with valid and invalid inputs.
func TestParseEndpoints(t *testing.T) {
testCases := []struct {
s string
endpoints []string
secure bool
success bool
}{
// Invalid inputs
{"https://localhost:2379,http://localhost:2380", nil, false, false},
{",,,", nil, false, false},
{"", nil, false, false},
{"ftp://localhost:2379", nil, false, false},
{"http://localhost:2379000", nil, false, false},
// Valid inputs
{"https://localhost:2379,https://localhost:2380", []string{
"https://localhost:2379", "https://localhost:2380"},
true, true},
{"http://localhost:2379", []string{"http://localhost:2379"}, false, true},
}
for _, testCase := range testCases {
testCase := testCase
t.Run(testCase.s, func(t *testing.T) {
endpoints, secure, err := parseEndpoints(testCase.s)
if err != nil && testCase.success {
t.Errorf("expected to succeed but failed with %s", err)
}
if !testCase.success && err == nil {
t.Error("expected failure but succeeded instead")
}
if testCase.success {
if !reflect.DeepEqual(endpoints, testCase.endpoints) {
t.Errorf("expected %s, got %s", testCase.endpoints, endpoints)
}
if secure != testCase.secure {
t.Errorf("expected %t, got %t", testCase.secure, secure)
}
}
})
}
}

View file

@ -1,60 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package etcd
import "github.com/minio/minio/legacy/config"
// etcd config documented in default config
var (
Help = config.HelpKVS{
config.HelpKV{
Key: Endpoints,
Description: `comma separated list of etcd endpoints e.g. "http://localhost:2379"`,
Type: "csv",
},
config.HelpKV{
Key: PathPrefix,
Description: `namespace prefix to isolate tenants e.g. "customer1/"`,
Optional: true,
Type: "path",
},
config.HelpKV{
Key: CoreDNSPath,
Description: `shared bucket DNS records, default is "/skydns"`,
Optional: true,
Type: "path",
},
config.HelpKV{
Key: ClientCert,
Description: `client cert for mTLS authentication`,
Optional: true,
Type: "path",
},
config.HelpKV{
Key: ClientCertKey,
Description: `client cert key for mTLS authentication`,
Optional: true,
Type: "path",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)

View file

@ -1,63 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package config
// HelpKV - describes a single config key: its type,
// description, and whether it is optional.
type HelpKV struct {
Key string `json:"key"`
Type string `json:"type"`
Description string `json:"description"`
Optional bool `json:"optional"`
// Indicates if sub-sys supports multiple targets.
MultipleTargets bool `json:"multipleTargets"`
}
// HelpKVS - an ordered list of help messages for keys.
type HelpKVS []HelpKV
// Lookup - lookup a key from help kvs.
func (hkvs HelpKVS) Lookup(key string) (HelpKV, bool) {
for _, hkv := range hkvs {
if hkv.Key == key {
return hkv, true
}
}
return HelpKV{}, false
}
// DefaultComment used across all sub-systems.
const DefaultComment = "optionally add a comment to this setting"
// Region help is documented in default config
var (
RegionHelp = HelpKVS{
HelpKV{
Key: RegionName,
Type: "string",
Description: `name of the location of the server e.g. "us-west-rack2"`,
Optional: true,
},
HelpKV{
Key: Comment,
Type: "sentence",
Description: DefaultComment,
Optional: true,
},
}
)

View file

@ -1,378 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ldap
import (
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"net"
"strings"
"time"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/pkg/env"
ldap "gopkg.in/ldap.v3"
)
const (
defaultLDAPExpiry = time.Hour * 1
)
// Config contains AD/LDAP server connectivity information.
type Config struct {
Enabled bool `json:"enabled"`
// E.g. "ldap.minio.io:636"
ServerAddr string `json:"serverAddr"`
// STS credentials expiry duration
STSExpiryDuration string `json:"stsExpiryDuration"`
// Format string for usernames
UsernameFormat string `json:"usernameFormat"`
UsernameFormats []string `json:"-"`
UsernameSearchFilter string `json:"-"`
UsernameSearchBaseDNS []string `json:"-"`
GroupSearchBaseDN string `json:"groupSearchBaseDN"`
GroupSearchBaseDNS []string `json:"-"`
GroupSearchFilter string `json:"groupSearchFilter"`
GroupNameAttribute string `json:"groupNameAttribute"`
stsExpiryDuration time.Duration // contains converted value
tlsSkipVerify bool // allows skipping TLS verification
serverInsecure bool // allows plain text connection to LDAP Server
serverStartTLS bool // connects in plain text, then upgrades to TLS via StartTLS
rootCAs *x509.CertPool
}
// LDAP keys and envs.
const (
ServerAddr = "server_addr"
STSExpiry = "sts_expiry"
UsernameFormat = "username_format"
UsernameSearchFilter = "username_search_filter"
UsernameSearchBaseDN = "username_search_base_dn"
GroupSearchFilter = "group_search_filter"
GroupNameAttribute = "group_name_attribute"
GroupSearchBaseDN = "group_search_base_dn"
TLSSkipVerify = "tls_skip_verify"
ServerInsecure = "server_insecure"
ServerStartTLS = "server_starttls"
EnvServerAddr = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
EnvSTSExpiry = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
EnvTLSSkipVerify = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"
EnvServerInsecure = "MINIO_IDENTITY_LDAP_SERVER_INSECURE"
EnvServerStartTLS = "MINIO_IDENTITY_LDAP_SERVER_STARTTLS"
EnvUsernameFormat = "MINIO_IDENTITY_LDAP_USERNAME_FORMAT"
EnvUsernameSearchFilter = "MINIO_IDENTITY_LDAP_USERNAME_SEARCH_FILTER"
EnvUsernameSearchBaseDN = "MINIO_IDENTITY_LDAP_USERNAME_SEARCH_BASE_DN"
EnvGroupSearchFilter = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_FILTER"
EnvGroupNameAttribute = "MINIO_IDENTITY_LDAP_GROUP_NAME_ATTRIBUTE"
EnvGroupSearchBaseDN = "MINIO_IDENTITY_LDAP_GROUP_SEARCH_BASE_DN"
)
// DefaultKVS - default config for LDAP config
var (
DefaultKVS = config.KVS{
config.KV{
Key: ServerAddr,
Value: "",
},
config.KV{
Key: UsernameFormat,
Value: "",
},
config.KV{
Key: UsernameSearchFilter,
Value: "",
},
config.KV{
Key: UsernameSearchBaseDN,
Value: "",
},
config.KV{
Key: GroupSearchFilter,
Value: "",
},
config.KV{
Key: GroupNameAttribute,
Value: "",
},
config.KV{
Key: GroupSearchBaseDN,
Value: "",
},
config.KV{
Key: STSExpiry,
Value: "1h",
},
config.KV{
Key: TLSSkipVerify,
Value: config.EnableOff,
},
config.KV{
Key: ServerInsecure,
Value: config.EnableOff,
},
config.KV{
Key: ServerStartTLS,
Value: config.EnableOff,
},
}
)
const (
dnDelimiter = ";"
)
func getGroups(conn *ldap.Conn, sreq *ldap.SearchRequest) ([]string, error) {
var groups []string
sres, err := conn.Search(sreq)
if err != nil {
return nil, err
}
for _, entry := range sres.Entries {
// We only queried one attribute,
// so we only look up the first one.
groups = append(groups, entry.Attributes[0].Values...)
}
return groups, nil
}
func (l *Config) bind(conn *ldap.Conn, username, password string) ([]string, error) {
var bindDNS = make([]string, len(l.UsernameFormats))
for i, usernameFormat := range l.UsernameFormats {
bindDN := fmt.Sprintf(usernameFormat, username)
// Bind with user credentials to validate the password
if err := conn.Bind(bindDN, password); err != nil {
return nil, err
}
bindDNS[i] = bindDN
}
return bindDNS, nil
}
var standardAttributes = []string{
"givenName",
"sn",
"cn",
"memberOf",
"email",
}
// Bind - binds to LDAP, searches LDAP and returns the list of groups.
func (l *Config) Bind(username, password string) ([]string, error) {
conn, err := l.Connect()
if err != nil {
return nil, err
}
defer conn.Close()
bindDNS, err := l.bind(conn, username, password)
if err != nil {
return nil, err
}
var groups []string
if l.UsernameSearchFilter != "" {
for _, userSearchBase := range l.UsernameSearchBaseDNS {
filter := strings.Replace(l.UsernameSearchFilter, "%s",
ldap.EscapeFilter(username), -1)
searchRequest := ldap.NewSearchRequest(
userSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
standardAttributes,
nil,
)
groups, err = getGroups(conn, searchRequest)
if err != nil {
return nil, err
}
}
}
if l.GroupSearchFilter != "" {
for _, groupSearchBase := range l.GroupSearchBaseDNS {
var filters []string
if l.GroupNameAttribute == "" {
filters = []string{strings.Replace(l.GroupSearchFilter, "%s",
ldap.EscapeFilter(username), -1)}
} else {
// With group name attribute specified, make sure to
// include search queries for CN distinguished name
for _, bindDN := range bindDNS {
filters = append(filters, strings.Replace(l.GroupSearchFilter, "%s",
ldap.EscapeFilter(bindDN), -1))
}
}
for _, filter := range filters {
searchRequest := ldap.NewSearchRequest(
groupSearchBase,
ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
filter,
standardAttributes,
nil,
)
var newGroups []string
newGroups, err = getGroups(conn, searchRequest)
if err != nil {
return nil, err
}
groups = append(groups, newGroups...)
}
}
}
return groups, nil
}
// Connect connects to the LDAP server.
func (l *Config) Connect() (ldapConn *ldap.Conn, err error) {
if l == nil {
return nil, errors.New("LDAP is not configured")
}
if _, _, err = net.SplitHostPort(l.ServerAddr); err != nil {
// Use the default LDAP port "636" if none is specified
l.ServerAddr = net.JoinHostPort(l.ServerAddr, "636")
}
if l.serverInsecure {
return ldap.Dial("tcp", l.ServerAddr)
}
if l.serverStartTLS {
conn, err := ldap.Dial("tcp", l.ServerAddr)
if err != nil {
return nil, err
}
err = conn.StartTLS(&tls.Config{
InsecureSkipVerify: l.tlsSkipVerify,
RootCAs: l.rootCAs,
})
return conn, err
}
return ldap.DialTLS("tcp", l.ServerAddr, &tls.Config{
InsecureSkipVerify: l.tlsSkipVerify,
RootCAs: l.rootCAs,
})
}
// GetExpiryDuration - return parsed expiry duration.
func (l Config) GetExpiryDuration() time.Duration {
return l.stsExpiryDuration
}
// Enabled returns if LDAP is enabled.
func Enabled(kvs config.KVS) bool {
return kvs.Get(ServerAddr) != ""
}
// Lookup - initializes the LDAP config; ENV values, if set, override the stored config.
func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
l = Config{}
if err = config.CheckValidKeys(config.IdentityLDAPSubSys, kvs, DefaultKVS); err != nil {
return l, err
}
ldapServer := env.Get(EnvServerAddr, kvs.Get(ServerAddr))
if ldapServer == "" {
return l, nil
}
l.Enabled = true
l.ServerAddr = ldapServer
l.stsExpiryDuration = defaultLDAPExpiry
if v := env.Get(EnvSTSExpiry, kvs.Get(STSExpiry)); v != "" {
expDur, err := time.ParseDuration(v)
if err != nil {
return l, errors.New("LDAP expiry time err:" + err.Error())
}
if expDur <= 0 {
return l, errors.New("LDAP expiry time has to be positive")
}
l.STSExpiryDuration = v
l.stsExpiryDuration = expDur
}
if v := env.Get(EnvServerInsecure, kvs.Get(ServerInsecure)); v != "" {
l.serverInsecure, err = config.ParseBool(v)
if err != nil {
return l, err
}
}
if v := env.Get(EnvServerStartTLS, kvs.Get(ServerStartTLS)); v != "" {
l.serverStartTLS, err = config.ParseBool(v)
if err != nil {
return l, err
}
}
if v := env.Get(EnvTLSSkipVerify, kvs.Get(TLSSkipVerify)); v != "" {
l.tlsSkipVerify, err = config.ParseBool(v)
if err != nil {
return l, err
}
}
if v := env.Get(EnvUsernameFormat, kvs.Get(UsernameFormat)); v != "" {
if !strings.Contains(v, "%s") {
return l, errors.New("LDAP username format doesn't have '%s' substitution")
}
l.UsernameFormats = strings.Split(v, dnDelimiter)
} else {
return l, fmt.Errorf("'%s' cannot be empty and must have a value", UsernameFormat)
}
if v := env.Get(EnvUsernameSearchFilter, kvs.Get(UsernameSearchFilter)); v != "" {
if !strings.Contains(v, "%s") {
return l, errors.New("LDAP username search filter doesn't have '%s' substitution")
}
l.UsernameSearchFilter = v
}
if v := env.Get(EnvUsernameSearchBaseDN, kvs.Get(UsernameSearchBaseDN)); v != "" {
l.UsernameSearchBaseDNS = strings.Split(v, dnDelimiter)
}
grpSearchFilter := env.Get(EnvGroupSearchFilter, kvs.Get(GroupSearchFilter))
grpSearchNameAttr := env.Get(EnvGroupNameAttribute, kvs.Get(GroupNameAttribute))
grpSearchBaseDN := env.Get(EnvGroupSearchBaseDN, kvs.Get(GroupSearchBaseDN))
// Either all group params must be set or none must be set.
var allSet bool
if grpSearchFilter != "" {
if grpSearchNameAttr == "" || grpSearchBaseDN == "" {
return l, errors.New("All group related parameters must be set")
}
allSet = true
}
if allSet {
l.GroupSearchFilter = grpSearchFilter
l.GroupNameAttribute = grpSearchNameAttr
l.GroupSearchBaseDNS = strings.Split(grpSearchBaseDN, dnDelimiter)
}
l.rootCAs = rootCAs
return l, nil
}
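A minimal standalone sketch of the ";"-delimited convention used for the multi-valued DN settings above (UsernameFormats and the search base lists); the values are illustrative:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// The legacy LDAP config splits multi-valued DN settings on ";" (dnDelimiter).
	formats := "uid=%s,cn=accounts,dc=example,dc=com;uid=%s,ou=people,dc=example,dc=com"
	for _, f := range strings.Split(formats, ";") {
		// Each format substitutes the login name for "%s", as Lookup's callers do.
		fmt.Println(strings.Replace(f, "%s", "alice", -1))
	}
}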

View file

@ -1,86 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ldap
import "github.com/minio/minio/legacy/config"
// Help template for LDAP identity feature.
var (
Help = config.HelpKVS{
config.HelpKV{
Key: ServerAddr,
Description: `AD/LDAP server address e.g. "myldapserver.com:636"`,
Type: "address",
},
config.HelpKV{
Key: UsernameFormat,
Description: `";" separated list of username bind DNs e.g. "uid=%s,cn=accounts,dc=myldapserver,dc=com"`,
Type: "list",
},
config.HelpKV{
Key: UsernameSearchFilter,
Description: `user search filter, for example "(cn=%s)" or "(sAMAccountName=%s)" or "(uid=%s)"`,
Type: "string",
},
config.HelpKV{
Key: GroupSearchFilter,
Description: `search filter for groups e.g. "(&(objectclass=groupOfNames)(memberUid=%s))"`,
Type: "string",
},
config.HelpKV{
Key: GroupSearchBaseDN,
Description: `";" separated list of group search base DNs e.g. "dc=myldapserver,dc=com"`,
Type: "list",
},
config.HelpKV{
Key: UsernameSearchBaseDN,
Description: `";" separated list of username search DNs`,
Type: "list",
Optional: true,
},
config.HelpKV{
Key: GroupNameAttribute,
Description: `search attribute for group name e.g. "cn"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: STSExpiry,
Description: `temporary credentials validity duration in s,m,h,d. Default is "1h"`,
Optional: true,
Type: "duration",
},
config.HelpKV{
Key: TLSSkipVerify,
Description: `trust server TLS without verification, defaults to "off" (verify)`,
Optional: true,
Type: "on|off",
},
config.HelpKV{
Key: ServerInsecure,
Description: `allow plain text connection to AD/LDAP server, defaults to "off"`,
Optional: true,
Type: "on|off",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)

View file

@ -1,53 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ldap
import "github.com/minio/minio/legacy/config"
// SetIdentityLDAP - one-time migration code for carrying the older LDAP config format over to the new one.
func SetIdentityLDAP(s config.Config, ldapArgs Config) {
if !ldapArgs.Enabled {
// LDAP not enabled, so there is nothing to preserve in the new settings.
return
}
s[config.IdentityLDAPSubSys][config.Default] = config.KVS{
config.KV{
Key: ServerAddr,
Value: ldapArgs.ServerAddr,
},
config.KV{
Key: STSExpiry,
Value: ldapArgs.STSExpiryDuration,
},
config.KV{
Key: UsernameFormat,
Value: ldapArgs.UsernameFormat,
},
config.KV{
Key: GroupSearchFilter,
Value: ldapArgs.GroupSearchFilter,
},
config.KV{
Key: GroupNameAttribute,
Value: ldapArgs.GroupNameAttribute,
},
config.KV{
Key: GroupSearchBaseDN,
Value: ldapArgs.GroupSearchBaseDN,
},
}
}

View file

@ -1,53 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"crypto"
"github.com/dgrijalva/jwt-go"
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3"
)
// Specific instances for ES3256 and company (ECDSA over SHA-3)
var (
SigningMethodES3256 *jwt.SigningMethodECDSA
SigningMethodES3384 *jwt.SigningMethodECDSA
SigningMethodES3512 *jwt.SigningMethodECDSA
)
func init() {
// ES3256
SigningMethodES3256 = &jwt.SigningMethodECDSA{Name: "ES3256", Hash: crypto.SHA3_256, KeySize: 32, CurveBits: 256}
jwt.RegisterSigningMethod(SigningMethodES3256.Alg(), func() jwt.SigningMethod {
return SigningMethodES3256
})
// ES3384
SigningMethodES3384 = &jwt.SigningMethodECDSA{Name: "ES3384", Hash: crypto.SHA3_384, KeySize: 48, CurveBits: 384}
jwt.RegisterSigningMethod(SigningMethodES3384.Alg(), func() jwt.SigningMethod {
return SigningMethodES3384
})
// ES3512
SigningMethodES3512 = &jwt.SigningMethodECDSA{Name: "ES3512", Hash: crypto.SHA3_512, KeySize: 66, CurveBits: 521}
jwt.RegisterSigningMethod(SigningMethodES3512.Alg(), func() jwt.SigningMethod {
return SigningMethodES3512
})
}
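// Hypothetical usage once init has registered the methods (the claims are
// illustrative): they behave like any other jwt-go signing method.
//
//	token := jwt.NewWithClaims(SigningMethodES3256, jwt.MapClaims{"sub": "alice"})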

View file

@ -1,54 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import "github.com/minio/minio/legacy/config"
// Help template for OpenID identity feature.
var (
Help = config.HelpKVS{
config.HelpKV{
Key: ConfigURL,
Description: `openid discovery document e.g. "https://accounts.google.com/.well-known/openid-configuration"`,
Type: "url",
},
config.HelpKV{
Key: ClientID,
Description: `unique public identifier for apps e.g. "292085223830.apps.googleusercontent.com"`,
Type: "string",
Optional: true,
},
config.HelpKV{
Key: ClaimName,
Description: `JWT canned policy claim name, defaults to "policy"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: ClaimPrefix,
Description: `JWT claim namespace prefix e.g. "customer1/"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)

View file

@ -1,137 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"encoding/base64"
"encoding/binary"
"errors"
"fmt"
"math/big"
"strings"
)
// JWKS - https://tools.ietf.org/html/rfc7517
type JWKS struct {
Keys []*JWKS `json:"keys,omitempty"`
Kty string `json:"kty"`
Use string `json:"use,omitempty"`
Kid string `json:"kid,omitempty"`
Alg string `json:"alg,omitempty"`
Crv string `json:"crv,omitempty"`
X string `json:"x,omitempty"`
Y string `json:"y,omitempty"`
D string `json:"d,omitempty"`
N string `json:"n,omitempty"`
E string `json:"e,omitempty"`
K string `json:"k,omitempty"`
}
func safeDecode(str string) ([]byte, error) {
lenMod4 := len(str) % 4
if lenMod4 > 0 {
str = str + strings.Repeat("=", 4-lenMod4)
}
return base64.URLEncoding.DecodeString(str)
}
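// A worked example of the padding logic above (not part of the original
// file): "MTIzNDU" has length 7, so 4-(7%4) = 1 "=" is appended and the
// string then decodes as ordinary padded base64url:
//
//	safeDecode("MTIzNDU") // -> []byte("12345"), nil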
var (
errMalformedJWKRSAKey = errors.New("malformed JWK RSA key")
errMalformedJWKECKey = errors.New("malformed JWK EC key")
)
// DecodePublicKey - decodes JSON Web Key (JWK) as public key
func (key *JWKS) DecodePublicKey() (crypto.PublicKey, error) {
switch key.Kty {
case "RSA":
if key.N == "" || key.E == "" {
return nil, errMalformedJWKRSAKey
}
// decode exponent
data, err := safeDecode(key.E)
if err != nil {
return nil, errMalformedJWKRSAKey
}
if len(data) < 4 {
ndata := make([]byte, 4)
copy(ndata[4-len(data):], data)
data = ndata
}
pubKey := &rsa.PublicKey{
N: &big.Int{},
E: int(binary.BigEndian.Uint32(data[:])),
}
data, err = safeDecode(key.N)
if err != nil {
return nil, errMalformedJWKRSAKey
}
pubKey.N.SetBytes(data)
return pubKey, nil
case "EC":
if key.Crv == "" || key.X == "" || key.Y == "" {
return nil, errMalformedJWKECKey
}
var curve elliptic.Curve
switch key.Crv {
case "P-224":
curve = elliptic.P224()
case "P-256":
curve = elliptic.P256()
case "P-384":
curve = elliptic.P384()
case "P-521":
curve = elliptic.P521()
default:
return nil, fmt.Errorf("Unknown curve type: %s", key.Crv)
}
pubKey := &ecdsa.PublicKey{
Curve: curve,
X: &big.Int{},
Y: &big.Int{},
}
data, err := safeDecode(key.X)
if err != nil {
return nil, errMalformedJWKECKey
}
pubKey.X.SetBytes(data)
data, err = safeDecode(key.Y)
if err != nil {
return nil, errMalformedJWKECKey
}
pubKey.Y.SetBytes(data)
return pubKey, nil
default:
return nil, fmt.Errorf("Unknown JWK key type %s", key.Kty)
}
}

View file

@ -1,103 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"bytes"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rsa"
"encoding/json"
"testing"
)
// A.1 - Example public keys
func TestPublicKey(t *testing.T) {
const jsonkey = `{"keys":
[
{"kty":"EC",
"crv":"P-256",
"x":"MKBCTNIcKUSDii11ySs3526iDZ8AiTo7Tu6KPAqv7D4",
"y":"4Etl6SRW2YiLUrN5vfvVHuhp7x8PxltmWWlbbM4IFyM",
"use":"enc",
"kid":"1"},
{"kty":"RSA",
"n": "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw",
"e":"AQAB",
"alg":"RS256",
"kid":"2011-04-29"}
]
}`
var jk JWKS
if err := json.Unmarshal([]byte(jsonkey), &jk); err != nil {
t.Fatal("Unmarshal: ", err)
} else if len(jk.Keys) != 2 {
t.Fatalf("Expected 2 keys, got %d", len(jk.Keys))
}
keys := make([]crypto.PublicKey, len(jk.Keys))
for ii, jks := range jk.Keys {
var err error
keys[ii], err = jks.DecodePublicKey()
if err != nil {
t.Fatalf("Failed to decode key %d: %v", ii, err)
}
}
if key0, ok := keys[0].(*ecdsa.PublicKey); !ok {
t.Fatalf("Expected ECDSA key[0], got %T", keys[0])
} else if key1, ok := keys[1].(*rsa.PublicKey); !ok {
t.Fatalf("Expected RSA key[1], got %T", keys[1])
} else if key0.Curve != elliptic.P256() {
t.Fatal("Key[0] is not using P-256 curve")
} else if !bytes.Equal(key0.X.Bytes(), []byte{0x30, 0xa0, 0x42, 0x4c, 0xd2,
0x1c, 0x29, 0x44, 0x83, 0x8a, 0x2d, 0x75, 0xc9, 0x2b, 0x37, 0xe7, 0x6e, 0xa2,
0xd, 0x9f, 0x0, 0x89, 0x3a, 0x3b, 0x4e, 0xee, 0x8a, 0x3c, 0xa, 0xaf, 0xec, 0x3e}) {
t.Fatalf("Bad key[0].X, got %v", key0.X.Bytes())
} else if !bytes.Equal(key0.Y.Bytes(), []byte{0xe0, 0x4b, 0x65, 0xe9, 0x24,
0x56, 0xd9, 0x88, 0x8b, 0x52, 0xb3, 0x79, 0xbd, 0xfb, 0xd5, 0x1e, 0xe8,
0x69, 0xef, 0x1f, 0xf, 0xc6, 0x5b, 0x66, 0x59, 0x69, 0x5b, 0x6c, 0xce,
0x8, 0x17, 0x23}) {
t.Fatalf("Bad key[0].Y, got %v", key0.Y.Bytes())
} else if key1.E != 0x10001 {
t.Fatalf("Bad key[1].E: %d", key1.E)
} else if !bytes.Equal(key1.N.Bytes(), []byte{0xd2, 0xfc, 0x7b, 0x6a, 0xa, 0x1e,
0x6c, 0x67, 0x10, 0x4a, 0xeb, 0x8f, 0x88, 0xb2, 0x57, 0x66, 0x9b, 0x4d, 0xf6,
0x79, 0xdd, 0xad, 0x9, 0x9b, 0x5c, 0x4a, 0x6c, 0xd9, 0xa8, 0x80, 0x15, 0xb5,
0xa1, 0x33, 0xbf, 0xb, 0x85, 0x6c, 0x78, 0x71, 0xb6, 0xdf, 0x0, 0xb, 0x55,
0x4f, 0xce, 0xb3, 0xc2, 0xed, 0x51, 0x2b, 0xb6, 0x8f, 0x14, 0x5c, 0x6e, 0x84,
0x34, 0x75, 0x2f, 0xab, 0x52, 0xa1, 0xcf, 0xc1, 0x24, 0x40, 0x8f, 0x79, 0xb5,
0x8a, 0x45, 0x78, 0xc1, 0x64, 0x28, 0x85, 0x57, 0x89, 0xf7, 0xa2, 0x49, 0xe3,
0x84, 0xcb, 0x2d, 0x9f, 0xae, 0x2d, 0x67, 0xfd, 0x96, 0xfb, 0x92, 0x6c, 0x19,
0x8e, 0x7, 0x73, 0x99, 0xfd, 0xc8, 0x15, 0xc0, 0xaf, 0x9, 0x7d, 0xde, 0x5a,
0xad, 0xef, 0xf4, 0x4d, 0xe7, 0xe, 0x82, 0x7f, 0x48, 0x78, 0x43, 0x24, 0x39,
0xbf, 0xee, 0xb9, 0x60, 0x68, 0xd0, 0x47, 0x4f, 0xc5, 0xd, 0x6d, 0x90, 0xbf,
0x3a, 0x98, 0xdf, 0xaf, 0x10, 0x40, 0xc8, 0x9c, 0x2, 0xd6, 0x92, 0xab, 0x3b,
0x3c, 0x28, 0x96, 0x60, 0x9d, 0x86, 0xfd, 0x73, 0xb7, 0x74, 0xce, 0x7, 0x40,
0x64, 0x7c, 0xee, 0xea, 0xa3, 0x10, 0xbd, 0x12, 0xf9, 0x85, 0xa8, 0xeb, 0x9f,
0x59, 0xfd, 0xd4, 0x26, 0xce, 0xa5, 0xb2, 0x12, 0xf, 0x4f, 0x2a, 0x34, 0xbc,
0xab, 0x76, 0x4b, 0x7e, 0x6c, 0x54, 0xd6, 0x84, 0x2, 0x38, 0xbc, 0xc4, 0x5, 0x87,
0xa5, 0x9e, 0x66, 0xed, 0x1f, 0x33, 0x89, 0x45, 0x77, 0x63, 0x5c, 0x47, 0xa,
0xf7, 0x5c, 0xf9, 0x2c, 0x20, 0xd1, 0xda, 0x43, 0xe1, 0xbf, 0xc4, 0x19, 0xe2,
0x22, 0xa6, 0xf0, 0xd0, 0xbb, 0x35, 0x8c, 0x5e, 0x38, 0xf9, 0xcb, 0x5, 0xa, 0xea,
0xfe, 0x90, 0x48, 0x14, 0xf1, 0xac, 0x1a, 0xa4, 0x9c, 0xca, 0x9e, 0xa0, 0xca, 0x83}) {
t.Fatalf("Bad key[1].N, got %v", key1.N.Bytes())
}
}

View file

@ -1,362 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"crypto"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strconv"
"time"
jwtgo "github.com/dgrijalva/jwt-go"
"github.com/minio/minio/legacy/config"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
xnet "github.com/minio/minio/pkg/net"
)
// Config - OpenID config, including the JWKS authentication target arguments.
type Config struct {
JWKS struct {
URL *xnet.URL `json:"url"`
} `json:"jwks"`
URL *xnet.URL `json:"url,omitempty"`
ClaimPrefix string `json:"claimPrefix,omitempty"`
ClaimName string `json:"claimName,omitempty"`
DiscoveryDoc DiscoveryDoc
ClientID string
publicKeys map[string]crypto.PublicKey
transport *http.Transport
closeRespFn func(io.ReadCloser)
}
// PopulatePublicKey - fetches the JWKS document and populates the public key set.
func (r *Config) PopulatePublicKey() error {
if r.JWKS.URL == nil || r.JWKS.URL.String() == "" {
return nil
}
transport := http.DefaultTransport
if r.transport != nil {
transport = r.transport
}
client := &http.Client{
Transport: transport,
}
resp, err := client.Get(r.JWKS.URL.String())
if err != nil {
return err
}
defer r.closeRespFn(resp.Body)
if resp.StatusCode != http.StatusOK {
return errors.New(resp.Status)
}
var jwk JWKS
if err = json.NewDecoder(resp.Body).Decode(&jwk); err != nil {
return err
}
for _, key := range jwk.Keys {
r.publicKeys[key.Kid], err = key.DecodePublicKey()
if err != nil {
return err
}
}
return nil
}
// UnmarshalJSON - decodes JSON data.
func (r *Config) UnmarshalJSON(data []byte) error {
// subtype to avoid recursive call to UnmarshalJSON()
type subConfig Config
var sr subConfig
if err := json.Unmarshal(data, &sr); err != nil {
return err
}
*r = Config(sr)
return nil
}
// JWT - client grants provider backed by JWT validation.
type JWT struct {
Config
}
// GetDefaultExpiration - parses the requested duration in seconds and returns the expiry duration to apply.
func GetDefaultExpiration(dsecs string) (time.Duration, error) {
defaultExpiryDuration := time.Duration(60) * time.Minute // Defaults to 1hr.
if dsecs != "" {
expirySecs, err := strconv.ParseInt(dsecs, 10, 64)
if err != nil {
return 0, auth.ErrInvalidDuration
}
// The duration, in seconds, of the role session.
// The value can range from 900 seconds (15 minutes)
// to 12 hours.
if expirySecs < 900 || expirySecs > 43200 {
return 0, auth.ErrInvalidDuration
}
defaultExpiryDuration = time.Duration(expirySecs) * time.Second
}
return defaultExpiryDuration, nil
}
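// Illustrative behaviour, derived from the bounds checked above:
//
//	GetDefaultExpiration("")      // -> 1h, nil (default)
//	GetDefaultExpiration("900")   // -> 15m, nil (minimum allowed)
//	GetDefaultExpiration("899")   // -> 0, auth.ErrInvalidDuration
//	GetDefaultExpiration("43201") // -> 0, auth.ErrInvalidDuration (above the 12h cap)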
func updateClaimsExpiry(dsecs string, claims map[string]interface{}) error {
expStr := claims["exp"]
if expStr == "" {
return ErrTokenExpired
}
// No custom duration requested, the claims can be used as is.
if dsecs == "" {
return nil
}
expAt, err := auth.ExpToInt64(expStr)
if err != nil {
return err
}
defaultExpiryDuration, err := GetDefaultExpiration(dsecs)
if err != nil {
return err
}
// If the JWT expiry claim is sooner than the requested expiry
// duration, cap the expiration at the JWT expiry claim.
if time.Unix(expAt, 0).UTC().Sub(time.Now().UTC()) < defaultExpiryDuration {
defaultExpiryDuration = time.Unix(expAt, 0).UTC().Sub(time.Now().UTC())
} // else honor the specified expiry duration.
expiry := time.Now().UTC().Add(defaultExpiryDuration).Unix()
claims["exp"] = strconv.FormatInt(expiry, 10) // update with new expiry.
return nil
}
// Validate - validates the access token.
func (p *JWT) Validate(token, dsecs string) (map[string]interface{}, error) {
jp := new(jwtgo.Parser)
jp.ValidMethods = []string{
"RS256", "RS384", "RS512", "ES256", "ES384", "ES512",
"RS3256", "RS3384", "RS3512", "ES3256", "ES3384", "ES3512",
}
keyFuncCallback := func(jwtToken *jwtgo.Token) (interface{}, error) {
kid, ok := jwtToken.Header["kid"].(string)
if !ok {
return nil, fmt.Errorf("Invalid kid value %v", jwtToken.Header["kid"])
}
return p.publicKeys[kid], nil
}
var claims jwtgo.MapClaims
jwtToken, err := jp.ParseWithClaims(token, &claims, keyFuncCallback)
if err != nil {
if err = p.PopulatePublicKey(); err != nil {
return nil, err
}
jwtToken, err = jwtgo.ParseWithClaims(token, &claims, keyFuncCallback)
if err != nil {
return nil, err
}
}
if !jwtToken.Valid {
return nil, ErrTokenExpired
}
if err = updateClaimsExpiry(dsecs, claims); err != nil {
return nil, err
}
return claims, nil
}
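// Hypothetical call site (parameter names are illustrative): an STS handler
// would pass the raw bearer token plus the requested duration in seconds.
//
//	claims, err := p.Validate(r.Form.Get("Token"), r.Form.Get("DurationSeconds"))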
// ID returns the provider name and authentication type.
func (p *JWT) ID() ID {
return "jwt"
}
// OpenID keys and envs.
const (
JwksURL = "jwks_url"
ConfigURL = "config_url"
ClaimName = "claim_name"
ClaimPrefix = "claim_prefix"
ClientID = "client_id"
EnvIdentityOpenIDClientID = "MINIO_IDENTITY_OPENID_CLIENT_ID"
EnvIdentityOpenIDJWKSURL = "MINIO_IDENTITY_OPENID_JWKS_URL"
EnvIdentityOpenIDURL = "MINIO_IDENTITY_OPENID_CONFIG_URL"
EnvIdentityOpenIDClaimName = "MINIO_IDENTITY_OPENID_CLAIM_NAME"
EnvIdentityOpenIDClaimPrefix = "MINIO_IDENTITY_OPENID_CLAIM_PREFIX"
)
// DiscoveryDoc - holds the fields parsed from an openid-configuration document,
// for example https://accounts.google.com/.well-known/openid-configuration
type DiscoveryDoc struct {
Issuer string `json:"issuer,omitempty"`
AuthEndpoint string `json:"authorization_endpoint,omitempty"`
TokenEndpoint string `json:"token_endpoint,omitempty"`
UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
RevocationEndpoint string `json:"revocation_endpoint,omitempty"`
JwksURI string `json:"jwks_uri,omitempty"`
ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
IDTokenSigningAlgValuesSupported []string `json:"id_token_signing_alg_values_supported,omitempty"`
ScopesSupported []string `json:"scopes_supported,omitempty"`
TokenEndpointAuthMethods []string `json:"token_endpoint_auth_methods_supported,omitempty"`
ClaimsSupported []string `json:"claims_supported,omitempty"`
CodeChallengeMethodsSupported []string `json:"code_challenge_methods_supported,omitempty"`
}
func parseDiscoveryDoc(u *xnet.URL, transport *http.Transport, closeRespFn func(io.ReadCloser)) (DiscoveryDoc, error) {
d := DiscoveryDoc{}
req, err := http.NewRequest(http.MethodGet, u.String(), nil)
if err != nil {
return d, err
}
clnt := http.Client{
Transport: transport,
}
resp, err := clnt.Do(req)
if err != nil {
clnt.CloseIdleConnections()
return d, err
}
defer closeRespFn(resp.Body)
if resp.StatusCode != http.StatusOK {
// err is nil here on a non-200 response; surface the HTTP status instead.
return d, errors.New(resp.Status)
}
dec := json.NewDecoder(resp.Body)
if err = dec.Decode(&d); err != nil {
return d, err
}
return d, nil
}
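// Hedged example (the transport and close helper are stand-ins):
//
//	u, _ := xnet.ParseHTTPURL("https://accounts.google.com/.well-known/openid-configuration")
//	d, err := parseDiscoveryDoc(u, tr, closeRespFn)
//
// On success, d.JwksURI carries the provider's JWKS endpoint, which
// LookupConfig below falls back to when no explicit jwks_url is configured.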
// DefaultKVS - default config for OpenID config
var (
DefaultKVS = config.KVS{
config.KV{
Key: ConfigURL,
Value: "",
},
config.KV{
Key: ClientID,
Value: "",
},
config.KV{
Key: ClaimName,
Value: iampolicy.PolicyName,
},
config.KV{
Key: ClaimPrefix,
Value: "",
},
config.KV{
Key: JwksURL,
Value: "",
},
}
)
// Enabled returns if jwks is enabled.
func Enabled(kvs config.KVS) bool {
return kvs.Get(JwksURL) != ""
}
// LookupConfig - looks up the OpenID/JWKS config, with ENV values overriding stored keys.
func LookupConfig(kvs config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser)) (c Config, err error) {
if err = config.CheckValidKeys(config.IdentityOpenIDSubSys, kvs, DefaultKVS); err != nil {
return c, err
}
jwksURL := env.Get(EnvIamJwksURL, "") // Legacy
if jwksURL == "" {
jwksURL = env.Get(EnvIdentityOpenIDJWKSURL, kvs.Get(JwksURL))
}
c = Config{
ClaimName: env.Get(EnvIdentityOpenIDClaimName, kvs.Get(ClaimName)),
ClaimPrefix: env.Get(EnvIdentityOpenIDClaimPrefix, kvs.Get(ClaimPrefix)),
publicKeys: make(map[string]crypto.PublicKey),
ClientID: env.Get(EnvIdentityOpenIDClientID, kvs.Get(ClientID)),
transport: transport,
closeRespFn: closeRespFn,
}
configURL := env.Get(EnvIdentityOpenIDURL, kvs.Get(ConfigURL))
if configURL != "" {
c.URL, err = xnet.ParseHTTPURL(configURL)
if err != nil {
return c, err
}
c.DiscoveryDoc, err = parseDiscoveryDoc(c.URL, transport, closeRespFn)
if err != nil {
return c, err
}
}
if c.ClaimName == "" {
c.ClaimName = iampolicy.PolicyName
}
if jwksURL == "" {
// Fallback to discovery document jwksURL
jwksURL = c.DiscoveryDoc.JwksURI
}
if jwksURL == "" {
return c, nil
}
c.JWKS.URL, err = xnet.ParseHTTPURL(jwksURL)
if err != nil {
return c, err
}
if err = c.PopulatePublicKey(); err != nil {
return c, err
}
return c, nil
}
// NewJWT - initialize new jwt authenticator.
func NewJWT(c Config) *JWT {
return &JWT{c}
}
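A hedged end-to-end sketch of how the pieces in this file compose; kvs, transport, closeRespFn, and rawToken are assumed to come from the caller:

// Build the OpenID config (env overrides apply), then validate a token
// with a 15-minute requested expiry.
cfg, err := LookupConfig(kvs, transport, closeRespFn)
if err != nil {
	return nil, err
}
claims, err := NewJWT(cfg).Validate(rawToken, "900")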

View file

@ -1,156 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"crypto"
"encoding/json"
"net/url"
"testing"
"time"
xnet "github.com/minio/minio/pkg/net"
)
func TestUpdateClaimsExpiry(t *testing.T) {
testCases := []struct {
exp interface{}
dsecs string
expectedFailure bool
}{
{"", "", true},
{"-1", "0", true},
{"-1", "900", true},
{"1574812326", "900", false},
{1574812326, "900", false},
{int64(1574812326), "900", false},
{int(1574812326), "900", false},
{uint(1574812326), "900", false},
{uint64(1574812326), "900", false},
{json.Number("1574812326"), "900", false},
{1574812326.000, "900", false},
{time.Duration(3) * time.Minute, "900", false},
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
claims := map[string]interface{}{}
claims["exp"] = testCase.exp
err := updateClaimsExpiry(testCase.dsecs, claims)
if err != nil && !testCase.expectedFailure {
t.Errorf("Expected success, got failure %s", err)
}
if err == nil && testCase.expectedFailure {
t.Error("Expected failure, got success")
}
})
}
}
func TestJWT(t *testing.T) {
const jsonkey = `{"keys":
[
{"kty":"RSA",
"n": "0vx7agoebGcQSuuPiLJXZptN9nndrQmbXEps2aiAFbWhM78LhWx4cbbfAAtVT86zwu1RK7aPFFxuhDR1L6tSoc_BJECPebWKRXjBZCiFV4n3oknjhMstn64tZ_2W-5JsGY4Hc5n9yBXArwl93lqt7_RN5w6Cf0h4QyQ5v-65YGjQR0_FDW2QvzqY368QQMicAtaSqzs8KJZgnYb9c7d0zgdAZHzu6qMQvRL5hajrn1n91CbOpbISD08qNLyrdkt-bFTWhAI4vMQFh6WeZu0fM4lFd2NcRwr3XPksINHaQ-G_xBniIqbw0Ls1jF44-csFCur-kEgU8awapJzKnqDKgw",
"e":"AQAB",
"alg":"RS256",
"kid":"2011-04-29"}
]
}`
var jk JWKS
if err := json.Unmarshal([]byte(jsonkey), &jk); err != nil {
t.Fatal("Unmarshal: ", err)
} else if len(jk.Keys) != 1 {
t.Fatalf("Expected 1 keys, got %d", len(jk.Keys))
}
keys := make(map[string]crypto.PublicKey, len(jk.Keys))
for ii, jks := range jk.Keys {
var err error
keys[jks.Kid], err = jks.DecodePublicKey()
if err != nil {
t.Fatalf("Failed to decode key %d: %v", ii, err)
}
}
u1, err := xnet.ParseHTTPURL("http://localhost:8443")
if err != nil {
t.Fatal(err)
}
cfg := Config{}
cfg.JWKS.URL = u1
cfg.publicKeys = keys
jwt := NewJWT(cfg)
if jwt.ID() != "jwt" {
t.Fatalf("Uexpected id %s for the validator", jwt.ID())
}
u, err := url.Parse("http://localhost:8443/?Token=invalid")
if err != nil {
t.Fatal(err)
}
if _, err := jwt.Validate(u.Query().Get("Token"), ""); err == nil {
t.Fatal("Expected validation to fail for an invalid token")
}
}
func TestDefaultExpiryDuration(t *testing.T) {
testCases := []struct {
reqURL string
duration time.Duration
expectErr bool
}{
{
reqURL: "http://localhost:8443/?Token=xxxxx",
duration: time.Duration(60) * time.Minute,
},
{
reqURL: "http://localhost:8443/?DurationSeconds=9s",
expectErr: true,
},
{
reqURL: "http://localhost:8443/?DurationSeconds=43201",
expectErr: true,
},
{
reqURL: "http://localhost:8443/?DurationSeconds=800",
expectErr: true,
},
{
reqURL: "http://localhost:8443/?DurationSeconds=901",
duration: time.Duration(901) * time.Second,
},
}
for i, testCase := range testCases {
u, err := url.Parse(testCase.reqURL)
if err != nil {
t.Fatal(err)
}
d, err := GetDefaultExpiration(u.Query().Get("DurationSeconds"))
gotErr := (err != nil)
if testCase.expectErr != gotErr {
t.Errorf("Test %d: Expected %v, got %v with error %s", i+1, testCase.expectErr, gotErr, err)
}
if d != testCase.duration {
t.Errorf("Test %d: Expected duration %d, got %d", i+1, testCase.duration, d)
}
}
}

View file

@ -1,46 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import "github.com/minio/minio/legacy/config"
// Legacy envs
const (
EnvIamJwksURL = "MINIO_IAM_JWKS_URL"
)
// SetIdentityOpenID - one-time migration code for carrying the older OpenID config format over to the new one.
func SetIdentityOpenID(s config.Config, cfg Config) {
if cfg.JWKS.URL == nil || cfg.JWKS.URL.String() == "" {
// Not enabled, so there is nothing to save in the new config.
return
}
s[config.IdentityOpenIDSubSys][config.Default] = config.KVS{
config.KV{
Key: JwksURL,
Value: cfg.JWKS.URL.String(),
},
config.KV{
Key: ConfigURL,
Value: "",
},
config.KV{
Key: ClaimPrefix,
Value: "",
},
}
}

View file

@ -1,53 +0,0 @@
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package openid
import (
"crypto"
"github.com/dgrijalva/jwt-go"
// Needed for SHA3 to work - See: https://golang.org/src/crypto/crypto.go?s=1034:1288
_ "golang.org/x/crypto/sha3"
)
// Specific instances for RS3256 and company (RSA over SHA-3)
var (
SigningMethodRS3256 *jwt.SigningMethodRSA
SigningMethodRS3384 *jwt.SigningMethodRSA
SigningMethodRS3512 *jwt.SigningMethodRSA
)
func init() {
// RS3256
SigningMethodRS3256 = &jwt.SigningMethodRSA{Name: "RS3256", Hash: crypto.SHA3_256}
jwt.RegisterSigningMethod(SigningMethodRS3256.Alg(), func() jwt.SigningMethod {
return SigningMethodRS3256
})
// RS3384
SigningMethodRS3384 = &jwt.SigningMethodRSA{Name: "RS3384", Hash: crypto.SHA3_384}
jwt.RegisterSigningMethod(SigningMethodRS3384.Alg(), func() jwt.SigningMethod {
return SigningMethodRS3384
})
// RS3512
SigningMethodRS3512 = &jwt.SigningMethodRSA{Name: "RS3512", Hash: crypto.SHA3_512}
jwt.RegisterSigningMethod(SigningMethodRS3512.Alg(), func() jwt.SigningMethod {
return SigningMethodRS3512
})
}
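// As with the EC variants earlier in this commit, registration makes each
// algorithm resolvable by name (illustrative):
//
//	m := jwt.GetSigningMethod("RS3256") // returns SigningMethodRS3256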

Some files were not shown because too many files have changed in this diff.