[#308] Fix linter issues
DCO (pull_request) Successful in 1m31s
Builds (1.20) (pull_request) Successful in 1m59s
Builds (1.21) (pull_request) Successful in 1m37s
Vulncheck (pull_request) Successful in 2m14s
Lint (pull_request) Successful in 4m7s
Tests (1.20) (pull_request) Successful in 2m28s
Tests (1.21) (pull_request) Successful in 1m58s

Signed-off-by: Alex Vanin <a.vanin@yadro.com>
pull/308/head
Alexey Vanin 2024-02-16 18:24:27 +03:00
parent 0f3b4ab0ed
commit 563c1d9bd7
3 changed files with 6 additions and 6 deletions


@@ -48,7 +48,7 @@ func (k *ListSessionKey) String() string {

 // NewListSessionCache is a constructor which creates an object of ListObjectsCache with the given lifetime of entries.
 func NewListSessionCache(config *Config) *ListSessionCache {
-	gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).EvictedFunc(func(key interface{}, val interface{}) {
+	gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).EvictedFunc(func(_ interface{}, val interface{}) {
 		session, ok := val.(*data.ListSession)
 		if !ok {
 			config.Logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", val)),
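The change above renames the unused key parameter of the eviction callback to the blank identifier, which is the usual way to satisfy an unused-parameter linter check without changing behavior. A minimal standalone sketch of the same pattern with gcache (the cache size, lifetime, and the logging body are illustrative placeholders, not the gateway's real configuration):

package main

import (
	"fmt"
	"time"

	"github.com/bluele/gcache"
)

func main() {
	// The eviction callback must match func(key, value interface{}); the key
	// is not needed here, so it is bound to the blank identifier to keep the
	// unused-parameter check quiet.
	gc := gcache.New(10).
		LRU().
		Expiration(time.Minute).
		EvictedFunc(func(_ interface{}, val interface{}) {
			fmt.Printf("evicted value of type %T\n", val)
		}).
		Build()

	_ = gc.Set("session-1", struct{}{})
}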


@@ -129,15 +129,15 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
 	}

 	var dstSize uint64
-	if srcSize, err := layer.GetObjectSize(srcObjInfo); err != nil {
+	srcSize, err := layer.GetObjectSize(srcObjInfo)
+	if err != nil {
 		h.logAndSendError(w, "failed to get source object size", reqInfo, err)
 		return
-	} else if srcSize > layer.UploadMaxSize { //https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
+	} else if srcSize > layer.UploadMaxSize { // https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
 		h.logAndSendError(w, "too bid object to copy with single copy operation, use multipart upload copy instead", reqInfo, errors.GetAPIError(errors.ErrInvalidRequestLargeCopy))
 		return
-	} else {
-		dstSize = srcSize
 	}
+	dstSize = srcSize

 	args, err := parseCopyObjectArgs(r.Header)
 	if err != nil {
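This hunk pulls the call out of the if-statement initializer and drops the trailing else branch, so the error paths return early and the success assignment is not buried in an else block (the shape linters such as revive typically ask for). A hedged sketch of the same restructuring; getSize, maxSize, and copySize below are hypothetical stand-ins for layer.GetObjectSize and the handler's constants, not the gateway's real API:

package main

import (
	"errors"
	"fmt"
)

const maxSize = 5 << 30 // illustrative limit, not the gateway's real constant

// getSize is a hypothetical stand-in for layer.GetObjectSize.
func getSize(ok bool) (uint64, error) {
	if !ok {
		return 0, errors.New("no size")
	}
	return 42, nil
}

func copySize(ok bool) (uint64, error) {
	var dstSize uint64

	// Instead of `if size, err := getSize(ok); err != nil { ... } else { dstSize = size }`,
	// declare the variables first and return early on the error paths, so the
	// happy-path assignment stays at the top indentation level.
	size, err := getSize(ok)
	if err != nil {
		return 0, fmt.Errorf("get size: %w", err)
	} else if size > maxSize {
		return 0, errors.New("object too large for a single copy operation")
	}
	dstSize = size

	return dstSize, nil
}

func main() {
	fmt.Println(copySize(true))
}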


@@ -180,7 +180,7 @@ func TestMultipartUploadSize(t *testing.T) {
 		equalDataSlices(t, data[partSize:], part)
 	})

-	t.Run("check correct size when part copy", func(t *testing.T) {
+	t.Run("check correct size when part copy", func(_ *testing.T) {
 		objName2 := "obj2"
 		uploadInfo := createMultipartUpload(hc, bktName, objName2, headers)
 		sourceCopy := bktName + "/" + objName
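The same unused-parameter fix applied to a subtest: the closure never uses its own *testing.T, presumably because the assertions go through the hc handler context, which carries the outer test's *testing.T. A small hedged sketch of that shape (the check helper is hypothetical, standing in for the real handler test helpers):

package handler

import "testing"

func TestUnusedSubtestParam(t *testing.T) {
	// Hypothetical helper that captures the outer *testing.T, similar to the
	// hc handler context used by the real multipart tests.
	check := func(got, want string) {
		if got != want {
			t.Errorf("got %q, want %q", got, want)
		}
	}

	// The closure never touches its own *testing.T parameter, so it is bound
	// to the blank identifier to satisfy the unused-parameter linter.
	t.Run("check correct size when part copy", func(_ *testing.T) {
		check("obj2", "obj2")
	})
}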