s3: minor cleanups for archive storage class handling
This commit is contained in:
parent
8ca58b487c
commit
a763a5c67d
2 changed files with 19 additions and 13 deletions
|
@ -326,8 +326,10 @@ func (be *Backend) Path() string {
|
|||
}
|
||||
|
||||
// useStorageClass returns whether file should be saved in the provided Storage Class
|
||||
// For archive storage classes, only data files are stored using that class; metadata
|
||||
// must remain instantly accessible.
|
||||
func (be *Backend) useStorageClass(h backend.Handle) bool {
|
||||
var notArchiveClass bool = be.cfg.StorageClass != "GLACIER" && be.cfg.StorageClass != "DEEP_ARCHIVE"
|
||||
notArchiveClass := be.cfg.StorageClass != "GLACIER" && be.cfg.StorageClass != "DEEP_ARCHIVE"
|
||||
isDataFile := h.Type == backend.PackFile && !h.IsMetadata
|
||||
return isDataFile || notArchiveClass
|
||||
}
|
||||
|
@ -336,15 +338,16 @@ func (be *Backend) useStorageClass(h backend.Handle) bool {
|
|||
func (be *Backend) Save(ctx context.Context, h backend.Handle, rd backend.RewindReader) error {
|
||||
objName := be.Filename(h)
|
||||
|
||||
opts := minio.PutObjectOptions{ContentType: "application/octet-stream"}
|
||||
|
||||
opts := minio.PutObjectOptions{
|
||||
ContentType: "application/octet-stream",
|
||||
// the only option with the high-level api is to let the library handle the checksum computation
|
||||
SendContentMd5: true,
|
||||
// only use multipart uploads for very large files
|
||||
PartSize: 200 * 1024 * 1024,
|
||||
}
|
||||
if be.useStorageClass(h) {
|
||||
opts.StorageClass = be.cfg.StorageClass
|
||||
}
|
||||
// the only option with the high-level api is to let the library handle the checksum computation
|
||||
opts.SendContentMd5 = true
|
||||
// only use multipart uploads for very large files
|
||||
opts.PartSize = 200 * 1024 * 1024
|
||||
|
||||
info, err := be.client.PutObject(ctx, be.cfg.Bucket, objName, io.NopCloser(rd), int64(rd.Length()), opts)
|
||||
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue