forked from TrueCloudLab/frostfs-s3-gw
Initial commit based on https://github.com/minio/minio/releases/tag/RELEASE.2020-07-02T00-15-09Z
This commit is contained in: commit 9bf57615b0
1206 changed files with 281445 additions and 0 deletions
cmd/data-usage.go (Normal file, 99 lines)
@@ -0,0 +1,99 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"bytes"
	"context"
	"encoding/json"

	jsoniter "github.com/json-iterator/go"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/hash"
)

const (
	envDataUsageCrawlConf  = "MINIO_DISK_USAGE_CRAWL_ENABLE"
	envDataUsageCrawlDelay = "MINIO_DISK_USAGE_CRAWL_DELAY"
	envDataUsageCrawlDebug = "MINIO_DISK_USAGE_CRAWL_DEBUG"

	dataUsageRoot   = SlashSeparator
	dataUsageBucket = minioMetaBucket + SlashSeparator + bucketMetaPrefix

	dataUsageObjName   = ".usage.json"
	dataUsageCacheName = ".usage-cache.bin"
	dataUsageBloomName = ".bloomcycle.bin"
)

// storeDataUsageInBackend will store all objects sent on the gui channel until closed.
func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, gui <-chan DataUsageInfo) {
	for dataUsageInfo := range gui {
		dataUsageJSON, err := json.Marshal(dataUsageInfo)
		if err != nil {
			logger.LogIf(ctx, err)
			continue
		}
		size := int64(len(dataUsageJSON))
		r, err := hash.NewReader(bytes.NewReader(dataUsageJSON), size, "", "", size, false)
		if err != nil {
			logger.LogIf(ctx, err)
			continue
		}

		_, err = objAPI.PutObject(ctx, dataUsageBucket, dataUsageObjName, NewPutObjReader(r, nil, nil), ObjectOptions{})
		if !isErrBucketNotFound(err) {
			logger.LogIf(ctx, err)
		}
	}
}

func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
	var dataUsageInfoJSON bytes.Buffer

	err := objAPI.GetObject(ctx, dataUsageBucket, dataUsageObjName, 0, -1, &dataUsageInfoJSON, "", ObjectOptions{})
	if err != nil {
		if isErrObjectNotFound(err) || isErrBucketNotFound(err) {
			return DataUsageInfo{}, nil
		}
		return DataUsageInfo{}, toObjectErr(err, dataUsageBucket, dataUsageObjName)
	}

	var dataUsageInfo DataUsageInfo
	var json = jsoniter.ConfigCompatibleWithStandardLibrary
	err = json.Unmarshal(dataUsageInfoJSON.Bytes(), &dataUsageInfo)
	if err != nil {
		return DataUsageInfo{}, err
	}

	// For forward compatibility reasons, we need to add this code.
	if len(dataUsageInfo.BucketsUsage) == 0 {
		dataUsageInfo.BucketsUsage = make(map[string]BucketUsageInfo, len(dataUsageInfo.BucketSizes))
		for bucket, size := range dataUsageInfo.BucketSizes {
			dataUsageInfo.BucketsUsage[bucket] = BucketUsageInfo{Size: size}
		}
	}

	// For backward compatibility reasons, we need to add this code.
	if len(dataUsageInfo.BucketSizes) == 0 {
		dataUsageInfo.BucketSizes = make(map[string]uint64, len(dataUsageInfo.BucketsUsage))
		for bucket, bui := range dataUsageInfo.BucketsUsage {
			dataUsageInfo.BucketSizes[bucket] = bui.Size
		}
	}

	return dataUsageInfo, nil
}
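For orientation, storeDataUsageInBackend follows a plain producer/consumer pattern: a crawler goroutine publishes usage snapshots on a channel, and a single consumer drains the channel until it is closed, serializing each snapshot and persisting it, logging rather than aborting on per-item errors. Below is a minimal standalone sketch of that pattern using only the standard library. UsageSnapshot and persistUsage are hypothetical stand-ins for illustration, not types or functions from the MinIO codebase.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// UsageSnapshot is an illustrative stand-in for DataUsageInfo (hypothetical).
type UsageSnapshot struct {
	ObjectsCount uint64            `json:"objectsCount"`
	BucketSizes  map[string]uint64 `json:"bucketsSizes"`
}

// persistUsage mirrors the shape of storeDataUsageInBackend: drain the channel
// until closed, marshal each snapshot, hand the bytes to a storage callback,
// and log (rather than abort) on per-item errors.
func persistUsage(ch <-chan UsageSnapshot, store func([]byte) error) {
	for snap := range ch {
		data, err := json.Marshal(snap)
		if err != nil {
			log.Println(err)
			continue
		}
		if err := store(data); err != nil {
			log.Println(err)
		}
	}
}

func main() {
	ch := make(chan UsageSnapshot)
	go func() {
		// Closing the channel is what terminates the consumer loop.
		defer close(ch)
		ch <- UsageSnapshot{
			ObjectsCount: 3,
			BucketSizes:  map[string]uint64{"photos": 1 << 20},
		}
	}()
	// The storage callback stands in for objAPI.PutObject in the real code.
	persistUsage(ch, func(b []byte) error {
		fmt.Printf("would write %d bytes: %s\n", len(b), b)
		return nil
	})
}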