package layer

import (
	"context"
	"crypto/ecdsa"
	"errors"
	"fmt"
	"io"
	"net/url"
	"sort"
	"strings"
	"time"

	"github.com/nspcc-dev/neofs-api-go/pkg/client"
	cid "github.com/nspcc-dev/neofs-api-go/pkg/container/id"
	"github.com/nspcc-dev/neofs-api-go/pkg/netmap"
	"github.com/nspcc-dev/neofs-api-go/pkg/object"
	"github.com/nspcc-dev/neofs-api-go/pkg/owner"
	"github.com/nspcc-dev/neofs-s3-gw/api"
	"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
	"github.com/nspcc-dev/neofs-sdk-go/pkg/pool"
	"go.uber.org/zap"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

type (
	layer struct {
		pool pool.Pool
		log  *zap.Logger
	}

	// Params stores basic API parameters.
	Params struct {
		Pool    pool.Pool
		Logger  *zap.Logger
		Timeout time.Duration
		Key     *ecdsa.PrivateKey
	}

	// GetObjectParams stores object get request parameters.
	GetObjectParams struct {
		Range  *RangeParams
		Bucket string
		Object string
		Offset int64
		Length int64
		Writer io.Writer
	}

	// RangeParams stores range header request parameters.
	RangeParams struct {
		Start uint64
		End   uint64
	}

	// PutObjectParams stores object put request parameters.
	PutObjectParams struct {
		Bucket string
		Object string
		Size   int64
		Reader io.Reader
		Header map[string]string
	}

	// CopyObjectParams stores object copy request parameters.
	CopyObjectParams struct {
		SrcBucket string
		DstBucket string
		SrcObject string
		DstObject string
		SrcSize   int64
		Header    map[string]string
	}

	// CreateBucketParams stores bucket create request parameters.
	CreateBucketParams struct {
		Name   string
		ACL    uint32
		Policy *netmap.PlacementPolicy
	}

	// DeleteBucketParams stores delete bucket request parameters.
	DeleteBucketParams struct {
		Name string
	}

	// ListObjectVersionsParams stores list object versions request parameters.
	ListObjectVersionsParams struct {
		Bucket          string
		Delimiter       string
		KeyMarker       string
		MaxKeys         int
		Prefix          string
		VersionIDMarker string
		Encode          string
	}

	// NeoFS provides the basic NeoFS interface.
	NeoFS interface {
		Get(ctx context.Context, address *object.Address) (*object.Object, error)
	}

	// Client provides the S3 API client interface.
	Client interface {
		NeoFS

		ListBuckets(ctx context.Context) ([]*BucketInfo, error)
		GetBucketInfo(ctx context.Context, name string) (*BucketInfo, error)
		CreateBucket(ctx context.Context, p *CreateBucketParams) (*cid.ID, error)
		DeleteBucket(ctx context.Context, p *DeleteBucketParams) error

		GetObject(ctx context.Context, p *GetObjectParams) error
		GetObjectInfo(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error)

		PutObject(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error)

		CopyObject(ctx context.Context, p *CopyObjectParams) (*ObjectInfo, error)

		ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObjectsInfo, error)
		ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error)

		DeleteObject(ctx context.Context, bucket, object string) error
		DeleteObjects(ctx context.Context, bucket string, objects []string) []error
	}
)

var (
	// ErrObjectExists is returned on attempts to create an already existing object.
	ErrObjectExists = errors.New("object exists")
	// ErrObjectNotExists is returned on attempts to work with a non-existent object.
	ErrObjectNotExists = errors.New("object not exists")
	// ErrBucketAlreadyExists is returned on attempts to create an already existing bucket.
	ErrBucketAlreadyExists = errors.New("bucket exists")
	// ErrBucketNotFound is returned on attempts to get a non-existent bucket.
	ErrBucketNotFound = errors.New("bucket not found")
)

const (
	// emptyETag is the ETag (hex-encoded MD5 sum) of an empty string.
	emptyETag = "d41d8cd98f00b204e9800998ecf8427e"

	unversionedObjectVersionID = "null"
)

// NewLayer creates an instance of the layer backed by the given connection
// pool and logger.
func NewLayer(log *zap.Logger, conns pool.Pool) Client {
	return &layer{
		pool: conns,
		log:  log,
	}
}
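
// A minimal usage sketch (hypothetical; assumes a configured zap logger and a
// pool.Pool already built elsewhere from node endpoints and a key):
//
//	nLayer := NewLayer(logger, conns)
//	buckets, err := nLayer.ListBuckets(ctx)
//	if err != nil {
//		// handle error
//	}
//	_ = buckets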

// Owner returns the owner ID from the BearerToken (context) or from the client owner.
func (n *layer) Owner(ctx context.Context) *owner.ID {
	if data, ok := ctx.Value(api.GateData).(*accessbox.GateData); ok && data != nil {
		return data.BearerToken.Issuer()
	}

	return n.pool.OwnerID()
}

// BearerOpt returns a client.WithBearer call option with the token from the context or with a nil token.
func (n *layer) BearerOpt(ctx context.Context) client.CallOption {
	if data, ok := ctx.Value(api.GateData).(*accessbox.GateData); ok && data != nil {
		return client.WithBearer(data.BearerToken)
	}

	return client.WithBearer(nil)
}

// SessionOpt returns a client.WithSession call option with the token from the context or with a nil token.
func (n *layer) SessionOpt(ctx context.Context) client.CallOption {
	if data, ok := ctx.Value(api.GateData).(*accessbox.GateData); ok && data != nil {
		return client.WithSession(data.SessionToken)
	}

	return client.WithSession(nil)
}
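
// How the tokens reach these helpers (hedged sketch; the authentication layer
// is assumed to place *accessbox.GateData into the request context under the
// api.GateData key before the layer methods are called):
//
//	data := &accessbox.GateData{BearerToken: bearerToken, SessionToken: sessionToken}
//	ctx = context.WithValue(ctx, api.GateData, data)
//	opt := n.BearerOpt(ctx) // resolves to client.WithBearer(bearerToken)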

// Get returns a NeoFS Object by refs.Address (should be used by auth.Center).
func (n *layer) Get(ctx context.Context, address *object.Address) (*object.Object, error) {
	ops := new(client.GetObjectParams).WithAddress(address)
	return n.pool.GetObject(ctx, ops, n.BearerOpt(ctx))
}

// GetBucketInfo returns bucket info by name.
func (n *layer) GetBucketInfo(ctx context.Context, name string) (*BucketInfo, error) {
	name, err := url.QueryUnescape(name)
	if err != nil {
		return nil, err
	}

	containerID := new(cid.ID)
	if err := containerID.Parse(name); err != nil {
		list, err := n.containerList(ctx)
		if err != nil {
			return nil, err
		}
		for _, bkt := range list {
			if bkt.Name == name {
				return bkt, nil
			}
		}

		return nil, ErrBucketNotFound
	}

	return n.containerInfo(ctx, containerID)
}
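
// Lookup works either by the human-readable bucket name or by the container ID
// string (hedged sketch; names and IDs below are illustrative placeholders):
//
//	info, err := nLayer.GetBucketInfo(ctx, "my-bucket")          // resolved via container listing
//	info, err = nLayer.GetBucketInfo(ctx, containerID.String())  // parsed directly as cid.ID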

// ListBuckets returns all user containers. The name of the bucket is a
// container ID. Timestamp is omitted since it is not saved in a NeoFS container.
func (n *layer) ListBuckets(ctx context.Context) ([]*BucketInfo, error) {
	return n.containerList(ctx)
}

// ListObjects returns objects from the container. It ignores tombstones and
// storage groups.
// ctx, bucket, prefix, continuationToken, delimiter, maxKeys
func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObjectsInfo, error) {
	// todo: implement pagination when the search response becomes a gRPC stream;
	// pagination must be backed by a cache, because search results
	// may differ between search calls
	var (
		err       error
		bkt       *BucketInfo
		ids       []*object.ID
		result    ListObjectsInfo
		uniqNames = make(map[string]bool)
	)

	if p.MaxKeys == 0 {
		return &result, nil
	}

	if bkt, err = n.GetBucketInfo(ctx, p.Bucket); err != nil {
		return nil, err
	} else if ids, err = n.objectSearch(ctx, &findParams{cid: bkt.CID}); err != nil {
		return nil, err
	}

	ln := len(ids)
	// todo: check what happens if there are more than maxKeys objects
	if ln > p.MaxKeys {
		ln = p.MaxKeys
	}

	mostRecentModified := time.Time{}
	needDirectoryAsKey := p.Version == 2 && len(p.Prefix) > 0 && len(p.Delimiter) > 0 && strings.HasSuffix(p.Prefix, p.Delimiter)
	result.Objects = make([]*ObjectInfo, 0, ln)

	for _, id := range ids {
		addr := object.NewAddress()
		addr.SetObjectID(id)
		addr.SetContainerID(bkt.CID)

		meta, err := n.objectHead(ctx, addr)
		if err != nil {
			n.log.Warn("could not fetch object meta", zap.Error(err))
			continue
		}

		// // ignore tombstone objects
		// _, hdr := meta.LastHeader(object.HeaderType(object.TombstoneHdr))
		// if hdr != nil {
		// 	continue
		// }

		// ignore storage group objects
		// _, hdr = meta.LastHeader(object.HeaderType(object.StorageGroupHdr))
		// if hdr != nil {
		// 	continue
		// }

		// dirs don't exist in NeoFS, the gateway stores the full path to the file
		// in the object header, e.g. `filename`:`/this/is/path/file.txt`

		// the prefix argument contains the full dir path from the root, e.g. `/this/is/`

		// to emulate dirs we take dirs in the path, compare them with the prefix
		// and look for entities after the prefix. If an entity does not have any
		// sub-entities, then it is a file, otherwise a directory.

		if oi := objectInfoFromMeta(bkt, meta, p.Prefix, p.Delimiter); oi != nil {
			if needDirectoryAsKey && oi.Created.After(mostRecentModified) {
				mostRecentModified = oi.Created
			}
			// use only unique dir names
			if _, ok := uniqNames[oi.Name]; ok {
				continue
			}
			if len(p.Marker) > 0 && oi.Name <= p.Marker {
				continue
			}

			uniqNames[oi.Name] = oi.isDir

			result.Objects = append(result.Objects, oi)
		}
	}

	sort.Slice(result.Objects, func(i, j int) bool {
		return result.Objects[i].Name < result.Objects[j].Name
	})

	if len(result.Objects) > p.MaxKeys {
		result.IsTruncated = true
		result.Objects = result.Objects[:p.MaxKeys]
		result.NextMarker = result.Objects[len(result.Objects)-1].Name
	}

	fillPrefixes(&result, uniqNames)
	if needDirectoryAsKey {
		res := []*ObjectInfo{{
			Name:    p.Prefix,
			Created: mostRecentModified,
			HashSum: emptyETag,
		}}
		result.Objects = append(res, result.Objects...)
	}

	return &result, nil
}
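
// Directory emulation in practice (hedged sketch; field names follow how p is
// used above, the concrete bucket and prefix values are illustrative):
//
//	res, err := nLayer.ListObjects(ctx, &ListObjectsParams{
//		Bucket:    "my-bucket",
//		Prefix:    "photos/2021/",
//		Delimiter: "/",
//		MaxKeys:   1000,
//		Version:   2, // V2 listing also returns the prefix itself as an empty key (see needDirectoryAsKey)
//	})
//	// res.Objects holds the "files", res.Prefixes the emulated sub-"directories".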

// fillPrefixes moves emulated directory entries from result.Objects into
// result.Prefixes, keeping regular objects in place.
func fillPrefixes(result *ListObjectsInfo, directories map[string]bool) {
	index := 0
	for range result.Objects {
		name := result.Objects[index].Name
		if isDir := directories[name]; isDir {
			result.Objects = append(result.Objects[:index], result.Objects[index+1:]...)
			result.Prefixes = append(result.Prefixes, name)
		} else {
			index++
		}
	}
}

// GetObject from storage.
func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
	var (
		err error
		oid *object.ID
		bkt *BucketInfo
	)

	if bkt, err = n.GetBucketInfo(ctx, p.Bucket); err != nil {
		return fmt.Errorf("couldn't find bucket: %s : %w", p.Bucket, err)
	} else if oid, err = n.objectFindID(ctx, &findParams{cid: bkt.CID, val: p.Object}); err != nil {
		return fmt.Errorf("search of the object failed: cid: %s, val: %s : %w", bkt.CID, p.Object, err)
	}

	addr := object.NewAddress()
	addr.SetObjectID(oid)
	addr.SetContainerID(bkt.CID)

	params := &getParams{
		Writer:  p.Writer,
		address: addr,
		offset:  p.Offset,
		length:  p.Length,
	}

	if p.Range != nil {
		objRange := object.NewRange()
		objRange.SetOffset(p.Range.Start)
		// the Range header is inclusive, hence the +1
		objRange.SetLength(p.Range.End - p.Range.Start + 1)
		params.Range = objRange
		_, err = n.objectRange(ctx, params)
	} else {
		_, err = n.objectGet(ctx, params)
	}

	if err != nil {
		return fmt.Errorf("couldn't get object, cid: %s : %w", bkt.CID, err)
	}

	return nil
}
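
// Ranged reads mirror HTTP Range semantics (hedged sketch; the bucket/object
// names and the byte range are illustrative):
//
//	var buf bytes.Buffer
//	err := nLayer.GetObject(ctx, &GetObjectParams{
//		Bucket: "my-bucket",
//		Object: "photos/2021/cat.jpg",
//		Range:  &RangeParams{Start: 0, End: 1023}, // first 1024 bytes, End is inclusive
//		Writer: &buf,
//	})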

// checkObject reports whether an object with the given filename exists in the
// container: it returns ErrObjectExists, ErrObjectNotExists or the search error.
func (n *layer) checkObject(ctx context.Context, cid *cid.ID, filename string) error {
	var err error

	if _, err = n.objectFindID(ctx, &findParams{cid: cid, val: filename}); err == nil {
		return ErrObjectExists
	} else if state, ok := status.FromError(err); !ok || state == nil {
		return err
	} else if state.Code() == codes.NotFound {
		return ErrObjectNotExists
	}

	return err
}

// GetObjectInfo returns meta information about the object.
func (n *layer) GetObjectInfo(ctx context.Context, bucketName, filename string) (*ObjectInfo, error) {
	var (
		err  error
		oid  *object.ID
		bkt  *BucketInfo
		meta *object.Object
	)

	if bkt, err = n.GetBucketInfo(ctx, bucketName); err != nil {
		n.log.Error("could not fetch bucket info", zap.Error(err))
		return nil, err
	} else if oid, err = n.objectFindID(ctx, &findParams{cid: bkt.CID, val: filename}); err != nil {
		n.log.Error("could not find object id", zap.Error(err))
		return nil, err
	}

	addr := object.NewAddress()
	addr.SetObjectID(oid)
	addr.SetContainerID(bkt.CID)

	if meta, err = n.objectHead(ctx, addr); err != nil {
		n.log.Error("could not fetch object head", zap.Error(err))
		return nil, err
	}

	return objectInfoFromMeta(bkt, meta, "", ""), nil
}

// PutObject into storage.
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error) {
	return n.objectPut(ctx, p)
}

// CopyObject from one bucket into another bucket.
func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*ObjectInfo, error) {
	pr, pw := io.Pipe()

	go func() {
		err := n.GetObject(ctx, &GetObjectParams{
			Bucket: p.SrcBucket,
			Object: p.SrcObject,
			Writer: pw,
		})

		if err = pw.CloseWithError(err); err != nil {
			n.log.Error("could not get object", zap.Error(err))
		}
	}()

	return n.PutObject(ctx, &PutObjectParams{
		Bucket: p.DstBucket,
		Object: p.DstObject,
		Size:   p.SrcSize,
		Reader: pr,
		Header: p.Header,
	})
}
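
// The copy is streamed through io.Pipe: the goroutine above writes the source
// object into pw while PutObject reads it from pr, so the payload is never
// fully buffered in the gateway's memory.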

// DeleteObject removes all objects with the passed nice name.
func (n *layer) DeleteObject(ctx context.Context, bucket, filename string) error {
	var (
		err error
		ids []*object.ID
		bkt *BucketInfo
	)

	if bkt, err = n.GetBucketInfo(ctx, bucket); err != nil {
		return &api.DeleteError{
			Err:    err,
			Object: filename,
		}
	} else if ids, err = n.objectSearch(ctx, &findParams{cid: bkt.CID, val: filename}); err != nil {
		return &api.DeleteError{
			Err:    err,
			Object: filename,
		}
	}

	for _, id := range ids {
		addr := object.NewAddress()
		addr.SetObjectID(id)
		addr.SetContainerID(bkt.CID)

		if err = n.objectDelete(ctx, addr); err != nil {
			return &api.DeleteError{
				Err:    err,
				Object: filename,
			}
		}
	}

	return nil
}

// DeleteObjects from the storage.
func (n *layer) DeleteObjects(ctx context.Context, bucket string, objects []string) []error {
	var errs = make([]error, 0, len(objects))

	for i := range objects {
		if err := n.DeleteObject(ctx, bucket, objects[i]); err != nil {
			errs = append(errs, err)
		}
	}

	return errs
}

// CreateBucket creates a new container for the bucket unless a bucket with the
// same name already exists.
func (n *layer) CreateBucket(ctx context.Context, p *CreateBucketParams) (*cid.ID, error) {
	_, err := n.GetBucketInfo(ctx, p.Name)
	if err != nil {
		if errors.Is(err, ErrBucketNotFound) {
			return n.createContainer(ctx, p)
		}
		return nil, err
	}

	return nil, ErrBucketAlreadyExists
}
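
// Bucket creation sketch (hedged; the placement policy is assumed to be built
// or parsed elsewhere, and the ACL value is an illustrative placeholder):
//
//	cnrID, err := nLayer.CreateBucket(ctx, &CreateBucketParams{
//		Name:   "my-bucket",
//		ACL:    basicACL, // e.g. a basic ACL bitmask chosen by the caller
//		Policy: placementPolicy,
//	})
//	if errors.Is(err, ErrBucketAlreadyExists) {
//		// map to the S3 BucketAlreadyExists error
//	}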

// DeleteBucket removes the container that backs the bucket.
func (n *layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
	bucketInfo, err := n.GetBucketInfo(ctx, p.Name)
	if err != nil {
		return err
	}

	return n.deleteContainer(ctx, bucketInfo.CID)
}

// ListObjectVersions lists object versions in the bucket.
func (n *layer) ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error) {
	var (
		res       = ListObjectVersionsInfo{}
		err       error
		bkt       *BucketInfo
		ids       []*object.ID
		uniqNames = make(map[string]bool)
	)

	if bkt, err = n.GetBucketInfo(ctx, p.Bucket); err != nil {
		return nil, err
	} else if ids, err = n.objectSearch(ctx, &findParams{cid: bkt.CID}); err != nil {
		return nil, err
	}

	versions := make([]*ObjectVersionInfo, 0, len(ids))
	// todo: delete markers are always empty for now, they will be used after a proper implementation of versioning
	deleted := make([]*DeletedObjectInfo, 0, len(ids))
	res.DeleteMarker = deleted

	for _, id := range ids {
		addr := object.NewAddress()
		addr.SetObjectID(id)
		addr.SetContainerID(bkt.CID)

		meta, err := n.objectHead(ctx, addr)
		if err != nil {
			n.log.Warn("could not fetch object meta", zap.Error(err))
			continue
		}
		if ov := objectVersionInfoFromMeta(bkt, meta, p.Prefix, p.Delimiter); ov != nil {
			if _, ok := uniqNames[ov.Object.Name]; ok {
				continue
			}
			if len(p.KeyMarker) > 0 && ov.Object.Name <= p.KeyMarker {
				continue
			}
			uniqNames[ov.Object.Name] = ov.Object.isDir
			versions = append(versions, ov)
		}
	}

	sort.Slice(versions, func(i, j int) bool {
		return versions[i].Object.Name < versions[j].Object.Name
	})

	if len(versions) > p.MaxKeys {
		res.IsTruncated = true

		lastVersion := versions[p.MaxKeys-1]
		res.KeyMarker = lastVersion.Object.Name
		res.VersionIDMarker = lastVersion.VersionID

		nextVersion := versions[p.MaxKeys]
		res.NextKeyMarker = nextVersion.Object.Name
		res.NextVersionIDMarker = nextVersion.VersionID

		versions = versions[:p.MaxKeys]
	}

	for _, ov := range versions {
		if isDir := uniqNames[ov.Object.Name]; isDir {
			res.CommonPrefixes = append(res.CommonPrefixes, &ov.Object.Name)
		} else {
			res.Version = append(res.Version, ov)
		}
	}
	return &res, nil
}