forked from TrueCloudLab/frostfs-s3-gw
[#549] Move layer logic to handler
Signed-off-by: Denis Kirillov <denis@nspcc.ru>
This commit is contained in:
parent
d6424ebeac
commit
b00ad03ddc
60 changed files with 3459 additions and 3892 deletions
|
@ -19,7 +19,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/eacl"
|
"github.com/nspcc-dev/neofs-sdk-go/eacl"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
"github.com/nspcc-dev/neofs-sdk-go/object"
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
||||||
|
@ -167,7 +166,7 @@ func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
|
bucketACL, err := h.getBucketACL(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -180,7 +179,7 @@ func (h *handler) GetBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, error) {
|
func (h *handler) bearerTokenIssuerKey(ctx context.Context) (*keys.PublicKey, error) {
|
||||||
box, err := layer.GetBoxData(ctx)
|
box, err := getBoxData(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -243,7 +242,7 @@ func (h *handler) PutBucketACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) updateBucketACL(r *http.Request, astChild *ast, bktInfo *data.BucketInfo, sessionToken *session.Container) (bool, error) {
|
func (h *handler) updateBucketACL(r *http.Request, astChild *ast, bktInfo *data.BucketInfo, sessionToken *session.Container) (bool, error) {
|
||||||
bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
|
bucketACL, err := h.getBucketACL(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, fmt.Errorf("could not get bucket eacl: %w", err)
|
return false, fmt.Errorf("could not get bucket eacl: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -267,13 +266,13 @@ func (h *handler) updateBucketACL(r *http.Request, astChild *ast, bktInfo *data.
|
||||||
return false, fmt.Errorf("could not translate ast to table: %w", err)
|
return false, fmt.Errorf("could not translate ast to table: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.PutBucketACLParams{
|
p := &PutBucketACLParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
EACL: table,
|
EACL: table,
|
||||||
SessionToken: sessionToken,
|
SessionToken: sessionToken,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
|
if err = h.putBucketACL(r.Context(), p); err != nil {
|
||||||
return false, fmt.Errorf("could not put bucket acl: %w", err)
|
return false, fmt.Errorf("could not put bucket acl: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -289,19 +288,19 @@ func (h *handler) GetObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
|
bucketACL, err := h.getBucketACL(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
prm := &layer.HeadObjectParams{
|
prm := &HeadObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: reqInfo.ObjectName,
|
Object: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
objInfo, err := h.obj.GetObjectInfo(r.Context(), prm)
|
objInfo, err := h.getObjectInfo(r.Context(), prm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not object info", reqInfo, err)
|
h.logAndSendError(w, "could not object info", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -333,13 +332,13 @@ func (h *handler) PutObjectACLHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.HeadObjectParams{
|
p := &HeadObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: reqInfo.ObjectName,
|
Object: reqInfo.ObjectName,
|
||||||
VersionID: versionID,
|
VersionID: versionID,
|
||||||
}
|
}
|
||||||
|
|
||||||
objInfo, err := h.obj.GetObjectInfo(r.Context(), p)
|
objInfo, err := h.getObjectInfo(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get object info", reqInfo, err)
|
h.logAndSendError(w, "could not get object info", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -397,7 +396,7 @@ func (h *handler) GetBucketPolicyHandler(w http.ResponseWriter, r *http.Request)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bucketACL, err := h.obj.GetBucketACL(r.Context(), bktInfo)
|
bucketACL, err := h.getBucketACL(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
h.logAndSendError(w, "could not fetch bucket acl", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -1326,7 +1325,7 @@ func isWriteOperation(op eacl.Operation) bool {
|
||||||
return op == eacl.OperationDelete || op == eacl.OperationPut
|
return op == eacl.OperationDelete || op == eacl.OperationPut
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) encodeObjectACL(bucketACL *layer.BucketACL, bucketName, objectVersion string) *AccessControlPolicy {
|
func (h *handler) encodeObjectACL(bucketACL *BucketACL, bucketName, objectVersion string) *AccessControlPolicy {
|
||||||
res := &AccessControlPolicy{
|
res := &AccessControlPolicy{
|
||||||
Owner: Owner{
|
Owner: Owner{
|
||||||
ID: bucketACL.Info.Owner.String(),
|
ID: bucketACL.Info.Owner.String(),
|
||||||
|
@ -1394,7 +1393,7 @@ func (h *handler) encodeObjectACL(bucketACL *layer.BucketACL, bucketName, object
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) encodeBucketACL(bucketName string, bucketACL *layer.BucketACL) *AccessControlPolicy {
|
func (h *handler) encodeBucketACL(bucketName string, bucketACL *BucketACL) *AccessControlPolicy {
|
||||||
return h.encodeObjectACL(bucketACL, bucketName, "")
|
return h.encodeObjectACL(bucketACL, bucketName, "")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1375,7 +1375,7 @@ func putBucketPolicy(hc *handlerContext, bktName string, bktPolicy *bucketPolicy
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkLastRecords(t *testing.T, tc *handlerContext, bktInfo *data.BucketInfo, action eacl.Action) {
|
func checkLastRecords(t *testing.T, tc *handlerContext, bktInfo *data.BucketInfo, action eacl.Action) {
|
||||||
bktACL, err := tc.Layer().GetBucketACL(tc.Context(), bktInfo)
|
bktACL, err := tc.h.getBucketACL(tc.Context(), bktInfo)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
length := len(bktACL.EACL.Records())
|
length := len(bktACL.EACL.Records())
|
||||||
|
@ -1421,7 +1421,7 @@ func createBucket(t *testing.T, tc *handlerContext, bktName string, box *accessb
|
||||||
tc.Handler().CreateBucketHandler(w, r)
|
tc.Handler().CreateBucketHandler(w, r)
|
||||||
assertStatus(t, w, http.StatusOK)
|
assertStatus(t, w, http.StatusOK)
|
||||||
|
|
||||||
bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
|
bktInfo, err := tc.h.getBucketInfo(tc.Context(), bktName)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
return bktInfo
|
return bktInfo
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,11 +1,14 @@
|
||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/nats-io/nats.go"
|
||||||
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -13,14 +16,33 @@ import (
|
||||||
type (
|
type (
|
||||||
handler struct {
|
handler struct {
|
||||||
log *zap.Logger
|
log *zap.Logger
|
||||||
obj layer.Client
|
|
||||||
notificator Notificator
|
|
||||||
cfg *Config
|
cfg *Config
|
||||||
|
neoFS NeoFS
|
||||||
|
notificator Notificator
|
||||||
|
resolver BucketResolver
|
||||||
|
cache *Cache
|
||||||
|
treeService TreeService
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnonymousKey contains data for anonymous requests.
|
||||||
|
AnonymousKey struct {
|
||||||
|
Key *keys.PrivateKey
|
||||||
}
|
}
|
||||||
|
|
||||||
Notificator interface {
|
Notificator interface {
|
||||||
SendNotifications(topics map[string]string, p *SendNotificationParams) error
|
SendNotifications(topics map[string]string, p *SendNotificationParams) error
|
||||||
SendTestNotification(topic, bucketName, requestID, HostID string, now time.Time) error
|
SendTestNotification(topic, bucketName, requestID, HostID string, now time.Time) error
|
||||||
|
|
||||||
|
Subscribe(context.Context, string, MsgHandler) error
|
||||||
|
Listen(context.Context)
|
||||||
|
}
|
||||||
|
|
||||||
|
MsgHandler interface {
|
||||||
|
HandleMessage(context.Context, *nats.Msg) error
|
||||||
|
}
|
||||||
|
|
||||||
|
BucketResolver interface {
|
||||||
|
Resolve(ctx context.Context, name string) (cid.ID, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Config contains data which handler needs to keep.
|
// Config contains data which handler needs to keep.
|
||||||
|
@ -29,6 +51,11 @@ type (
|
||||||
DefaultMaxAge int
|
DefaultMaxAge int
|
||||||
NotificatorEnabled bool
|
NotificatorEnabled bool
|
||||||
CopiesNumber uint32
|
CopiesNumber uint32
|
||||||
|
AnonKey AnonymousKey
|
||||||
|
Cache *CachesConfig
|
||||||
|
Resolver BucketResolver
|
||||||
|
TreeService TreeService
|
||||||
|
NeoFS NeoFS
|
||||||
}
|
}
|
||||||
|
|
||||||
PlacementPolicy interface {
|
PlacementPolicy interface {
|
||||||
|
@ -47,10 +74,8 @@ const (
|
||||||
var _ api.Handler = (*handler)(nil)
|
var _ api.Handler = (*handler)(nil)
|
||||||
|
|
||||||
// New creates new api.Handler using given logger and client.
|
// New creates new api.Handler using given logger and client.
|
||||||
func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg *Config) (api.Handler, error) {
|
func New(ctx context.Context, log *zap.Logger, notificator Notificator, cfg *Config) (api.Handler, error) {
|
||||||
switch {
|
switch {
|
||||||
case obj == nil:
|
|
||||||
return nil, errors.New("empty NeoFS Object Layer")
|
|
||||||
case log == nil:
|
case log == nil:
|
||||||
return nil, errors.New("empty logger")
|
return nil, errors.New("empty logger")
|
||||||
}
|
}
|
||||||
|
@ -61,10 +86,19 @@ func New(log *zap.Logger, obj layer.Client, notificator Notificator, cfg *Config
|
||||||
return nil, errors.New("empty notificator")
|
return nil, errors.New("empty notificator")
|
||||||
}
|
}
|
||||||
|
|
||||||
return &handler{
|
h := &handler{
|
||||||
log: log,
|
log: log,
|
||||||
obj: obj,
|
|
||||||
cfg: cfg,
|
cfg: cfg,
|
||||||
notificator: notificator,
|
notificator: notificator,
|
||||||
}, nil
|
neoFS: cfg.NeoFS,
|
||||||
|
resolver: cfg.Resolver,
|
||||||
|
cache: NewCache(cfg.Cache),
|
||||||
|
treeService: cfg.TreeService,
|
||||||
|
}
|
||||||
|
|
||||||
|
if cfg.NotificatorEnabled {
|
||||||
|
h.notificator.Listen(ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
return h, nil
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,7 +9,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -31,11 +30,11 @@ type (
|
||||||
MaxParts int `xml:"MaxParts,omitempty"`
|
MaxParts int `xml:"MaxParts,omitempty"`
|
||||||
NextPartNumberMarker int `xml:"NextPartNumberMarker,omitempty"`
|
NextPartNumberMarker int `xml:"NextPartNumberMarker,omitempty"`
|
||||||
PartNumberMarker int `xml:"PartNumberMarker,omitempty"`
|
PartNumberMarker int `xml:"PartNumberMarker,omitempty"`
|
||||||
Parts []Part `xml:"Part,omitempty"`
|
Parts []ObjPart `xml:"Part,omitempty"`
|
||||||
PartsCount int `xml:"PartsCount,omitempty"`
|
PartsCount int `xml:"PartsCount,omitempty"`
|
||||||
}
|
}
|
||||||
|
|
||||||
Part struct {
|
ObjPart struct {
|
||||||
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
|
ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"`
|
||||||
PartNumber int `xml:"PartNumber,omitempty"`
|
PartNumber int `xml:"PartNumber,omitempty"`
|
||||||
Size int `xml:"Size,omitempty"`
|
Size int `xml:"Size,omitempty"`
|
||||||
|
@ -52,7 +51,7 @@ type (
|
||||||
|
|
||||||
const (
|
const (
|
||||||
eTag = "ETag"
|
eTag = "ETag"
|
||||||
checksum = "Checksum"
|
checksumAttr = "Checksum"
|
||||||
objectParts = "ObjectParts"
|
objectParts = "ObjectParts"
|
||||||
storageClass = "StorageClass"
|
storageClass = "StorageClass"
|
||||||
objectSize = "ObjectSize"
|
objectSize = "ObjectSize"
|
||||||
|
@ -60,7 +59,7 @@ const (
|
||||||
|
|
||||||
var validAttributes = map[string]struct{}{
|
var validAttributes = map[string]struct{}{
|
||||||
eTag: {},
|
eTag: {},
|
||||||
checksum: {},
|
checksumAttr: {},
|
||||||
objectParts: {},
|
objectParts: {},
|
||||||
storageClass: {},
|
storageClass: {},
|
||||||
objectSize: {},
|
objectSize: {},
|
||||||
|
@ -81,13 +80,13 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.HeadObjectParams{
|
p := &HeadObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: reqInfo.ObjectName,
|
Object: reqInfo.ObjectName,
|
||||||
VersionID: params.VersionID,
|
VersionID: params.VersionID,
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
|
extendedInfo, err := h.getExtendedObjectInfo(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not fetch object info", reqInfo, err)
|
h.logAndSendError(w, "could not fetch object info", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -100,7 +99,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
if err = encryptionParams.MatchObjectEncryption(FormEncryptionInfo(info.Headers)); err != nil {
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -110,7 +109,7 @@ func (h *handler) GetObjectAttributesHandler(w http.ResponseWriter, r *http.Requ
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
bktSettings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -162,7 +161,7 @@ func parseGetObjectAttributeArgs(r *http.Request) (*GetObjectAttributesArgs, err
|
||||||
var err error
|
var err error
|
||||||
maxPartsVal := r.Header.Get(api.AmzMaxParts)
|
maxPartsVal := r.Header.Get(api.AmzMaxParts)
|
||||||
if maxPartsVal == "" {
|
if maxPartsVal == "" {
|
||||||
res.MaxParts = layer.MaxSizePartsList
|
res.MaxParts = MaxSizePartsList
|
||||||
} else if res.MaxParts, err = strconv.Atoi(maxPartsVal); err != nil || res.MaxParts < 0 {
|
} else if res.MaxParts, err = strconv.Atoi(maxPartsVal); err != nil || res.MaxParts < 0 {
|
||||||
return nil, errors.GetAPIError(errors.ErrInvalidMaxKeys)
|
return nil, errors.GetAPIError(errors.ErrInvalidMaxKeys)
|
||||||
}
|
}
|
||||||
|
@ -189,7 +188,7 @@ func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttribu
|
||||||
resp.StorageClass = "STANDARD"
|
resp.StorageClass = "STANDARD"
|
||||||
case objectSize:
|
case objectSize:
|
||||||
resp.ObjectSize = info.Size
|
resp.ObjectSize = info.Size
|
||||||
case checksum:
|
case checksumAttr:
|
||||||
resp.Checksum = &Checksum{ChecksumSHA256: info.HashSum}
|
resp.Checksum = &Checksum{ChecksumSHA256: info.HashSum}
|
||||||
case objectParts:
|
case objectParts:
|
||||||
parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
|
parts, err := formUploadAttributes(info, p.MaxParts, p.PartNumberMarker)
|
||||||
|
@ -205,20 +204,53 @@ func encodeToObjectAttributesResponse(info *data.ObjectInfo, p *GetObjectAttribu
|
||||||
return resp, nil
|
return resp, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func tryDirectory(bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) *data.ObjectInfo {
|
||||||
|
dirName := tryDirectoryName(node, prefix, delimiter)
|
||||||
|
if len(dirName) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &data.ObjectInfo{
|
||||||
|
ID: node.OID, // to use it as continuation token
|
||||||
|
CID: bktInfo.CID,
|
||||||
|
IsDir: true,
|
||||||
|
IsDeleteMarker: node.IsDeleteMarker(),
|
||||||
|
Bucket: bktInfo.Name,
|
||||||
|
Name: dirName,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// tryDirectoryName forms directory name by prefix and delimiter.
|
||||||
|
// If node isn't a directory empty string is returned.
|
||||||
|
// This function doesn't check if node has a prefix. It must do a caller.
|
||||||
|
func tryDirectoryName(node *data.NodeVersion, prefix, delimiter string) string {
|
||||||
|
if len(delimiter) == 0 {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
tail := strings.TrimPrefix(node.FilePath, prefix)
|
||||||
|
index := strings.Index(tail, delimiter)
|
||||||
|
if index >= 0 {
|
||||||
|
return prefix + tail[:index+1]
|
||||||
|
}
|
||||||
|
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
func formUploadAttributes(info *data.ObjectInfo, maxParts, marker int) (*ObjectParts, error) {
|
func formUploadAttributes(info *data.ObjectInfo, maxParts, marker int) (*ObjectParts, error) {
|
||||||
completedParts, ok := info.Headers[layer.UploadCompletedParts]
|
completedParts, ok := info.Headers[UploadCompletedParts]
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
partInfos := strings.Split(completedParts, ",")
|
partInfos := strings.Split(completedParts, ",")
|
||||||
parts := make([]Part, len(partInfos))
|
parts := make([]ObjPart, len(partInfos))
|
||||||
for i, p := range partInfos {
|
for i, p := range partInfos {
|
||||||
part, err := layer.ParseCompletedPartHeader(p)
|
part, err := ParseCompletedPartHeader(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("invalid completed part: %w", err)
|
return nil, fmt.Errorf("invalid completed part: %w", err)
|
||||||
}
|
}
|
||||||
parts[i] = Part{
|
parts[i] = ObjPart{
|
||||||
PartNumber: part.PartNumber,
|
PartNumber: part.PartNumber,
|
||||||
Size: int(part.Size),
|
Size: int(part.Size),
|
||||||
ChecksumSHA256: part.ETag,
|
ChecksumSHA256: part.ETag,
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
package layer
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/cache"
|
"github.com/nspcc-dev/neofs-s3-gw/api/cache"
|
|
@ -10,7 +10,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/auth"
|
"github.com/nspcc-dev/neofs-s3-gw/api/auth"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/session"
|
"github.com/nspcc-dev/neofs-sdk-go/session"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -68,7 +67,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
srcObjPrm := &layer.HeadObjectParams{
|
srcObjPrm := &HeadObjectParams{
|
||||||
Object: srcObject,
|
Object: srcObject,
|
||||||
VersionID: versionID,
|
VersionID: versionID,
|
||||||
}
|
}
|
||||||
|
@ -84,7 +83,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), dstBktInfo)
|
settings, err := h.getBucketSettings(r.Context(), dstBktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -97,7 +96,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), srcObjPrm)
|
extendedSrcObjInfo, err := h.getExtendedObjectInfo(r.Context(), srcObjPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not find object", reqInfo, err)
|
h.logAndSendError(w, "could not find object", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -126,8 +125,8 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
tagPrm := &layer.GetObjectTaggingParams{
|
tagPrm := &GetObjectTaggingParams{
|
||||||
ObjectVersion: &layer.ObjectVersion{
|
ObjectVersion: &ObjectVersion{
|
||||||
BktInfo: srcObjPrm.BktInfo,
|
BktInfo: srcObjPrm.BktInfo,
|
||||||
ObjectName: srcObject,
|
ObjectName: srcObject,
|
||||||
VersionID: srcObjInfo.VersionID(),
|
VersionID: srcObjInfo.VersionID(),
|
||||||
|
@ -135,7 +134,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
NodeVersion: extendedSrcObjInfo.NodeVersion,
|
NodeVersion: extendedSrcObjInfo.NodeVersion,
|
||||||
}
|
}
|
||||||
|
|
||||||
_, tagSet, err = h.obj.GetObjectTagging(r.Context(), tagPrm)
|
_, tagSet, err = h.getObjectTagging(r.Context(), tagPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -148,7 +147,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
|
if err = encryptionParams.MatchObjectEncryption(FormEncryptionInfo(srcObjInfo.Headers)); err != nil {
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -173,7 +172,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
params := &layer.CopyObjectParams{
|
params := &CopyObjectParams{
|
||||||
SrcObject: srcObjInfo,
|
SrcObject: srcObjInfo,
|
||||||
ScrBktInfo: srcObjPrm.BktInfo,
|
ScrBktInfo: srcObjPrm.BktInfo,
|
||||||
DstBktInfo: dstBktInfo,
|
DstBktInfo: dstBktInfo,
|
||||||
|
@ -191,7 +190,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
|
additional := []zap.Field{zap.String("src_bucket_name", srcBucket), zap.String("src_object_name", srcObject)}
|
||||||
extendedDstObjInfo, err := h.obj.CopyObject(r.Context(), params)
|
extendedDstObjInfo, err := h.copyObject(r.Context(), params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't copy object", reqInfo, err, additional...)
|
h.logAndSendError(w, "couldn't copy object", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
|
@ -210,21 +209,21 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.PutBucketACLParams{
|
p := &PutBucketACLParams{
|
||||||
BktInfo: dstBktInfo,
|
BktInfo: dstBktInfo,
|
||||||
EACL: newEaclTable,
|
EACL: newEaclTable,
|
||||||
SessionToken: sessionTokenEACL,
|
SessionToken: sessionTokenEACL,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
|
if err = h.putBucketACL(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if tagSet != nil {
|
if tagSet != nil {
|
||||||
tagPrm := &layer.PutObjectTaggingParams{
|
tagPrm := &PutObjectTaggingParams{
|
||||||
ObjectVersion: &layer.ObjectVersion{
|
ObjectVersion: &ObjectVersion{
|
||||||
BktInfo: dstBktInfo,
|
BktInfo: dstBktInfo,
|
||||||
ObjectName: reqInfo.ObjectName,
|
ObjectName: reqInfo.ObjectName,
|
||||||
VersionID: dstObjInfo.VersionID(),
|
VersionID: dstObjInfo.VersionID(),
|
||||||
|
@ -232,7 +231,7 @@ func (h *handler) CopyObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
TagSet: tagSet,
|
TagSet: tagSet,
|
||||||
NodeVersion: extendedDstObjInfo.NodeVersion,
|
NodeVersion: extendedDstObjInfo.NodeVersion,
|
||||||
}
|
}
|
||||||
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
|
if _, err = h.putObjectTagging(r.Context(), tagPrm); err != nil {
|
||||||
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,13 +1,19 @@
|
||||||
package handler
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"encoding/xml"
|
||||||
|
errorsStd "errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -17,6 +23,8 @@ const (
|
||||||
wildcard = "*"
|
wildcard = "*"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var supportedMethods = map[string]struct{}{"GET": {}, "HEAD": {}, "POST": {}, "PUT": {}, "DELETE": {}}
|
||||||
|
|
||||||
func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := api.GetReqInfo(r.Context())
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
|
|
||||||
|
@ -26,7 +34,7 @@ func (h *handler) GetBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
|
cors, err := h.getBucketCORS(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get cors", reqInfo, err)
|
h.logAndSendError(w, "could not get cors", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -47,13 +55,13 @@ func (h *handler) PutBucketCorsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.PutCORSParams{
|
p := &PutCORSParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Reader: r.Body,
|
Reader: r.Body,
|
||||||
CopiesNumber: h.cfg.CopiesNumber,
|
CopiesNumber: h.cfg.CopiesNumber,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketCORS(r.Context(), p); err != nil {
|
if err = h.putBucketCORS(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "could not put cors configuration", reqInfo, err)
|
h.logAndSendError(w, "could not put cors configuration", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -70,7 +78,7 @@ func (h *handler) DeleteBucketCorsHandler(w http.ResponseWriter, r *http.Request
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.DeleteBucketCORS(r.Context(), bktInfo); err != nil {
|
if err = h.deleteBucketCORS(r.Context(), bktInfo); err != nil {
|
||||||
h.logAndSendError(w, "could not delete cors", reqInfo, err)
|
h.logAndSendError(w, "could not delete cors", reqInfo, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -89,13 +97,13 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
|
||||||
if reqInfo.BucketName == "" {
|
if reqInfo.BucketName == "" {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
|
bktInfo, err := h.getBucketInfo(r.Context(), reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.log.Warn("get bucket info", zap.Error(err))
|
h.log.Warn("get bucket info", zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
|
cors, err := h.getBucketCORS(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.log.Warn("get bucket cors", zap.Error(err))
|
h.log.Warn("get bucket cors", zap.Error(err))
|
||||||
return
|
return
|
||||||
|
@ -137,7 +145,7 @@ func (h *handler) AppendCORSHeaders(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := api.GetReqInfo(r.Context())
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
|
bktInfo, err := h.getBucketInfo(r.Context(), reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -160,7 +168,7 @@ func (h *handler) Preflight(w http.ResponseWriter, r *http.Request) {
|
||||||
headers = strings.Split(requestHeaders, ", ")
|
headers = strings.Split(requestHeaders, ", ")
|
||||||
}
|
}
|
||||||
|
|
||||||
cors, err := h.obj.GetBucketCORS(r.Context(), bktInfo)
|
cors, err := h.getBucketCORS(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get cors", reqInfo, err)
|
h.logAndSendError(w, "could not get cors", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -223,3 +231,101 @@ func sliceContains(slice []string, str string) bool {
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h *handler) putBucketCORS(ctx context.Context, p *PutCORSParams) error {
|
||||||
|
var (
|
||||||
|
buf bytes.Buffer
|
||||||
|
tee = io.TeeReader(p.Reader, &buf)
|
||||||
|
cors = &data.CORSConfiguration{}
|
||||||
|
)
|
||||||
|
|
||||||
|
if err := xml.NewDecoder(tee).Decode(cors); err != nil {
|
||||||
|
return fmt.Errorf("xml decode cors: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if cors.CORSRules == nil {
|
||||||
|
return errors.GetAPIError(errors.ErrMalformedXML)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := checkCORS(cors); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
prm := PrmObjectCreate{
|
||||||
|
Container: p.BktInfo.CID,
|
||||||
|
Creator: p.BktInfo.Owner,
|
||||||
|
Payload: p.Reader,
|
||||||
|
Filepath: p.BktInfo.CORSObjectName(),
|
||||||
|
CreationTime: TimeNow(ctx),
|
||||||
|
CopiesNumber: p.CopiesNumber,
|
||||||
|
}
|
||||||
|
|
||||||
|
objID, _, err := h.objectPutAndHash(ctx, prm, p.BktInfo)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("put system object: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
objIDToDelete, err := h.treeService.PutBucketCORS(ctx, p.BktInfo, objID)
|
||||||
|
objIDToDeleteNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
|
||||||
|
if err != nil && !objIDToDeleteNotFound {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if !objIDToDeleteNotFound {
|
||||||
|
if err = h.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
|
||||||
|
h.log.Error("couldn't delete cors object", zap.Error(err),
|
||||||
|
zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
|
||||||
|
zap.String("bucket name", p.BktInfo.Name),
|
||||||
|
zap.String("objID", objIDToDelete.EncodeToString()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
h.cache.PutCORS(h.Owner(ctx), p.BktInfo, cors)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *handler) getBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*data.CORSConfiguration, error) {
|
||||||
|
cors, err := h.getCORS(ctx, bktInfo)
|
||||||
|
if err != nil {
|
||||||
|
if errorsStd.Is(err, ErrNodeNotFound) {
|
||||||
|
return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
|
||||||
|
}
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return cors, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *handler) deleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) error {
|
||||||
|
objID, err := h.treeService.DeleteBucketCORS(ctx, bktInfo)
|
||||||
|
objIDNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
|
||||||
|
if err != nil && !objIDNotFound {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if !objIDNotFound {
|
||||||
|
if err = h.objectDelete(ctx, bktInfo, objID); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
h.cache.DeleteCORS(bktInfo)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkCORS(cors *data.CORSConfiguration) error {
|
||||||
|
for _, r := range cors.CORSRules {
|
||||||
|
for _, m := range r.AllowedMethods {
|
||||||
|
if _, ok := supportedMethods[m]; !ok {
|
||||||
|
return errors.GetAPIErrorWithError(errors.ErrCORSUnsupportedMethod, fmt.Errorf("unsupported method is %s", m))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for _, h := range r.ExposeHeaders {
|
||||||
|
if h == wildcard {
|
||||||
|
return errors.GetAPIError(errors.ErrCORSWildcardExposeHeaders)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
|
@ -9,7 +9,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
|
apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/session"
|
"github.com/nspcc-dev/neofs-sdk-go/session"
|
||||||
|
@ -60,7 +59,7 @@ type DeleteObjectsResponse struct {
|
||||||
func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := api.GetReqInfo(r.Context())
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
|
versionID := reqInfo.URL.Query().Get(api.QueryVersionID)
|
||||||
versionedObject := []*layer.VersionedObject{{
|
versionedObject := []*VersionedObject{{
|
||||||
Name: reqInfo.ObjectName,
|
Name: reqInfo.ObjectName,
|
||||||
VersionID: versionID,
|
VersionID: versionID,
|
||||||
}}
|
}}
|
||||||
|
@ -71,18 +70,18 @@ func (h *handler) DeleteObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
bktSettings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.DeleteObjectParams{
|
p := &DeleteObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Objects: versionedObject,
|
Objects: versionedObject,
|
||||||
Settings: bktSettings,
|
Settings: bktSettings,
|
||||||
}
|
}
|
||||||
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
|
deletedObjects := h.deleteObjects(r.Context(), p)
|
||||||
deletedObject := deletedObjects[0]
|
deletedObject := deletedObjects[0]
|
||||||
if deletedObject.Error != nil {
|
if deletedObject.Error != nil {
|
||||||
if isErrObjectLocked(deletedObject.Error) {
|
if isErrObjectLocked(deletedObject.Error) {
|
||||||
|
@ -176,10 +175,10 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
removed := make(map[string]*layer.VersionedObject)
|
removed := make(map[string]*VersionedObject)
|
||||||
toRemove := make([]*layer.VersionedObject, 0, len(requested.Objects))
|
toRemove := make([]*VersionedObject, 0, len(requested.Objects))
|
||||||
for _, obj := range requested.Objects {
|
for _, obj := range requested.Objects {
|
||||||
versionedObj := &layer.VersionedObject{
|
versionedObj := &VersionedObject{
|
||||||
Name: obj.ObjectName,
|
Name: obj.ObjectName,
|
||||||
VersionID: obj.VersionID,
|
VersionID: obj.VersionID,
|
||||||
}
|
}
|
||||||
|
@ -198,7 +197,7 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
bktSettings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -211,12 +210,12 @@ func (h *handler) DeleteMultipleObjectsHandler(w http.ResponseWriter, r *http.Re
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
|
||||||
p := &layer.DeleteObjectParams{
|
p := &DeleteObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Objects: toRemove,
|
Objects: toRemove,
|
||||||
Settings: bktSettings,
|
Settings: bktSettings,
|
||||||
}
|
}
|
||||||
deletedObjects := h.obj.DeleteObjects(r.Context(), p)
|
deletedObjects := h.deleteObjects(r.Context(), p)
|
||||||
|
|
||||||
var errs []error
|
var errs []error
|
||||||
for _, obj := range deletedObjects {
|
for _, obj := range deletedObjects {
|
||||||
|
@ -270,12 +269,12 @@ func (h *handler) DeleteBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
var sessionToken *session.Container
|
var sessionToken *session.Container
|
||||||
|
|
||||||
boxData, err := layer.GetBoxData(r.Context())
|
boxData, err := GetBoxData(r.Context())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
sessionToken = boxData.Gate.SessionTokenForDelete()
|
sessionToken = boxData.Gate.SessionTokenForDelete()
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.DeleteBucket(r.Context(), &layer.DeleteBucketParams{
|
if err = h.deleteBucket(r.Context(), &DeleteBucketParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
SessionToken: sessionToken,
|
SessionToken: sessionToken,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
|
|
|
@ -253,7 +253,7 @@ func createBucketAndObject(tc *handlerContext, bktName, objName string) (*data.B
|
||||||
|
|
||||||
func createVersionedBucketAndObject(t *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
|
func createVersionedBucketAndObject(t *testing.T, tc *handlerContext, bktName, objName string) (*data.BucketInfo, *data.ObjectInfo) {
|
||||||
createTestBucket(tc, bktName)
|
createTestBucket(tc, bktName)
|
||||||
bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
|
bktInfo, err := tc.h.getBucketInfo(tc.Context(), bktName)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
putBucketVersioning(t, tc, bktName, true)
|
putBucketVersioning(t, tc, bktName, true)
|
||||||
|
|
||||||
|
@ -325,7 +325,7 @@ func putObject(t *testing.T, tc *handlerContext, bktName, objName string) {
|
||||||
|
|
||||||
func createSuspendedBucket(t *testing.T, tc *handlerContext, bktName string) *data.BucketInfo {
|
func createSuspendedBucket(t *testing.T, tc *handlerContext, bktName string) *data.BucketInfo {
|
||||||
createTestBucket(tc, bktName)
|
createTestBucket(tc, bktName)
|
||||||
bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
|
bktInfo, err := tc.h.getBucketInfo(tc.Context(), bktName)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
putBucketVersioning(t, tc, bktName, false)
|
putBucketVersioning(t, tc, bktName, false)
|
||||||
return bktInfo
|
return bktInfo
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
package layer
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"io"
|
"io"
|
|
@ -13,7 +13,6 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -33,9 +32,9 @@ func TestSimpleGetEncrypted(t *testing.T) {
|
||||||
content := "content"
|
content := "content"
|
||||||
putEncryptedObject(t, tc, bktName, objName, content)
|
putEncryptedObject(t, tc, bktName, objName, content)
|
||||||
|
|
||||||
objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), &layer.HeadObjectParams{BktInfo: bktInfo, Object: objName})
|
objInfo, err := tc.h.getObjectInfo(tc.Context(), &HeadObjectParams{BktInfo: bktInfo, Object: objName})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
obj, err := tc.MockedPool().ReadObject(tc.Context(), layer.PrmObjectRead{Container: bktInfo.CID, Object: objInfo.ID})
|
obj, err := tc.MockedPool().ReadObject(tc.Context(), PrmObjectRead{Container: bktInfo.CID, Object: objInfo.ID})
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
encryptedContent, err := io.ReadAll(obj.Payload)
|
encryptedContent, err := io.ReadAll(obj.Payload)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
@ -193,10 +192,10 @@ func completeMultipartUpload(hc *handlerContext, bktName, objName, uploadID stri
|
||||||
query := make(url.Values)
|
query := make(url.Values)
|
||||||
query.Set(uploadIDQuery, uploadID)
|
query.Set(uploadIDQuery, uploadID)
|
||||||
complete := &CompleteMultipartUpload{
|
complete := &CompleteMultipartUpload{
|
||||||
Parts: []*layer.CompletedPart{},
|
Parts: []*CompletedPart{},
|
||||||
}
|
}
|
||||||
for i, tag := range partsETags {
|
for i, tag := range partsETags {
|
||||||
complete.Parts = append(complete.Parts, &layer.CompletedPart{
|
complete.Parts = append(complete.Parts, &CompletedPart{
|
||||||
ETag: tag,
|
ETag: tag,
|
||||||
PartNumber: i + 1,
|
PartNumber: i + 1,
|
||||||
})
|
})
|
||||||
|
@ -286,7 +285,7 @@ func getEncryptedObjectRange(t *testing.T, tc *handlerContext, bktName, objName
|
||||||
|
|
||||||
func setEncryptHeaders(r *http.Request) {
|
func setEncryptHeaders(r *http.Request) {
|
||||||
r.TLS = &tls.ConnectionState{}
|
r.TLS = &tls.ConnectionState{}
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
|
r.Header.Set(api.AmzServerSideEncryptionCustomerAlgorithm, AESEncryptionAlgorithm)
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerKey, aes256Key)
|
r.Header.Set(api.AmzServerSideEncryptionCustomerKey, aes256Key)
|
||||||
r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, aes256KeyMD5)
|
r.Header.Set(api.AmzServerSideEncryptionCustomerKeyMD5, aes256KeyMD5)
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,7 +11,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -22,7 +21,7 @@ type conditionalArgs struct {
|
||||||
IfNoneMatch string
|
IfNoneMatch string
|
||||||
}
|
}
|
||||||
|
|
||||||
func fetchRangeHeader(headers http.Header, fullSize uint64) (*layer.RangeParams, error) {
|
func fetchRangeHeader(headers http.Header, fullSize uint64) (*RangeParams, error) {
|
||||||
const prefix = "bytes="
|
const prefix = "bytes="
|
||||||
rangeHeader := headers.Get("Range")
|
rangeHeader := headers.Get("Range")
|
||||||
if len(rangeHeader) == 0 {
|
if len(rangeHeader) == 0 {
|
||||||
|
@ -61,7 +60,7 @@ func fetchRangeHeader(headers http.Header, fullSize uint64) (*layer.RangeParams,
|
||||||
if err0 != nil || err1 != nil || start > end || start > fullSize {
|
if err0 != nil || err1 != nil || start > end || start > fullSize {
|
||||||
return nil, errors.GetAPIError(errors.ErrInvalidRange)
|
return nil, errors.GetAPIError(errors.ErrInvalidRange)
|
||||||
}
|
}
|
||||||
return &layer.RangeParams{Start: start, End: end}, nil
|
return &RangeParams{Start: start, End: end}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func overrideResponseHeaders(h http.Header, query url.Values) {
|
func overrideResponseHeaders(h http.Header, query url.Values) {
|
||||||
|
@ -84,8 +83,8 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
|
||||||
}
|
}
|
||||||
h.Set(api.LastModified, info.Created.UTC().Format(http.TimeFormat))
|
h.Set(api.LastModified, info.Created.UTC().Format(http.TimeFormat))
|
||||||
|
|
||||||
if len(info.Headers[layer.AttributeEncryptionAlgorithm]) > 0 {
|
if len(info.Headers[AttributeEncryptionAlgorithm]) > 0 {
|
||||||
h.Set(api.ContentLength, info.Headers[layer.AttributeDecryptedSize])
|
h.Set(api.ContentLength, info.Headers[AttributeDecryptedSize])
|
||||||
addSSECHeaders(h, requestHeader)
|
addSSECHeaders(h, requestHeader)
|
||||||
} else {
|
} else {
|
||||||
h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10))
|
h.Set(api.ContentLength, strconv.FormatInt(info.Size, 10))
|
||||||
|
@ -106,7 +105,7 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
|
||||||
}
|
}
|
||||||
|
|
||||||
for key, val := range info.Headers {
|
for key, val := range info.Headers {
|
||||||
if layer.IsSystemHeader(key) {
|
if IsSystemHeader(key) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
h[api.MetadataPrefix+key] = []string{val}
|
h[api.MetadataPrefix+key] = []string{val}
|
||||||
|
@ -115,7 +114,7 @@ func writeHeaders(h http.Header, requestHeader http.Header, extendedInfo *data.E
|
||||||
|
|
||||||
func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
var (
|
var (
|
||||||
params *layer.RangeParams
|
params *RangeParams
|
||||||
|
|
||||||
reqInfo = api.GetReqInfo(r.Context())
|
reqInfo = api.GetReqInfo(r.Context())
|
||||||
)
|
)
|
||||||
|
@ -132,13 +131,13 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.HeadObjectParams{
|
p := &HeadObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: reqInfo.ObjectName,
|
Object: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
|
extendedInfo, err := h.getExtendedObjectInfo(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not find object", reqInfo, err)
|
h.logAndSendError(w, "could not find object", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -156,14 +155,14 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
if err = encryptionParams.MatchObjectEncryption(FormEncryptionInfo(info.Headers)); err != nil {
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
fullSize := info.Size
|
fullSize := info.Size
|
||||||
if encryptionParams.Enabled() {
|
if encryptionParams.Enabled() {
|
||||||
if fullSize, err = strconv.ParseInt(info.Headers[layer.AttributeDecryptedSize], 10, 64); err != nil {
|
if fullSize, err = strconv.ParseInt(info.Headers[AttributeDecryptedSize], 10, 64); err != nil {
|
||||||
h.logAndSendError(w, "invalid decrypted size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
h.logAndSendError(w, "invalid decrypted size header", reqInfo, errors.GetAPIError(errors.ErrBadRequest))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -174,19 +173,19 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
t := &layer.ObjectVersion{
|
t := &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: info.Name,
|
ObjectName: info.Name,
|
||||||
VersionID: info.VersionID(),
|
VersionID: info.VersionID(),
|
||||||
}
|
}
|
||||||
|
|
||||||
tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(r.Context(), t, extendedInfo.NodeVersion)
|
tagSet, lockInfo, err := h.getObjectTaggingAndLock(r.Context(), t, extendedInfo.NodeVersion)
|
||||||
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
|
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
|
||||||
h.logAndSendError(w, "could not get object meta data", reqInfo, err)
|
h.logAndSendError(w, "could not get object meta data", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if layer.IsAuthenticatedRequest(r.Context()) {
|
if IsAuthenticatedRequest(r.Context()) {
|
||||||
overrideResponseHeaders(w.Header(), reqInfo.URL.Query())
|
overrideResponseHeaders(w.Header(), reqInfo.URL.Query())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -195,7 +194,7 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
bktSettings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -208,14 +207,14 @@ func (h *handler) GetObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
}
|
}
|
||||||
|
|
||||||
getParams := &layer.GetObjectParams{
|
getParams := &GetObjectParams{
|
||||||
ObjectInfo: info,
|
ObjectInfo: info,
|
||||||
Writer: w,
|
Writer: w,
|
||||||
Range: params,
|
Range: params,
|
||||||
BucketInfo: bktInfo,
|
BucketInfo: bktInfo,
|
||||||
Encryption: encryptionParams,
|
Encryption: encryptionParams,
|
||||||
}
|
}
|
||||||
if err = h.obj.GetObject(r.Context(), getParams); err != nil {
|
if err = h.getObject(r.Context(), getParams); err != nil {
|
||||||
h.logAndSendError(w, "could not get object", reqInfo, err)
|
h.logAndSendError(w, "could not get object", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -268,7 +267,7 @@ func parseHTTPTime(data string) (*time.Time, error) {
|
||||||
return &result, nil
|
return &result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func writeRangeHeaders(w http.ResponseWriter, params *layer.RangeParams, size int64) {
|
func writeRangeHeaders(w http.ResponseWriter, params *RangeParams, size int64) {
|
||||||
w.Header().Set(api.AcceptRanges, "bytes")
|
w.Header().Set(api.AcceptRanges, "bytes")
|
||||||
w.Header().Set(api.ContentRange, fmt.Sprintf("bytes %d-%d/%d", params.Start, params.End, size))
|
w.Header().Set(api.ContentRange, fmt.Sprintf("bytes %d-%d/%d", params.Start, params.End, size))
|
||||||
w.Header().Set(api.ContentLength, strconv.FormatUint(params.End-params.Start+1, 10))
|
w.Header().Set(api.ContentLength, strconv.FormatUint(params.End-params.Start+1, 10))
|
||||||
|
|
|
@ -8,24 +8,24 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestFetchRangeHeader(t *testing.T) {
|
func TestFetchRangeHeader(t *testing.T) {
|
||||||
for _, tc := range []struct {
|
for _, tc := range []struct {
|
||||||
header string
|
header string
|
||||||
expected *layer.RangeParams
|
expected *RangeParams
|
||||||
fullSize uint64
|
fullSize uint64
|
||||||
err bool
|
err bool
|
||||||
}{
|
}{
|
||||||
{header: "bytes=0-256", expected: &layer.RangeParams{Start: 0, End: 256}, fullSize: 257, err: false},
|
{header: "bytes=0-256", expected: &RangeParams{Start: 0, End: 256}, fullSize: 257, err: false},
|
||||||
{header: "bytes=0-0", expected: &layer.RangeParams{Start: 0, End: 0}, fullSize: 1, err: false},
|
{header: "bytes=0-0", expected: &RangeParams{Start: 0, End: 0}, fullSize: 1, err: false},
|
||||||
{header: "bytes=0-256", expected: &layer.RangeParams{Start: 0, End: 255}, fullSize: 256, err: false},
|
{header: "bytes=0-256", expected: &RangeParams{Start: 0, End: 255}, fullSize: 256, err: false},
|
||||||
{header: "bytes=0-", expected: &layer.RangeParams{Start: 0, End: 99}, fullSize: 100, err: false},
|
{header: "bytes=0-", expected: &RangeParams{Start: 0, End: 99}, fullSize: 100, err: false},
|
||||||
{header: "bytes=-10", expected: &layer.RangeParams{Start: 90, End: 99}, fullSize: 100, err: false},
|
{header: "bytes=-10", expected: &RangeParams{Start: 90, End: 99}, fullSize: 100, err: false},
|
||||||
{header: "", err: false},
|
{header: "", err: false},
|
||||||
{header: "bytes=-1-256", err: true},
|
{header: "bytes=-1-256", err: true},
|
||||||
{header: "bytes=256-0", err: true},
|
{header: "bytes=256-0", err: true},
|
||||||
|
@ -170,11 +170,13 @@ func TestGetRange(t *testing.T) {
|
||||||
require.Equal(t, "bcdef", string(end))
|
require.Equal(t, "bcdef", string(end))
|
||||||
}
|
}
|
||||||
|
|
||||||
func putObjectContent(hc *handlerContext, bktName, objName, content string) {
|
func putObjectContent(hc *handlerContext, bktName, objName, content string) (version string, etag string) {
|
||||||
body := bytes.NewReader([]byte(content))
|
body := bytes.NewReader([]byte(content))
|
||||||
w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
|
w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
|
||||||
hc.Handler().PutObjectHandler(w, r)
|
hc.Handler().PutObjectHandler(w, r)
|
||||||
assertStatus(hc.t, w, http.StatusOK)
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
|
||||||
|
return w.Header().Get(api.AmzVersionID), w.Header().Get(api.ETag)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, start, end int) []byte {
|
func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, start, end int) []byte {
|
||||||
|
|
|
@ -16,8 +16,7 @@ import (
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
"github.com/nspcc-dev/neofs-s3-gw/internal/resolver"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/resolver"
|
|
||||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
"github.com/nspcc-dev/neofs-sdk-go/object"
|
||||||
|
@ -31,7 +30,7 @@ type handlerContext struct {
|
||||||
owner user.ID
|
owner user.ID
|
||||||
t *testing.T
|
t *testing.T
|
||||||
h *handler
|
h *handler
|
||||||
tp *layer.TestNeoFS
|
tp *TestNeoFS
|
||||||
context context.Context
|
context context.Context
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -39,18 +38,24 @@ func (hc *handlerContext) Handler() *handler {
|
||||||
return hc.h
|
return hc.h
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hc *handlerContext) MockedPool() *layer.TestNeoFS {
|
func (hc *handlerContext) MockedPool() *TestNeoFS {
|
||||||
return hc.tp
|
return hc.tp
|
||||||
}
|
}
|
||||||
|
|
||||||
func (hc *handlerContext) Layer() layer.Client {
|
|
||||||
return hc.h.obj
|
|
||||||
}
|
|
||||||
|
|
||||||
func (hc *handlerContext) Context() context.Context {
|
func (hc *handlerContext) Context() context.Context {
|
||||||
return hc.context
|
return hc.context
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (hc *handlerContext) getObjectByID(objID oid.ID) *object.Object {
|
||||||
|
for _, obj := range hc.tp.Objects() {
|
||||||
|
id, _ := obj.ID()
|
||||||
|
if id.Equals(objID) {
|
||||||
|
return obj
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
type placementPolicyMock struct {
|
type placementPolicyMock struct {
|
||||||
defaultPolicy netmap.PlacementPolicy
|
defaultPolicy netmap.PlacementPolicy
|
||||||
}
|
}
|
||||||
|
@ -68,7 +73,7 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
l := zap.NewExample()
|
l := zap.NewExample()
|
||||||
tp := layer.NewTestNeoFS()
|
tp := NewTestNeoFS()
|
||||||
|
|
||||||
testResolver := &resolver.Resolver{Name: "test_resolver"}
|
testResolver := &resolver.Resolver{Name: "test_resolver"}
|
||||||
testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {
|
testResolver.SetResolveFunc(func(_ context.Context, name string) (cid.ID, error) {
|
||||||
|
@ -78,23 +83,20 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
|
||||||
var owner user.ID
|
var owner user.ID
|
||||||
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
|
user.IDFromKey(&owner, key.PrivateKey.PublicKey)
|
||||||
|
|
||||||
layerCfg := &layer.Config{
|
|
||||||
Caches: layer.DefaultCachesConfigs(zap.NewExample()),
|
|
||||||
AnonKey: layer.AnonymousKey{Key: key},
|
|
||||||
Resolver: testResolver,
|
|
||||||
TreeService: layer.NewTreeService(),
|
|
||||||
}
|
|
||||||
|
|
||||||
var pp netmap.PlacementPolicy
|
var pp netmap.PlacementPolicy
|
||||||
err = pp.DecodeString("REP 1")
|
err = pp.DecodeString("REP 1")
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
h := &handler{
|
h := &handler{
|
||||||
log: l,
|
log: l,
|
||||||
obj: layer.NewLayer(l, tp, layerCfg),
|
cache: NewCache(DefaultCachesConfigs(zap.NewExample())),
|
||||||
|
resolver: testResolver,
|
||||||
|
treeService: NewTreeService(),
|
||||||
cfg: &Config{
|
cfg: &Config{
|
||||||
Policy: &placementPolicyMock{defaultPolicy: pp},
|
Policy: &placementPolicyMock{defaultPolicy: pp},
|
||||||
|
AnonKey: AnonymousKey{Key: key},
|
||||||
},
|
},
|
||||||
|
neoFS: tp,
|
||||||
}
|
}
|
||||||
|
|
||||||
return &handlerContext{
|
return &handlerContext{
|
||||||
|
@ -107,22 +109,22 @@ func prepareHandlerContext(t *testing.T) *handlerContext {
|
||||||
}
|
}
|
||||||
|
|
||||||
func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
|
func createTestBucket(hc *handlerContext, bktName string) *data.BucketInfo {
|
||||||
_, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
|
_, err := hc.MockedPool().CreateContainer(hc.Context(), PrmContainerCreate{
|
||||||
Creator: hc.owner,
|
Creator: hc.owner,
|
||||||
Name: bktName,
|
Name: bktName,
|
||||||
})
|
})
|
||||||
require.NoError(hc.t, err)
|
require.NoError(hc.t, err)
|
||||||
|
|
||||||
bktInfo, err := hc.Layer().GetBucketInfo(hc.Context(), bktName)
|
bktInfo, err := hc.h.getBucketInfo(hc.Context(), bktName)
|
||||||
require.NoError(hc.t, err)
|
require.NoError(hc.t, err)
|
||||||
return bktInfo
|
return bktInfo
|
||||||
}
|
}
|
||||||
|
|
||||||
func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.ObjectLockConfiguration) *data.BucketInfo {
|
func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.ObjectLockConfiguration) *data.BucketInfo {
|
||||||
cnrID, err := hc.MockedPool().CreateContainer(hc.Context(), layer.PrmContainerCreate{
|
cnrID, err := hc.MockedPool().CreateContainer(hc.Context(), PrmContainerCreate{
|
||||||
Creator: hc.owner,
|
Creator: hc.owner,
|
||||||
Name: bktName,
|
Name: bktName,
|
||||||
AdditionalAttributes: [][2]string{{layer.AttributeLockEnabled, "true"}},
|
AdditionalAttributes: [][2]string{{AttributeLockEnabled, "true"}},
|
||||||
})
|
})
|
||||||
require.NoError(hc.t, err)
|
require.NoError(hc.t, err)
|
||||||
|
|
||||||
|
@ -135,7 +137,7 @@ func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.Obj
|
||||||
Owner: ownerID,
|
Owner: ownerID,
|
||||||
}
|
}
|
||||||
|
|
||||||
sp := &layer.PutSettingsParams{
|
sp := &PutSettingsParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Settings: &data.BucketSettings{
|
Settings: &data.BucketSettings{
|
||||||
Versioning: data.VersioningEnabled,
|
Versioning: data.VersioningEnabled,
|
||||||
|
@ -143,7 +145,7 @@ func createTestBucketWithLock(hc *handlerContext, bktName string, conf *data.Obj
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
err = hc.Layer().PutBucketSettings(hc.Context(), sp)
|
err = hc.h.putBucketSettings(hc.Context(), sp)
|
||||||
require.NoError(hc.t, err)
|
require.NoError(hc.t, err)
|
||||||
|
|
||||||
return bktInfo
|
return bktInfo
|
||||||
|
@ -158,7 +160,7 @@ func createTestObject(hc *handlerContext, bktInfo *data.BucketInfo, objName stri
|
||||||
object.AttributeTimestamp: strconv.FormatInt(time.Now().UTC().Unix(), 10),
|
object.AttributeTimestamp: strconv.FormatInt(time.Now().UTC().Unix(), 10),
|
||||||
}
|
}
|
||||||
|
|
||||||
extObjInfo, err := hc.Layer().PutObject(hc.Context(), &layer.PutObjectParams{
|
extObjInfo, err := hc.h.putObject(hc.Context(), &PutObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: objName,
|
Object: objName,
|
||||||
Size: int64(len(content)),
|
Size: int64(len(content)),
|
||||||
|
@ -209,17 +211,17 @@ func parseTestResponse(t *testing.T, response *httptest.ResponseRecorder, body i
|
||||||
}
|
}
|
||||||
|
|
||||||
func existInMockedNeoFS(tc *handlerContext, bktInfo *data.BucketInfo, objInfo *data.ObjectInfo) bool {
|
func existInMockedNeoFS(tc *handlerContext, bktInfo *data.BucketInfo, objInfo *data.ObjectInfo) bool {
|
||||||
p := &layer.GetObjectParams{
|
p := &GetObjectParams{
|
||||||
BucketInfo: bktInfo,
|
BucketInfo: bktInfo,
|
||||||
ObjectInfo: objInfo,
|
ObjectInfo: objInfo,
|
||||||
Writer: io.Discard,
|
Writer: io.Discard,
|
||||||
}
|
}
|
||||||
|
|
||||||
return tc.Layer().GetObject(tc.Context(), p) == nil
|
return tc.h.getObject(tc.Context(), p) == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func listOIDsFromMockedNeoFS(t *testing.T, tc *handlerContext, bktName string) []oid.ID {
|
func listOIDsFromMockedNeoFS(t *testing.T, tc *handlerContext, bktName string) []oid.ID {
|
||||||
bktInfo, err := tc.Layer().GetBucketInfo(tc.Context(), bktName)
|
bktInfo, err := tc.h.getBucketInfo(tc.Context(), bktName)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
return tc.MockedPool().AllObjects(bktInfo.CID)
|
return tc.MockedPool().AllObjects(bktInfo.CID)
|
||||||
|
|
|
@ -7,19 +7,18 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
const sizeToDetectType = 512
|
const sizeToDetectType = 512
|
||||||
|
|
||||||
func getRangeToDetectContentType(maxSize int64) *layer.RangeParams {
|
func getRangeToDetectContentType(maxSize int64) *RangeParams {
|
||||||
end := uint64(maxSize)
|
end := uint64(maxSize)
|
||||||
if sizeToDetectType < end {
|
if sizeToDetectType < end {
|
||||||
end = sizeToDetectType
|
end = sizeToDetectType
|
||||||
}
|
}
|
||||||
|
|
||||||
return &layer.RangeParams{
|
return &RangeParams{
|
||||||
Start: 0,
|
Start: 0,
|
||||||
End: end - 1,
|
End: end - 1,
|
||||||
}
|
}
|
||||||
|
@ -40,13 +39,13 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.HeadObjectParams{
|
p := &HeadObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: reqInfo.ObjectName,
|
Object: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedInfo, err := h.obj.GetExtendedObjectInfo(r.Context(), p)
|
extendedInfo, err := h.getExtendedObjectInfo(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not find object", reqInfo, err)
|
h.logAndSendError(w, "could not find object", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -59,7 +58,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = encryptionParams.MatchObjectEncryption(layer.FormEncryptionInfo(info.Headers)); err != nil {
|
if err = encryptionParams.MatchObjectEncryption(FormEncryptionInfo(info.Headers)); err != nil {
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -69,28 +68,28 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
t := &layer.ObjectVersion{
|
t := &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: info.Name,
|
ObjectName: info.Name,
|
||||||
VersionID: info.VersionID(),
|
VersionID: info.VersionID(),
|
||||||
}
|
}
|
||||||
|
|
||||||
tagSet, lockInfo, err := h.obj.GetObjectTaggingAndLock(r.Context(), t, extendedInfo.NodeVersion)
|
tagSet, lockInfo, err := h.getObjectTaggingAndLock(r.Context(), t, extendedInfo.NodeVersion)
|
||||||
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
|
if err != nil && !errors.IsS3Error(err, errors.ErrNoSuchKey) {
|
||||||
h.logAndSendError(w, "could not get object meta data", reqInfo, err)
|
h.logAndSendError(w, "could not get object meta data", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(info.ContentType) == 0 {
|
if len(info.ContentType) == 0 {
|
||||||
if info.ContentType = layer.MimeByFilePath(info.Name); len(info.ContentType) == 0 {
|
if info.ContentType = MimeByFilePath(info.Name); len(info.ContentType) == 0 {
|
||||||
buffer := bytes.NewBuffer(make([]byte, 0, sizeToDetectType))
|
buffer := bytes.NewBuffer(make([]byte, 0, sizeToDetectType))
|
||||||
getParams := &layer.GetObjectParams{
|
getParams := &GetObjectParams{
|
||||||
ObjectInfo: info,
|
ObjectInfo: info,
|
||||||
Writer: buffer,
|
Writer: buffer,
|
||||||
Range: getRangeToDetectContentType(info.Size),
|
Range: getRangeToDetectContentType(info.Size),
|
||||||
BucketInfo: bktInfo,
|
BucketInfo: bktInfo,
|
||||||
}
|
}
|
||||||
if err = h.obj.GetObject(r.Context(), getParams); err != nil {
|
if err = h.getObject(r.Context(), getParams); err != nil {
|
||||||
h.logAndSendError(w, "could not get object", reqInfo, err, zap.Stringer("oid", info.ID))
|
h.logAndSendError(w, "could not get object", reqInfo, err, zap.Stringer("oid", info.ID))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -103,7 +102,7 @@ func (h *handler) HeadObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
bktSettings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
|
2484
api/handler/layer.go
Normal file
2484
api/handler/layer.go
Normal file
File diff suppressed because it is too large
Load diff
|
@ -18,7 +18,7 @@ func (h *handler) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo = api.GetReqInfo(r.Context())
|
reqInfo = api.GetReqInfo(r.Context())
|
||||||
)
|
)
|
||||||
|
|
||||||
list, err := h.obj.ListBuckets(r.Context())
|
list, err := h.containerList(r.Context())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
|
|
@ -11,7 +11,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
apiErrors "github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
apiErrors "github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -51,7 +50,7 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
settings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -61,12 +60,12 @@ func (h *handler) PutBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
|
||||||
newSettings := *settings
|
newSettings := *settings
|
||||||
newSettings.LockConfiguration = lockingConf
|
newSettings.LockConfiguration = lockingConf
|
||||||
|
|
||||||
sp := &layer.PutSettingsParams{
|
sp := &PutSettingsParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Settings: &newSettings,
|
Settings: &newSettings,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketSettings(r.Context(), sp); err != nil {
|
if err = h.putBucketSettings(r.Context(), sp); err != nil {
|
||||||
h.logAndSendError(w, "couldn't put bucket settings", reqInfo, err)
|
h.logAndSendError(w, "couldn't put bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -87,7 +86,7 @@ func (h *handler) GetBucketObjectLockConfigHandler(w http.ResponseWriter, r *htt
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
settings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -132,8 +131,8 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.PutLockInfoParams{
|
p := &PutLockInfoParams{
|
||||||
ObjVersion: &layer.ObjectVersion{
|
ObjVersion: &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: reqInfo.ObjectName,
|
ObjectName: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
|
@ -146,7 +145,7 @@ func (h *handler) PutObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
|
||||||
CopiesNumber: h.cfg.CopiesNumber,
|
CopiesNumber: h.cfg.CopiesNumber,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutLockInfo(r.Context(), p); err != nil {
|
if err = h.putLockInfo(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "couldn't head put legal hold", reqInfo, err)
|
h.logAndSendError(w, "couldn't head put legal hold", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -167,13 +166,13 @@ func (h *handler) GetObjectLegalHoldHandler(w http.ResponseWriter, r *http.Reque
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.ObjectVersion{
|
p := &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: reqInfo.ObjectName,
|
ObjectName: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
lockInfo, err := h.obj.GetLockInfo(r.Context(), p)
|
lockInfo, err := h.getLockInfo(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't head lock object", reqInfo, err)
|
h.logAndSendError(w, "couldn't head lock object", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -215,8 +214,8 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.PutLockInfoParams{
|
p := &PutLockInfoParams{
|
||||||
ObjVersion: &layer.ObjectVersion{
|
ObjVersion: &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: reqInfo.ObjectName,
|
ObjectName: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
|
@ -225,7 +224,7 @@ func (h *handler) PutObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
|
||||||
CopiesNumber: h.cfg.CopiesNumber,
|
CopiesNumber: h.cfg.CopiesNumber,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutLockInfo(r.Context(), p); err != nil {
|
if err = h.putLockInfo(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "couldn't put legal hold", reqInfo, err)
|
h.logAndSendError(w, "couldn't put legal hold", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -246,13 +245,13 @@ func (h *handler) GetObjectRetentionHandler(w http.ResponseWriter, r *http.Reque
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.ObjectVersion{
|
p := &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: reqInfo.ObjectName,
|
ObjectName: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
lockInfo, err := h.obj.GetLockInfo(r.Context(), p)
|
lockInfo, err := h.getLockInfo(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't head lock object", reqInfo, err)
|
h.logAndSendError(w, "couldn't head lock object", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -319,7 +318,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
|
||||||
retention := &data.RetentionLock{}
|
retention := &data.RetentionLock{}
|
||||||
defaultRetention := defaultConfig.Rule.DefaultRetention
|
defaultRetention := defaultConfig.Rule.DefaultRetention
|
||||||
retention.IsCompliance = defaultRetention.Mode == complianceMode
|
retention.IsCompliance = defaultRetention.Mode == complianceMode
|
||||||
now := layer.TimeNow(ctx)
|
now := TimeNow(ctx)
|
||||||
if defaultRetention.Days != 0 {
|
if defaultRetention.Days != 0 {
|
||||||
retention.Until = now.Add(time.Duration(defaultRetention.Days) * dayDuration)
|
retention.Until = now.Add(time.Duration(defaultRetention.Days) * dayDuration)
|
||||||
} else {
|
} else {
|
||||||
|
@ -371,7 +370,7 @@ func formObjectLock(ctx context.Context, bktInfo *data.BucketInfo, defaultConfig
|
||||||
objectLock.Retention.ByPassedGovernance = bypass
|
objectLock.Retention.ByPassedGovernance = bypass
|
||||||
}
|
}
|
||||||
|
|
||||||
if objectLock.Retention.Until.Before(layer.TimeNow(ctx)) {
|
if objectLock.Retention.Until.Before(TimeNow(ctx)) {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
|
return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -395,7 +394,7 @@ func formObjectLockFromRetention(ctx context.Context, retention *data.Retention,
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
|
return nil, apiErrors.GetAPIError(apiErrors.ErrMalformedXML)
|
||||||
}
|
}
|
||||||
|
|
||||||
if retentionDate.Before(layer.TimeNow(ctx)) {
|
if retentionDate.Before(TimeNow(ctx)) {
|
||||||
return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
|
return nil, apiErrors.GetAPIError(apiErrors.ErrPastObjectLockRetainDate)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -322,9 +322,9 @@ func TestPutBucketLockConfigurationHandler(t *testing.T) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktInfo, err := hc.Layer().GetBucketInfo(ctx, tc.bucket)
|
bktInfo, err := hc.h.getBucketInfo(ctx, tc.bucket)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
bktSettings, err := hc.Layer().GetBucketSettings(ctx, bktInfo)
|
bktSettings, err := hc.h.getBucketSettings(ctx, bktInfo)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
actualConf := bktSettings.LockConfiguration
|
actualConf := bktSettings.LockConfiguration
|
||||||
require.True(t, bktSettings.VersioningEnabled())
|
require.True(t, bktSettings.VersioningEnabled())
|
||||||
|
@ -632,3 +632,45 @@ func assertRetentionApproximate(t *testing.T, w *httptest.ResponseRecorder, rete
|
||||||
|
|
||||||
require.InDelta(t, expectedUntil.Unix(), actualUntil.Unix(), delta)
|
require.InDelta(t, expectedUntil.Unix(), actualUntil.Unix(), delta)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestObjectLockAttributes(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
bktName, objName := "bkt-name", "obj-name"
|
||||||
|
|
||||||
|
bktInfo := createTestBucket(hc, bktName)
|
||||||
|
putBucketVersioning(t, hc, bktName, true)
|
||||||
|
|
||||||
|
version1, _ := putObjectContent(hc, bktName, objName, "content obj1 v1")
|
||||||
|
|
||||||
|
p := &PutLockInfoParams{
|
||||||
|
ObjVersion: &ObjectVersion{
|
||||||
|
BktInfo: bktInfo,
|
||||||
|
ObjectName: objName,
|
||||||
|
VersionID: version1,
|
||||||
|
},
|
||||||
|
NewLock: &data.ObjectLock{
|
||||||
|
Retention: &data.RetentionLock{
|
||||||
|
Until: time.Now(),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
CopiesNumber: 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := hc.h.putLockInfo(hc.context, p)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
foundLock, err := hc.h.getLockInfo(hc.context, p.ObjVersion)
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
lockObj := hc.getObjectByID(foundLock.Retention())
|
||||||
|
require.NotNil(t, lockObj)
|
||||||
|
|
||||||
|
expEpoch := false
|
||||||
|
for _, attr := range lockObj.Attributes() {
|
||||||
|
if attr.Key() == AttributeExpirationEpoch {
|
||||||
|
expEpoch = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
require.Truef(t, expEpoch, "system header __NEOFS__EXPIRATION_EPOCH presence")
|
||||||
|
}
|
||||||
|
|
|
@ -11,7 +11,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/session"
|
"github.com/nspcc-dev/neofs-sdk-go/session"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -56,7 +55,7 @@ type (
|
||||||
MaxParts int `xml:"MaxParts,omitempty"`
|
MaxParts int `xml:"MaxParts,omitempty"`
|
||||||
NextPartNumberMarker int `xml:"NextPartNumberMarker,omitempty"`
|
NextPartNumberMarker int `xml:"NextPartNumberMarker,omitempty"`
|
||||||
Owner Owner `xml:"Owner"`
|
Owner Owner `xml:"Owner"`
|
||||||
Parts []*layer.Part `xml:"Part"`
|
Parts []*Part `xml:"Part"`
|
||||||
PartNumberMarker int `xml:"PartNumberMarker,omitempty"`
|
PartNumberMarker int `xml:"PartNumberMarker,omitempty"`
|
||||||
StorageClass string `xml:"StorageClass,omitempty"`
|
StorageClass string `xml:"StorageClass,omitempty"`
|
||||||
UploadID string `xml:"UploadId"`
|
UploadID string `xml:"UploadId"`
|
||||||
|
@ -78,7 +77,7 @@ type (
|
||||||
|
|
||||||
CompleteMultipartUpload struct {
|
CompleteMultipartUpload struct {
|
||||||
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload"`
|
XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload"`
|
||||||
Parts []*layer.CompletedPart `xml:"Part"`
|
Parts []*CompletedPart `xml:"Part"`
|
||||||
}
|
}
|
||||||
|
|
||||||
UploadPartCopyResponse struct {
|
UploadPartCopyResponse struct {
|
||||||
|
@ -107,13 +106,13 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
|
||||||
zap.String("Key", reqInfo.ObjectName),
|
zap.String("Key", reqInfo.ObjectName),
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.CreateMultipartParams{
|
p := &CreateMultipartParams{
|
||||||
Info: &layer.UploadInfoParams{
|
Info: &UploadInfoParams{
|
||||||
UploadID: uploadID.String(),
|
UploadID: uploadID.String(),
|
||||||
Bkt: bktInfo,
|
Bkt: bktInfo,
|
||||||
Key: reqInfo.ObjectName,
|
Key: reqInfo.ObjectName,
|
||||||
},
|
},
|
||||||
Data: &layer.UploadData{},
|
Data: &UploadData{},
|
||||||
}
|
}
|
||||||
|
|
||||||
if containsACLHeaders(r) {
|
if containsACLHeaders(r) {
|
||||||
|
@ -154,7 +153,7 @@ func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Re
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.CreateMultipartUpload(r.Context(), p); err != nil {
|
if err = h.createMultipartUpload(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "could create multipart upload", reqInfo, err, additional...)
|
h.logAndSendError(w, "could create multipart upload", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -210,13 +209,13 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
)
|
)
|
||||||
|
|
||||||
partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
|
partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
|
||||||
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
|
if err != nil || partNumber < UploadMinPartNumber || partNumber > UploadMaxPartNumber {
|
||||||
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
|
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.UploadPartParams{
|
p := &UploadPartParams{
|
||||||
Info: &layer.UploadInfoParams{
|
Info: &UploadInfoParams{
|
||||||
UploadID: uploadID,
|
UploadID: uploadID,
|
||||||
Bkt: bktInfo,
|
Bkt: bktInfo,
|
||||||
Key: reqInfo.ObjectName,
|
Key: reqInfo.ObjectName,
|
||||||
|
@ -232,7 +231,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
hash, err := h.obj.UploadPart(r.Context(), p)
|
hash, err := h.UploadPart(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not upload a part", reqInfo, err, additional...)
|
h.logAndSendError(w, "could not upload a part", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
|
@ -246,7 +245,7 @@ func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
api.WriteSuccessResponseHeadersOnly(w)
|
api.WriteSuccessResponseHeadersOnly(w)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) UploadPartCopyHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
var (
|
var (
|
||||||
versionID string
|
versionID string
|
||||||
reqInfo = api.GetReqInfo(r.Context())
|
reqInfo = api.GetReqInfo(r.Context())
|
||||||
|
@ -256,7 +255,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||||
)
|
)
|
||||||
|
|
||||||
partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
|
partNumber, err := strconv.Atoi(queryValues.Get(partNumberHeaderName))
|
||||||
if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
|
if err != nil || partNumber < UploadMinPartNumber || partNumber > UploadMaxPartNumber {
|
||||||
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
|
h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -291,7 +290,7 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
srcInfo, err := h.obj.GetObjectInfo(r.Context(), &layer.HeadObjectParams{
|
srcInfo, err := h.getObjectInfo(r.Context(), &HeadObjectParams{
|
||||||
BktInfo: srcBktInfo,
|
BktInfo: srcBktInfo,
|
||||||
Object: srcObject,
|
Object: srcObject,
|
||||||
VersionID: versionID,
|
VersionID: versionID,
|
||||||
|
@ -319,8 +318,8 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.UploadCopyParams{
|
p := &UploadCopyParams{
|
||||||
Info: &layer.UploadInfoParams{
|
Info: &UploadInfoParams{
|
||||||
UploadID: uploadID,
|
UploadID: uploadID,
|
||||||
Bkt: bktInfo,
|
Bkt: bktInfo,
|
||||||
Key: reqInfo.ObjectName,
|
Key: reqInfo.ObjectName,
|
||||||
|
@ -337,12 +336,12 @@ func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = p.Info.Encryption.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
|
if err = p.Info.Encryption.MatchObjectEncryption(FormEncryptionInfo(srcInfo.Headers)); err != nil {
|
||||||
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
h.logAndSendError(w, "encryption doesn't match object", reqInfo, errors.GetAPIError(errors.ErrBadRequest), zap.Error(err))
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := h.obj.UploadPartCopy(r.Context(), p)
|
info, err := h.uploadPartCopy(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)
|
h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
|
@ -375,7 +374,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
|
||||||
sessionTokenSetEACL *session.Container
|
sessionTokenSetEACL *session.Container
|
||||||
|
|
||||||
uploadID = r.URL.Query().Get(uploadIDHeaderName)
|
uploadID = r.URL.Query().Get(uploadIDHeaderName)
|
||||||
uploadInfo = &layer.UploadInfoParams{
|
uploadInfo = &UploadInfoParams{
|
||||||
UploadID: uploadID,
|
UploadID: uploadID,
|
||||||
Bkt: bktInfo,
|
Bkt: bktInfo,
|
||||||
Key: reqInfo.ObjectName,
|
Key: reqInfo.ObjectName,
|
||||||
|
@ -394,12 +393,12 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
c := &layer.CompleteMultipartParams{
|
c := &CompleteMultipartParams{
|
||||||
Info: uploadInfo,
|
Info: uploadInfo,
|
||||||
Parts: reqBody.Parts,
|
Parts: reqBody.Parts,
|
||||||
}
|
}
|
||||||
|
|
||||||
uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(r.Context(), c)
|
uploadData, extendedObjInfo, err := h.completeMultipartUpload(r.Context(), c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not complete multipart upload", reqInfo, err, additional...)
|
h.logAndSendError(w, "could not complete multipart upload", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
|
@ -407,8 +406,8 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
|
||||||
objInfo := extendedObjInfo.ObjectInfo
|
objInfo := extendedObjInfo.ObjectInfo
|
||||||
|
|
||||||
if len(uploadData.TagSet) != 0 {
|
if len(uploadData.TagSet) != 0 {
|
||||||
tagPrm := &layer.PutObjectTaggingParams{
|
tagPrm := &PutObjectTaggingParams{
|
||||||
ObjectVersion: &layer.ObjectVersion{
|
ObjectVersion: &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: objInfo.Name,
|
ObjectName: objInfo.Name,
|
||||||
VersionID: objInfo.VersionID(),
|
VersionID: objInfo.VersionID(),
|
||||||
|
@ -416,7 +415,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
|
||||||
TagSet: uploadData.TagSet,
|
TagSet: uploadData.TagSet,
|
||||||
NodeVersion: extendedObjInfo.NodeVersion,
|
NodeVersion: extendedObjInfo.NodeVersion,
|
||||||
}
|
}
|
||||||
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
|
if _, err = h.putObjectTagging(r.Context(), tagPrm); err != nil {
|
||||||
h.logAndSendError(w, "could not put tagging file of completed multipart upload", reqInfo, err, additional...)
|
h.logAndSendError(w, "could not put tagging file of completed multipart upload", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -459,7 +458,7 @@ func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.
|
||||||
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
h.log.Error("couldn't send notification: %w", zap.Error(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
bktSettings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
bktSettings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
}
|
}
|
||||||
|
@ -492,7 +491,7 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
|
||||||
queryValues = reqInfo.URL.Query()
|
queryValues = reqInfo.URL.Query()
|
||||||
delimiter = queryValues.Get("delimiter")
|
delimiter = queryValues.Get("delimiter")
|
||||||
prefix = queryValues.Get("prefix")
|
prefix = queryValues.Get("prefix")
|
||||||
maxUploads = layer.MaxSizeUploadsList
|
maxUploads = MaxSizeUploadsList
|
||||||
)
|
)
|
||||||
|
|
||||||
if queryValues.Get("max-uploads") != "" {
|
if queryValues.Get("max-uploads") != "" {
|
||||||
|
@ -506,7 +505,7 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.ListMultipartUploadsParams{
|
p := &ListMultipartUploadsParams{
|
||||||
Bkt: bktInfo,
|
Bkt: bktInfo,
|
||||||
Delimiter: delimiter,
|
Delimiter: delimiter,
|
||||||
EncodingType: queryValues.Get("encoding-type"),
|
EncodingType: queryValues.Get("encoding-type"),
|
||||||
|
@ -516,7 +515,7 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
|
||||||
UploadIDMarker: queryValues.Get("upload-id-marker"),
|
UploadIDMarker: queryValues.Get("upload-id-marker"),
|
||||||
}
|
}
|
||||||
|
|
||||||
list, err := h.obj.ListMultipartUploads(r.Context(), p)
|
list, err := h.listMultipartUploads(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not list multipart uploads", reqInfo, err)
|
h.logAndSendError(w, "could not list multipart uploads", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -542,7 +541,7 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
queryValues = reqInfo.URL.Query()
|
queryValues = reqInfo.URL.Query()
|
||||||
uploadID = queryValues.Get(uploadIDHeaderName)
|
uploadID = queryValues.Get(uploadIDHeaderName)
|
||||||
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
|
additional = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
|
||||||
maxParts = layer.MaxSizePartsList
|
maxParts = MaxSizePartsList
|
||||||
)
|
)
|
||||||
|
|
||||||
if queryValues.Get("max-parts") != "" {
|
if queryValues.Get("max-parts") != "" {
|
||||||
|
@ -551,7 +550,7 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
h.logAndSendError(w, "invalid MaxParts", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxParts), additional...)
|
h.logAndSendError(w, "invalid MaxParts", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxParts), additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if val < layer.MaxSizePartsList {
|
if val < MaxSizePartsList {
|
||||||
maxParts = val
|
maxParts = val
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -563,8 +562,8 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.ListPartsParams{
|
p := &ListPartsParams{
|
||||||
Info: &layer.UploadInfoParams{
|
Info: &UploadInfoParams{
|
||||||
UploadID: uploadID,
|
UploadID: uploadID,
|
||||||
Bkt: bktInfo,
|
Bkt: bktInfo,
|
||||||
Key: reqInfo.ObjectName,
|
Key: reqInfo.ObjectName,
|
||||||
|
@ -579,7 +578,7 @@ func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
list, err := h.obj.ListParts(r.Context(), p)
|
list, err := h.listParts(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not list parts", reqInfo, err, additional...)
|
h.logAndSendError(w, "could not list parts", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
|
@ -602,7 +601,7 @@ func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Req
|
||||||
uploadID := reqInfo.URL.Query().Get(uploadIDHeaderName)
|
uploadID := reqInfo.URL.Query().Get(uploadIDHeaderName)
|
||||||
additional := []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
|
additional := []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
|
||||||
|
|
||||||
p := &layer.UploadInfoParams{
|
p := &UploadInfoParams{
|
||||||
UploadID: uploadID,
|
UploadID: uploadID,
|
||||||
Bkt: bktInfo,
|
Bkt: bktInfo,
|
||||||
Key: reqInfo.ObjectName,
|
Key: reqInfo.ObjectName,
|
||||||
|
@ -614,7 +613,7 @@ func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Req
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.AbortMultipartUpload(r.Context(), p); err != nil {
|
if err = h.abortMultipartUpload(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "could not abort multipart upload", reqInfo, err, additional...)
|
h.logAndSendError(w, "could not abort multipart upload", reqInfo, err, additional...)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -622,7 +621,7 @@ func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Req
|
||||||
w.WriteHeader(http.StatusNoContent)
|
w.WriteHeader(http.StatusNoContent)
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo, params *layer.ListMultipartUploadsParams) *ListMultipartUploadsResponse {
|
func encodeListMultipartUploadsToResponse(info *ListMultipartUploadsInfo, params *ListMultipartUploadsParams) *ListMultipartUploadsResponse {
|
||||||
res := ListMultipartUploadsResponse{
|
res := ListMultipartUploadsResponse{
|
||||||
Bucket: params.Bkt.Name,
|
Bucket: params.Bkt.Name,
|
||||||
CommonPrefixes: fillPrefixes(info.Prefixes, params.EncodingType),
|
CommonPrefixes: fillPrefixes(info.Prefixes, params.EncodingType),
|
||||||
|
@ -660,7 +659,7 @@ func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo,
|
||||||
return &res
|
return &res
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeListPartsToResponse(info *layer.ListPartsInfo, params *layer.ListPartsParams) *ListPartsResponse {
|
func encodeListPartsToResponse(info *ListPartsInfo, params *ListPartsParams) *ListPartsResponse {
|
||||||
return &ListPartsResponse{
|
return &ListPartsResponse{
|
||||||
XMLName: xml.Name{},
|
XMLName: xml.Name{},
|
||||||
Bucket: params.Info.Bkt.Name,
|
Bucket: params.Info.Bkt.Name,
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
package layer
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"sort"
|
"sort"
|
|
@ -1,4 +1,4 @@
|
||||||
package layer
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
|
@ -1,4 +1,4 @@
|
||||||
package layer
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
|
@ -12,7 +12,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/bearer"
|
"github.com/nspcc-dev/neofs-sdk-go/bearer"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -114,14 +113,14 @@ func (h *handler) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Re
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.PutBucketNotificationConfigurationParams{
|
p := &PutBucketNotificationConfigurationParams{
|
||||||
RequestInfo: reqInfo,
|
RequestInfo: reqInfo,
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Configuration: conf,
|
Configuration: conf,
|
||||||
CopiesNumber: h.cfg.CopiesNumber,
|
CopiesNumber: h.cfg.CopiesNumber,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketNotificationConfiguration(r.Context(), p); err != nil {
|
if err = h.putBucketNotificationConfiguration(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "couldn't put bucket configuration", reqInfo, err)
|
h.logAndSendError(w, "couldn't put bucket configuration", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -136,7 +135,7 @@ func (h *handler) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Re
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
conf, err := h.obj.GetBucketNotificationConfiguration(r.Context(), bktInfo)
|
conf, err := h.getBucketNotificationConfiguration(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket notification configuration", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket notification configuration", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -153,7 +152,7 @@ func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationPara
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
conf, err := h.obj.GetBucketNotificationConfiguration(ctx, p.BktInfo)
|
conf, err := h.getBucketNotificationConfiguration(ctx, p.BktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to get notification configuration: %w", err)
|
return fmt.Errorf("failed to get notification configuration: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -161,12 +160,12 @@ func (h *handler) sendNotifications(ctx context.Context, p *SendNotificationPara
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
box, err := layer.GetBoxData(ctx)
|
box, err := GetBoxData(ctx)
|
||||||
if err == nil && box.Gate.BearerToken != nil {
|
if err == nil && box.Gate.BearerToken != nil {
|
||||||
p.User = bearer.ResolveIssuer(*box.Gate.BearerToken).EncodeToString()
|
p.User = bearer.ResolveIssuer(*box.Gate.BearerToken).EncodeToString()
|
||||||
}
|
}
|
||||||
|
|
||||||
p.Time = layer.TimeNow(ctx)
|
p.Time = TimeNow(ctx)
|
||||||
|
|
||||||
topics := filterSubjects(conf, p.Event, p.NotificationInfo.Name)
|
topics := filterSubjects(conf, p.Event, p.NotificationInfo.Name)
|
||||||
|
|
||||||
|
@ -193,7 +192,7 @@ func (h *handler) checkBucketConfiguration(ctx context.Context, conf *data.Notif
|
||||||
}
|
}
|
||||||
|
|
||||||
if h.cfg.NotificatorEnabled {
|
if h.cfg.NotificatorEnabled {
|
||||||
if err = h.notificator.SendTestNotification(q.QueueArn, r.BucketName, r.RequestID, r.Host, layer.TimeNow(ctx)); err != nil {
|
if err = h.notificator.SendTestNotification(q.QueueArn, r.BucketName, r.RequestID, r.Host, TimeNow(ctx)); err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -9,7 +9,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -27,7 +26,7 @@ func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
list, err := h.obj.ListObjectsV1(r.Context(), params)
|
list, err := h.listObjectsV1(r.Context(), params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -38,7 +37,7 @@ func (h *handler) ListObjectsV1Handler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeV1(p *layer.ListObjectsParamsV1, list *layer.ListObjectsInfoV1) *ListObjectsV1Response {
|
func encodeV1(p *ListObjectsParamsV1, list *ListObjectsInfoV1) *ListObjectsV1Response {
|
||||||
res := &ListObjectsV1Response{
|
res := &ListObjectsV1Response{
|
||||||
Name: p.BktInfo.Name,
|
Name: p.BktInfo.Name,
|
||||||
EncodingType: p.Encode,
|
EncodingType: p.Encode,
|
||||||
|
@ -71,7 +70,7 @@ func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
list, err := h.obj.ListObjectsV2(r.Context(), params)
|
list, err := h.listObjectsV2(r.Context(), params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -82,7 +81,7 @@ func (h *handler) ListObjectsV2Handler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *ListObjectsV2Response {
|
func encodeV2(p *ListObjectsParamsV2, list *ListObjectsInfoV2) *ListObjectsV2Response {
|
||||||
res := &ListObjectsV2Response{
|
res := &ListObjectsV2Response{
|
||||||
Name: p.BktInfo.Name,
|
Name: p.BktInfo.Name,
|
||||||
EncodingType: p.Encode,
|
EncodingType: p.Encode,
|
||||||
|
@ -103,9 +102,9 @@ func encodeV2(p *layer.ListObjectsParamsV2, list *layer.ListObjectsInfoV2) *List
|
||||||
return res
|
return res
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseListObjectsArgsV1(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV1, error) {
|
func parseListObjectsArgsV1(reqInfo *api.ReqInfo) (*ListObjectsParamsV1, error) {
|
||||||
var (
|
var (
|
||||||
res layer.ListObjectsParamsV1
|
res ListObjectsParamsV1
|
||||||
queryValues = reqInfo.URL.Query()
|
queryValues = reqInfo.URL.Query()
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -120,9 +119,9 @@ func parseListObjectsArgsV1(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV1, e
|
||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseListObjectsArgsV2(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV2, error) {
|
func parseListObjectsArgsV2(reqInfo *api.ReqInfo) (*ListObjectsParamsV2, error) {
|
||||||
var (
|
var (
|
||||||
res layer.ListObjectsParamsV2
|
res ListObjectsParamsV2
|
||||||
queryValues = reqInfo.URL.Query()
|
queryValues = reqInfo.URL.Query()
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -142,10 +141,10 @@ func parseListObjectsArgsV2(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsV2, e
|
||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseListObjectArgs(reqInfo *api.ReqInfo) (*layer.ListObjectsParamsCommon, error) {
|
func parseListObjectArgs(reqInfo *api.ReqInfo) (*ListObjectsParamsCommon, error) {
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
res layer.ListObjectsParamsCommon
|
res ListObjectsParamsCommon
|
||||||
queryValues = reqInfo.URL.Query()
|
queryValues = reqInfo.URL.Query()
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -223,7 +222,7 @@ func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
info, err := h.obj.ListObjectVersions(r.Context(), p)
|
info, err := h.listObjectVersions(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
h.logAndSendError(w, "something went wrong", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -235,10 +234,10 @@ func (h *handler) ListBucketObjectVersionsHandler(w http.ResponseWriter, r *http
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseListObjectVersionsRequest(reqInfo *api.ReqInfo) (*layer.ListObjectVersionsParams, error) {
|
func parseListObjectVersionsRequest(reqInfo *api.ReqInfo) (*ListObjectVersionsParams, error) {
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
res layer.ListObjectVersionsParams
|
res ListObjectVersionsParams
|
||||||
queryValues = reqInfo.URL.Query()
|
queryValues = reqInfo.URL.Query()
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -257,7 +256,7 @@ func parseListObjectVersionsRequest(reqInfo *api.ReqInfo) (*layer.ListObjectVers
|
||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func encodeListObjectVersionsToResponse(info *layer.ListObjectVersionsInfo, bucketName string) *ListObjectsVersionsResponse {
|
func encodeListObjectVersionsToResponse(info *ListObjectVersionsInfo, bucketName string) *ListObjectsVersionsResponse {
|
||||||
res := ListObjectsVersionsResponse{
|
res := ListObjectsVersionsResponse{
|
||||||
Name: bucketName,
|
Name: bucketName,
|
||||||
IsTruncated: info.IsTruncated,
|
IsTruncated: info.IsTruncated,
|
||||||
|
|
|
@ -182,6 +182,7 @@ func prepareCommonListObjectsQuery(prefix, delimiter string, maxKeys int) url.Va
|
||||||
return query
|
return query
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// maxKeys -1 means omit value.
|
||||||
func listObjectsV1(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int) *ListObjectsV1Response {
|
func listObjectsV1(t *testing.T, tc *handlerContext, bktName, prefix, delimiter, marker string, maxKeys int) *ListObjectsV1Response {
|
||||||
query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
|
query := prepareCommonListObjectsQuery(prefix, delimiter, maxKeys)
|
||||||
if len(marker) != 0 {
|
if len(marker) != 0 {
|
||||||
|
|
|
@ -20,8 +20,7 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/auth"
|
"github.com/nspcc-dev/neofs-s3-gw/api/auth"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
"github.com/nspcc-dev/neofs-s3-gw/api/handler/encryption"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer/encryption"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
|
"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/eacl"
|
"github.com/nspcc-dev/neofs-sdk-go/eacl"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/session"
|
"github.com/nspcc-dev/neofs-sdk-go/session"
|
||||||
|
@ -224,7 +223,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
params := &layer.PutObjectParams{
|
params := &PutObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: reqInfo.ObjectName,
|
Object: reqInfo.ObjectName,
|
||||||
Reader: r.Body,
|
Reader: r.Body,
|
||||||
|
@ -234,7 +233,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
CopiesNumber: copiesNumber,
|
CopiesNumber: copiesNumber,
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
settings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -246,7 +245,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedObjInfo, err := h.obj.PutObject(r.Context(), params)
|
extendedObjInfo, err := h.putObject(r.Context(), params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_, err2 := io.Copy(io.Discard, r.Body)
|
_, err2 := io.Copy(io.Discard, r.Body)
|
||||||
err3 := r.Body.Close()
|
err3 := r.Body.Close()
|
||||||
|
@ -273,8 +272,8 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if tagSet != nil {
|
if tagSet != nil {
|
||||||
tagPrm := &layer.PutObjectTaggingParams{
|
tagPrm := &PutObjectTaggingParams{
|
||||||
ObjectVersion: &layer.ObjectVersion{
|
ObjectVersion: &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: objInfo.Name,
|
ObjectName: objInfo.Name,
|
||||||
VersionID: objInfo.VersionID(),
|
VersionID: objInfo.VersionID(),
|
||||||
|
@ -282,20 +281,20 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
TagSet: tagSet,
|
TagSet: tagSet,
|
||||||
NodeVersion: extendedObjInfo.NodeVersion,
|
NodeVersion: extendedObjInfo.NodeVersion,
|
||||||
}
|
}
|
||||||
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
|
if _, err = h.putObjectTagging(r.Context(), tagPrm); err != nil {
|
||||||
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if newEaclTable != nil {
|
if newEaclTable != nil {
|
||||||
p := &layer.PutBucketACLParams{
|
p := &PutBucketACLParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
EACL: newEaclTable,
|
EACL: newEaclTable,
|
||||||
SessionToken: sessionTokenEACL,
|
SessionToken: sessionTokenEACL,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
|
if err = h.putBucketACL(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -313,7 +312,7 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func getCopiesNumberOrDefault(metadata map[string]string, defaultCopiesNumber uint32) (uint32, error) {
|
func getCopiesNumberOrDefault(metadata map[string]string, defaultCopiesNumber uint32) (uint32, error) {
|
||||||
copiesNumberStr, ok := metadata[layer.AttributeNeofsCopiesNumber]
|
copiesNumberStr, ok := metadata[AttributeNeofsCopiesNumber]
|
||||||
if !ok {
|
if !ok {
|
||||||
return defaultCopiesNumber, nil
|
return defaultCopiesNumber, nil
|
||||||
}
|
}
|
||||||
|
@ -339,7 +338,7 @@ func formEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
|
||||||
return enc, errorsStd.New("encryption available only when TLS is enabled")
|
return enc, errorsStd.New("encryption available only when TLS is enabled")
|
||||||
}
|
}
|
||||||
|
|
||||||
if sseCustomerAlgorithm != layer.AESEncryptionAlgorithm {
|
if sseCustomerAlgorithm != AESEncryptionAlgorithm {
|
||||||
return enc, errors.GetAPIError(errors.ErrInvalidEncryptionAlgorithm)
|
return enc, errors.GetAPIError(errors.ErrInvalidEncryptionAlgorithm)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -348,7 +347,7 @@ func formEncryptionParams(r *http.Request) (enc encryption.Params, err error) {
|
||||||
return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
|
return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(key) != layer.AESKeySize {
|
if len(key) != AESKeySize {
|
||||||
return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
|
return enc, errors.GetAPIError(errors.ErrInvalidSSECustomerKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -422,13 +421,13 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
bktInfo, err := h.obj.GetBucketInfo(r.Context(), reqInfo.BucketName)
|
bktInfo, err := h.getBucketInfo(r.Context(), reqInfo.BucketName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket info", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
params := &layer.PutObjectParams{
|
params := &PutObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: reqInfo.ObjectName,
|
Object: reqInfo.ObjectName,
|
||||||
Reader: contentReader,
|
Reader: contentReader,
|
||||||
|
@ -436,7 +435,7 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
||||||
Header: metadata,
|
Header: metadata,
|
||||||
}
|
}
|
||||||
|
|
||||||
extendedObjInfo, err := h.obj.PutObject(r.Context(), params)
|
extendedObjInfo, err := h.putObject(r.Context(), params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not upload object", reqInfo, err)
|
h.logAndSendError(w, "could not upload object", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -466,8 +465,8 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if tagSet != nil {
|
if tagSet != nil {
|
||||||
tagPrm := &layer.PutObjectTaggingParams{
|
tagPrm := &PutObjectTaggingParams{
|
||||||
ObjectVersion: &layer.ObjectVersion{
|
ObjectVersion: &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: objInfo.Name,
|
ObjectName: objInfo.Name,
|
||||||
VersionID: objInfo.VersionID(),
|
VersionID: objInfo.VersionID(),
|
||||||
|
@ -475,26 +474,26 @@ func (h *handler) PostObject(w http.ResponseWriter, r *http.Request) {
|
||||||
NodeVersion: extendedObjInfo.NodeVersion,
|
NodeVersion: extendedObjInfo.NodeVersion,
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = h.obj.PutObjectTagging(r.Context(), tagPrm); err != nil {
|
if _, err = h.putObjectTagging(r.Context(), tagPrm); err != nil {
|
||||||
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not upload object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if newEaclTable != nil {
|
if newEaclTable != nil {
|
||||||
p := &layer.PutBucketACLParams{
|
p := &PutBucketACLParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
EACL: newEaclTable,
|
EACL: newEaclTable,
|
||||||
SessionToken: sessionTokenEACL,
|
SessionToken: sessionTokenEACL,
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketACL(r.Context(), p); err != nil {
|
if err = h.putBucketACL(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
h.logAndSendError(w, "could not put bucket acl", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo); err != nil {
|
if settings, err := h.getBucketSettings(r.Context(), bktInfo); err != nil {
|
||||||
h.log.Warn("couldn't get bucket versioning", zap.String("bucket name", reqInfo.BucketName), zap.Error(err))
|
h.log.Warn("couldn't get bucket versioning", zap.String("bucket name", reqInfo.BucketName), zap.Error(err))
|
||||||
} else if settings.VersioningEnabled() {
|
} else if settings.VersioningEnabled() {
|
||||||
w.Header().Set(api.AmzVersionID, objInfo.VersionID())
|
w.Header().Set(api.AmzVersionID, objInfo.VersionID())
|
||||||
|
@ -610,7 +609,7 @@ func (h *handler) getNewEAclTable(r *http.Request, bktInfo *data.BucketInfo, obj
|
||||||
return nil, fmt.Errorf("could not translate policy to ast: %w", err)
|
return nil, fmt.Errorf("could not translate policy to ast: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
bacl, err := h.obj.GetBucketACL(r.Context(), bktInfo)
|
bacl, err := h.getBucketACL(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("could not get bucket eacl: %w", err)
|
return nil, fmt.Errorf("could not get bucket eacl: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -668,7 +667,7 @@ func parseMetadata(r *http.Request) map[string]string {
|
||||||
|
|
||||||
func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
reqInfo := api.GetReqInfo(r.Context())
|
reqInfo := api.GetReqInfo(r.Context())
|
||||||
p := &layer.CreateBucketParams{
|
p := &CreateBucketParams{
|
||||||
Name: reqInfo.BucketName,
|
Name: reqInfo.BucketName,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -703,7 +702,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
}
|
}
|
||||||
|
|
||||||
var policies []*accessbox.ContainerPolicy
|
var policies []*accessbox.ContainerPolicy
|
||||||
boxData, err := layer.GetBoxData(r.Context())
|
boxData, err := GetBoxData(r.Context())
|
||||||
if err == nil {
|
if err == nil {
|
||||||
policies = boxData.Policies
|
policies = boxData.Policies
|
||||||
p.SessionContainerCreation = boxData.Gate.SessionTokenForPut()
|
p.SessionContainerCreation = boxData.Gate.SessionTokenForPut()
|
||||||
|
@ -724,18 +723,18 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
p.ObjectLockEnabled = isLockEnabled(r.Header)
|
p.ObjectLockEnabled = isLockEnabled(r.Header)
|
||||||
|
|
||||||
bktInfo, err := h.obj.CreateBucket(r.Context(), p)
|
bktInfo, err := h.createBucket(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not create bucket", reqInfo, err)
|
h.logAndSendError(w, "could not create bucket", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if p.ObjectLockEnabled {
|
if p.ObjectLockEnabled {
|
||||||
sp := &layer.PutSettingsParams{
|
sp := &PutSettingsParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
|
Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
|
||||||
}
|
}
|
||||||
if err = h.obj.PutBucketSettings(r.Context(), sp); err != nil {
|
if err = h.putBucketSettings(r.Context(), sp); err != nil {
|
||||||
h.logAndSendError(w, "couldn't enable bucket versioning", reqInfo, err,
|
h.logAndSendError(w, "couldn't enable bucket versioning", reqInfo, err,
|
||||||
zap.String("container_id", bktInfo.CID.EncodeToString()))
|
zap.String("container_id", bktInfo.CID.EncodeToString()))
|
||||||
return
|
return
|
||||||
|
@ -747,7 +746,7 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
api.WriteSuccessResponseHeadersOnly(w)
|
api.WriteSuccessResponseHeadersOnly(w)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h handler) setPolicy(prm *layer.CreateBucketParams, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) {
|
func (h handler) setPolicy(prm *CreateBucketParams, locationConstraint string, userPolicies []*accessbox.ContainerPolicy) {
|
||||||
prm.Policy = h.cfg.Policy.Default()
|
prm.Policy = h.cfg.Policy.Default()
|
||||||
|
|
||||||
if locationConstraint == "" {
|
if locationConstraint == "" {
|
||||||
|
|
|
@ -9,7 +9,6 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -114,15 +113,15 @@ func TestPutObjectOverrideCopiesNumber(t *testing.T) {
|
||||||
bktInfo := createTestBucket(tc, bktName)
|
bktInfo := createTestBucket(tc, bktName)
|
||||||
|
|
||||||
w, r := prepareTestRequest(tc, bktName, objName, nil)
|
w, r := prepareTestRequest(tc, bktName, objName, nil)
|
||||||
r.Header.Set(api.MetadataPrefix+strings.ToUpper(layer.AttributeNeofsCopiesNumber), "1")
|
r.Header.Set(api.MetadataPrefix+strings.ToUpper(AttributeNeofsCopiesNumber), "1")
|
||||||
tc.Handler().PutObjectHandler(w, r)
|
tc.Handler().PutObjectHandler(w, r)
|
||||||
|
|
||||||
p := &layer.HeadObjectParams{
|
p := &HeadObjectParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Object: objName,
|
Object: objName,
|
||||||
}
|
}
|
||||||
|
|
||||||
objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), p)
|
objInfo, err := tc.h.getObjectInfo(tc.Context(), p)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
require.Equal(t, "1", objInfo.Headers[layer.AttributeNeofsCopiesNumber])
|
require.Equal(t, "1", objInfo.Headers[AttributeNeofsCopiesNumber])
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
package layer
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
@ -19,26 +19,19 @@ const (
|
||||||
AttributeExpirationEpoch = "__NEOFS__EXPIRATION_EPOCH"
|
AttributeExpirationEpoch = "__NEOFS__EXPIRATION_EPOCH"
|
||||||
)
|
)
|
||||||
|
|
||||||
type PutLockInfoParams struct {
|
func (h *handler) putLockInfo(ctx context.Context, p *PutLockInfoParams) (err error) {
|
||||||
ObjVersion *ObjectVersion
|
|
||||||
NewLock *data.ObjectLock
|
|
||||||
CopiesNumber uint32
|
|
||||||
NodeVersion *data.NodeVersion // optional
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err error) {
|
|
||||||
newLock := p.NewLock
|
newLock := p.NewLock
|
||||||
versionNode := p.NodeVersion
|
versionNode := p.NodeVersion
|
||||||
// sometimes node version can be provided from executing context
|
// sometimes node version can be provided from executing context
|
||||||
// if not, then receive node version from tree service
|
// if not, then receive node version from tree service
|
||||||
if versionNode == nil {
|
if versionNode == nil {
|
||||||
versionNode, err = n.getNodeVersionFromCacheOrNeofs(ctx, p.ObjVersion)
|
versionNode, err = h.getNodeVersionFromCacheOrNeofs(ctx, p.ObjVersion)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
lockInfo, err := n.treeService.GetLock(ctx, p.ObjVersion.BktInfo, versionNode.ID)
|
lockInfo, err := h.treeService.GetLock(ctx, p.ObjVersion.BktInfo, versionNode.ID)
|
||||||
if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
|
if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -68,7 +61,7 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
lock := &data.ObjectLock{Retention: newLock.Retention}
|
lock := &data.ObjectLock{Retention: newLock.Retention}
|
||||||
retentionOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumber)
|
retentionOID, err := h.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumber)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -78,40 +71,40 @@ func (n *layer) PutLockInfo(ctx context.Context, p *PutLockInfoParams) (err erro
|
||||||
if newLock.LegalHold != nil {
|
if newLock.LegalHold != nil {
|
||||||
if newLock.LegalHold.Enabled && !lockInfo.IsLegalHoldSet() {
|
if newLock.LegalHold.Enabled && !lockInfo.IsLegalHoldSet() {
|
||||||
lock := &data.ObjectLock{LegalHold: newLock.LegalHold}
|
lock := &data.ObjectLock{LegalHold: newLock.LegalHold}
|
||||||
legalHoldOID, err := n.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumber)
|
legalHoldOID, err := h.putLockObject(ctx, p.ObjVersion.BktInfo, versionNode.OID, lock, p.CopiesNumber)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
lockInfo.SetLegalHold(legalHoldOID)
|
lockInfo.SetLegalHold(legalHoldOID)
|
||||||
} else if !newLock.LegalHold.Enabled && lockInfo.IsLegalHoldSet() {
|
} else if !newLock.LegalHold.Enabled && lockInfo.IsLegalHoldSet() {
|
||||||
if err = n.objectDelete(ctx, p.ObjVersion.BktInfo, lockInfo.LegalHold()); err != nil {
|
if err = h.objectDelete(ctx, p.ObjVersion.BktInfo, lockInfo.LegalHold()); err != nil {
|
||||||
return fmt.Errorf("couldn't delete lock object '%s' to remove legal hold: %w", lockInfo.LegalHold().EncodeToString(), err)
|
return fmt.Errorf("couldn't delete lock object '%s' to remove legal hold: %w", lockInfo.LegalHold().EncodeToString(), err)
|
||||||
}
|
}
|
||||||
lockInfo.ResetLegalHold()
|
lockInfo.ResetLegalHold()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = n.treeService.PutLock(ctx, p.ObjVersion.BktInfo, versionNode.ID, lockInfo); err != nil {
|
if err = h.treeService.PutLock(ctx, p.ObjVersion.BktInfo, versionNode.ID, lockInfo); err != nil {
|
||||||
return fmt.Errorf("couldn't put lock into tree: %w", err)
|
return fmt.Errorf("couldn't put lock into tree: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
n.cache.PutLockInfo(n.Owner(ctx), lockObjectKey(p.ObjVersion), lockInfo)
|
h.cache.PutLockInfo(h.Owner(ctx), lockObjectKey(p.ObjVersion), lockInfo)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *layer) getNodeVersionFromCacheOrNeofs(ctx context.Context, objVersion *ObjectVersion) (nodeVersion *data.NodeVersion, err error) {
|
func (h *handler) getNodeVersionFromCacheOrNeofs(ctx context.Context, objVersion *ObjectVersion) (nodeVersion *data.NodeVersion, err error) {
|
||||||
// check cache if node version is stored inside extendedObjectVersion
|
// check cache if node version is stored inside extendedObjectVersion
|
||||||
nodeVersion = n.getNodeVersionFromCache(n.Owner(ctx), objVersion)
|
nodeVersion = h.getNodeVersionFromCache(h.Owner(ctx), objVersion)
|
||||||
if nodeVersion == nil {
|
if nodeVersion == nil {
|
||||||
// else get node version from tree service
|
// else get node version from tree service
|
||||||
return n.getNodeVersion(ctx, objVersion)
|
return h.getNodeVersion(ctx, objVersion)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nodeVersion, nil
|
return nodeVersion, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber uint32) (oid.ID, error) {
|
func (h *handler) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID, lock *data.ObjectLock, copiesNumber uint32) (oid.ID, error) {
|
||||||
prm := PrmObjectCreate{
|
prm := PrmObjectCreate{
|
||||||
Container: bktInfo.CID,
|
Container: bktInfo.CID,
|
||||||
Creator: bktInfo.Owner,
|
Creator: bktInfo.Owner,
|
||||||
|
@ -121,27 +114,27 @@ func (n *layer) putLockObject(ctx context.Context, bktInfo *data.BucketInfo, obj
|
||||||
}
|
}
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
prm.Attributes, err = n.attributesFromLock(ctx, lock)
|
prm.Attributes, err = h.attributesFromLock(ctx, lock)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return oid.ID{}, err
|
return oid.ID{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
id, _, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
id, _, err := h.objectPutAndHash(ctx, prm, bktInfo)
|
||||||
return id, err
|
return id, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *layer) GetLockInfo(ctx context.Context, objVersion *ObjectVersion) (*data.LockInfo, error) {
|
func (h *handler) getLockInfo(ctx context.Context, objVersion *ObjectVersion) (*data.LockInfo, error) {
|
||||||
owner := n.Owner(ctx)
|
owner := h.Owner(ctx)
|
||||||
if lockInfo := n.cache.GetLockInfo(owner, lockObjectKey(objVersion)); lockInfo != nil {
|
if lockInfo := h.cache.GetLockInfo(owner, lockObjectKey(objVersion)); lockInfo != nil {
|
||||||
return lockInfo, nil
|
return lockInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
versionNode, err := n.getNodeVersion(ctx, objVersion)
|
versionNode, err := h.getNodeVersion(ctx, objVersion)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
lockInfo, err := n.treeService.GetLock(ctx, objVersion.BktInfo, versionNode.ID)
|
lockInfo, err := h.treeService.GetLock(ctx, objVersion.BktInfo, versionNode.ID)
|
||||||
if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
|
if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -149,18 +142,18 @@ func (n *layer) GetLockInfo(ctx context.Context, objVersion *ObjectVersion) (*da
|
||||||
lockInfo = &data.LockInfo{}
|
lockInfo = &data.LockInfo{}
|
||||||
}
|
}
|
||||||
|
|
||||||
n.cache.PutLockInfo(owner, lockObjectKey(objVersion), lockInfo)
|
h.cache.PutLockInfo(owner, lockObjectKey(objVersion), lockInfo)
|
||||||
|
|
||||||
return lockInfo, nil
|
return lockInfo, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSConfiguration, error) {
|
func (h *handler) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSConfiguration, error) {
|
||||||
owner := n.Owner(ctx)
|
owner := h.Owner(ctx)
|
||||||
if cors := n.cache.GetCORS(owner, bkt); cors != nil {
|
if cors := h.cache.GetCORS(owner, bkt); cors != nil {
|
||||||
return cors, nil
|
return cors, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
objID, err := n.treeService.GetBucketCORS(ctx, bkt)
|
objID, err := h.treeService.GetBucketCORS(ctx, bkt)
|
||||||
objIDNotFound := errorsStd.Is(err, ErrNodeNotFound)
|
objIDNotFound := errorsStd.Is(err, ErrNodeNotFound)
|
||||||
if err != nil && !objIDNotFound {
|
if err != nil && !objIDNotFound {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -170,7 +163,7 @@ func (n *layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSCo
|
||||||
return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
|
return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
|
||||||
}
|
}
|
||||||
|
|
||||||
obj, err := n.objectGet(ctx, bkt, objID)
|
obj, err := h.objectGet(ctx, bkt, objID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -181,7 +174,7 @@ func (n *layer) getCORS(ctx context.Context, bkt *data.BucketInfo) (*data.CORSCo
|
||||||
return nil, fmt.Errorf("unmarshal cors: %w", err)
|
return nil, fmt.Errorf("unmarshal cors: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
n.cache.PutCORS(owner, bkt, cors)
|
h.cache.PutCORS(owner, bkt, cors)
|
||||||
|
|
||||||
return cors, nil
|
return cors, nil
|
||||||
}
|
}
|
||||||
|
@ -191,36 +184,17 @@ func lockObjectKey(objVersion *ObjectVersion) string {
|
||||||
return ".lock." + objVersion.BktInfo.CID.EncodeToString() + "." + objVersion.ObjectName + "." + objVersion.VersionID
|
return ".lock." + objVersion.BktInfo.CID.EncodeToString() + "." + objVersion.ObjectName + "." + objVersion.VersionID
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *layer) GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo) (*data.BucketSettings, error) {
|
func (h *handler) putBucketSettings(ctx context.Context, p *PutSettingsParams) error {
|
||||||
owner := n.Owner(ctx)
|
if err := h.treeService.PutSettingsNode(ctx, p.BktInfo, p.Settings); err != nil {
|
||||||
if settings := n.cache.GetSettings(owner, bktInfo); settings != nil {
|
|
||||||
return settings, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
settings, err := n.treeService.GetSettingsNode(ctx, bktInfo)
|
|
||||||
if err != nil {
|
|
||||||
if !errorsStd.Is(err, ErrNodeNotFound) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
settings = &data.BucketSettings{Versioning: data.VersioningUnversioned}
|
|
||||||
}
|
|
||||||
|
|
||||||
n.cache.PutSettings(owner, bktInfo, settings)
|
|
||||||
|
|
||||||
return settings, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) PutBucketSettings(ctx context.Context, p *PutSettingsParams) error {
|
|
||||||
if err := n.treeService.PutSettingsNode(ctx, p.BktInfo, p.Settings); err != nil {
|
|
||||||
return fmt.Errorf("failed to get settings node: %w", err)
|
return fmt.Errorf("failed to get settings node: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
n.cache.PutSettings(n.Owner(ctx), p.BktInfo, p.Settings)
|
h.cache.PutSettings(h.Owner(ctx), p.BktInfo, p.Settings)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (n *layer) attributesFromLock(ctx context.Context, lock *data.ObjectLock) ([][2]string, error) {
|
func (h *handler) attributesFromLock(ctx context.Context, lock *data.ObjectLock) ([][2]string, error) {
|
||||||
var (
|
var (
|
||||||
err error
|
err error
|
||||||
expEpoch uint64
|
expEpoch uint64
|
||||||
|
@ -228,7 +202,7 @@ func (n *layer) attributesFromLock(ctx context.Context, lock *data.ObjectLock) (
|
||||||
)
|
)
|
||||||
|
|
||||||
if lock.Retention != nil {
|
if lock.Retention != nil {
|
||||||
if _, expEpoch, err = n.neoFS.TimeToEpoch(ctx, TimeNow(ctx), lock.Retention.Until); err != nil {
|
if _, expEpoch, err = h.neoFS.TimeToEpoch(ctx, TimeNow(ctx), lock.Retention.Until); err != nil {
|
||||||
return nil, fmt.Errorf("fetch time to epoch: %w", err)
|
return nil, fmt.Errorf("fetch time to epoch: %w", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -11,7 +11,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -38,15 +37,15 @@ func (h *handler) PutObjectTaggingHandler(w http.ResponseWriter, r *http.Request
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
tagPrm := &layer.PutObjectTaggingParams{
|
tagPrm := &PutObjectTaggingParams{
|
||||||
ObjectVersion: &layer.ObjectVersion{
|
ObjectVersion: &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: reqInfo.ObjectName,
|
ObjectName: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
},
|
},
|
||||||
TagSet: tagSet,
|
TagSet: tagSet,
|
||||||
}
|
}
|
||||||
nodeVersion, err := h.obj.PutObjectTagging(r.Context(), tagPrm)
|
nodeVersion, err := h.putObjectTagging(r.Context(), tagPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not put object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not put object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -79,21 +78,21 @@ func (h *handler) GetObjectTaggingHandler(w http.ResponseWriter, r *http.Request
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
settings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
tagPrm := &layer.GetObjectTaggingParams{
|
tagPrm := &GetObjectTaggingParams{
|
||||||
ObjectVersion: &layer.ObjectVersion{
|
ObjectVersion: &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: reqInfo.ObjectName,
|
ObjectName: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
versionID, tagSet, err := h.obj.GetObjectTagging(r.Context(), tagPrm)
|
versionID, tagSet, err := h.getObjectTagging(r.Context(), tagPrm)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -116,13 +115,13 @@ func (h *handler) DeleteObjectTaggingHandler(w http.ResponseWriter, r *http.Requ
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
p := &layer.ObjectVersion{
|
p := &ObjectVersion{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
ObjectName: reqInfo.ObjectName,
|
ObjectName: reqInfo.ObjectName,
|
||||||
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
|
||||||
}
|
}
|
||||||
|
|
||||||
nodeVersion, err := h.obj.DeleteObjectTagging(r.Context(), p)
|
nodeVersion, err := h.deleteObjectTagging(r.Context(), p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not delete object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not delete object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -161,7 +160,7 @@ func (h *handler) PutBucketTaggingHandler(w http.ResponseWriter, r *http.Request
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketTagging(r.Context(), bktInfo, tagSet); err != nil {
|
if err = h.putBucketTagging(r.Context(), bktInfo, tagSet); err != nil {
|
||||||
h.logAndSendError(w, "could not put object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not put object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -176,7 +175,7 @@ func (h *handler) GetBucketTaggingHandler(w http.ResponseWriter, r *http.Request
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
tagSet, err := h.obj.GetBucketTagging(r.Context(), bktInfo)
|
tagSet, err := h.getBucketTagging(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
|
h.logAndSendError(w, "could not get object tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -197,7 +196,7 @@ func (h *handler) DeleteBucketTaggingHandler(w http.ResponseWriter, r *http.Requ
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.DeleteBucketTagging(r.Context(), bktInfo); err != nil {
|
if err = h.deleteBucketTagging(r.Context(), bktInfo); err != nil {
|
||||||
h.logAndSendError(w, "could not delete bucket tagging", reqInfo, err)
|
h.logAndSendError(w, "could not delete bucket tagging", reqInfo, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
package layer
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
|
@ -1,4 +1,4 @@
|
||||||
package layer
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
|
@ -2,15 +2,21 @@ package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/hex"
|
||||||
errorsStd "errors"
|
errorsStd "errors"
|
||||||
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"os"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
"github.com/nspcc-dev/neofs-s3-gw/api/handler/encryption"
|
||||||
|
"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
|
||||||
|
"github.com/nspcc-dev/neofs-sdk-go/object"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/session"
|
"github.com/nspcc-dev/neofs-sdk-go/session"
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
@ -34,8 +40,8 @@ func transformToS3Error(err error) error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if errorsStd.Is(err, layer.ErrAccessDenied) ||
|
if errorsStd.Is(err, ErrAccessDenied) ||
|
||||||
errorsStd.Is(err, layer.ErrNodeAccessDenied) {
|
errorsStd.Is(err, ErrNodeAccessDenied) {
|
||||||
return errors.GetAPIError(errors.ErrAccessDenied)
|
return errors.GetAPIError(errors.ErrAccessDenied)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -43,7 +49,7 @@ func transformToS3Error(err error) error {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (h *handler) getBucketAndCheckOwner(r *http.Request, bucket string, header ...string) (*data.BucketInfo, error) {
|
func (h *handler) getBucketAndCheckOwner(r *http.Request, bucket string, header ...string) (*data.BucketInfo, error) {
|
||||||
bktInfo, err := h.obj.GetBucketInfo(r.Context(), bucket)
|
bktInfo, err := h.getBucketInfo(r.Context(), bucket)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -62,7 +68,7 @@ func (h *handler) getBucketAndCheckOwner(r *http.Request, bucket string, header
|
||||||
return bktInfo, checkOwner(bktInfo, expected)
|
return bktInfo, checkOwner(bktInfo, expected)
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseRange(s string) (*layer.RangeParams, error) {
|
func parseRange(s string) (*RangeParams, error) {
|
||||||
if s == "" {
|
if s == "" {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
|
@ -92,14 +98,14 @@ func parseRange(s string) (*layer.RangeParams, error) {
|
||||||
return nil, errors.GetAPIError(errors.ErrInvalidRange)
|
return nil, errors.GetAPIError(errors.ErrInvalidRange)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &layer.RangeParams{
|
return &RangeParams{
|
||||||
Start: values[0],
|
Start: values[0],
|
||||||
End: values[1],
|
End: values[1],
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getSessionTokenSetEACL(ctx context.Context) (*session.Container, error) {
|
func getSessionTokenSetEACL(ctx context.Context) (*session.Container, error) {
|
||||||
boxData, err := layer.GetBoxData(ctx)
|
boxData, err := GetBoxData(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
@ -110,3 +116,139 @@ func getSessionTokenSetEACL(ctx context.Context) (*session.Container, error) {
|
||||||
|
|
||||||
return sessionToken, nil
|
return sessionToken, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type (
|
||||||
|
// ListObjectsInfo contains common fields of data for ListObjectsV1 and ListObjectsV2.
|
||||||
|
ListObjectsInfo struct {
|
||||||
|
Prefixes []string
|
||||||
|
Objects []*data.ObjectInfo
|
||||||
|
IsTruncated bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListObjectsInfoV1 holds data which ListObjectsV1 returns.
|
||||||
|
ListObjectsInfoV1 struct {
|
||||||
|
ListObjectsInfo
|
||||||
|
NextMarker string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListObjectsInfoV2 holds data which ListObjectsV2 returns.
|
||||||
|
ListObjectsInfoV2 struct {
|
||||||
|
ListObjectsInfo
|
||||||
|
NextContinuationToken string
|
||||||
|
}
|
||||||
|
|
||||||
|
// ListObjectVersionsInfo stores info and list of objects versions.
|
||||||
|
ListObjectVersionsInfo struct {
|
||||||
|
CommonPrefixes []string
|
||||||
|
IsTruncated bool
|
||||||
|
KeyMarker string
|
||||||
|
NextKeyMarker string
|
||||||
|
NextVersionIDMarker string
|
||||||
|
Version []*data.ExtendedObjectInfo
|
||||||
|
DeleteMarker []*data.ExtendedObjectInfo
|
||||||
|
VersionIDMarker string
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// PathSeparator is a path components separator string.
|
||||||
|
const PathSeparator = string(os.PathSeparator)
|
||||||
|
|
||||||
|
func userHeaders(attrs []object.Attribute) map[string]string {
|
||||||
|
result := make(map[string]string, len(attrs))
|
||||||
|
|
||||||
|
for _, attr := range attrs {
|
||||||
|
result[attr.Key()] = attr.Value()
|
||||||
|
}
|
||||||
|
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectInfo {
|
||||||
|
var (
|
||||||
|
mimeType string
|
||||||
|
creation time.Time
|
||||||
|
)
|
||||||
|
|
||||||
|
headers := userHeaders(meta.Attributes())
|
||||||
|
delete(headers, object.AttributeFilePath)
|
||||||
|
if contentType, ok := headers[object.AttributeContentType]; ok {
|
||||||
|
mimeType = contentType
|
||||||
|
delete(headers, object.AttributeContentType)
|
||||||
|
}
|
||||||
|
if val, ok := headers[object.AttributeTimestamp]; !ok {
|
||||||
|
// ignore empty value
|
||||||
|
} else if dt, err := strconv.ParseInt(val, 10, 64); err == nil {
|
||||||
|
creation = time.Unix(dt, 0)
|
||||||
|
delete(headers, object.AttributeTimestamp)
|
||||||
|
}
|
||||||
|
|
||||||
|
objID, _ := meta.ID()
|
||||||
|
payloadChecksum, _ := meta.PayloadChecksum()
|
||||||
|
return &data.ObjectInfo{
|
||||||
|
ID: objID,
|
||||||
|
CID: bkt.CID,
|
||||||
|
IsDir: false,
|
||||||
|
|
||||||
|
Bucket: bkt.Name,
|
||||||
|
Name: filepathFromObject(meta),
|
||||||
|
Created: creation,
|
||||||
|
ContentType: mimeType,
|
||||||
|
Headers: headers,
|
||||||
|
Owner: *meta.OwnerID(),
|
||||||
|
Size: int64(meta.PayloadSize()),
|
||||||
|
HashSum: hex.EncodeToString(payloadChecksum.Value()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func FormEncryptionInfo(headers map[string]string) encryption.ObjectEncryption {
|
||||||
|
algorithm := headers[AttributeEncryptionAlgorithm]
|
||||||
|
return encryption.ObjectEncryption{
|
||||||
|
Enabled: len(algorithm) > 0,
|
||||||
|
Algorithm: algorithm,
|
||||||
|
HMACKey: headers[AttributeHMACKey],
|
||||||
|
HMACSalt: headers[AttributeHMACSalt],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func addEncryptionHeaders(meta map[string]string, enc encryption.Params) error {
|
||||||
|
meta[AttributeEncryptionAlgorithm] = AESEncryptionAlgorithm
|
||||||
|
hmacKey, hmacSalt, err := enc.HMAC()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("get hmac: %w", err)
|
||||||
|
}
|
||||||
|
meta[AttributeHMACKey] = hex.EncodeToString(hmacKey)
|
||||||
|
meta[AttributeHMACSalt] = hex.EncodeToString(hmacSalt)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func filepathFromObject(o *object.Object) string {
|
||||||
|
for _, attr := range o.Attributes() {
|
||||||
|
if attr.Key() == object.AttributeFilePath {
|
||||||
|
return attr.Value()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
objID, _ := o.ID()
|
||||||
|
return objID.EncodeToString()
|
||||||
|
}
|
||||||
|
|
||||||
|
// NameFromString splits name into a base file name and a directory path.
|
||||||
|
func NameFromString(name string) (string, string) {
|
||||||
|
ind := strings.LastIndex(name, PathSeparator)
|
||||||
|
return name[ind+1:], name[:ind+1]
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetBoxData extracts accessbox.Box from context.
|
||||||
|
func GetBoxData(ctx context.Context) (*accessbox.Box, error) {
|
||||||
|
var boxData *accessbox.Box
|
||||||
|
data, ok := ctx.Value(api.BoxData).(*accessbox.Box)
|
||||||
|
if !ok || data == nil {
|
||||||
|
return nil, fmt.Errorf("couldn't get box data from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
boxData = data
|
||||||
|
if boxData.Gate == nil {
|
||||||
|
boxData.Gate = &accessbox.GateData{}
|
||||||
|
}
|
||||||
|
return boxData, nil
|
||||||
|
}
|
||||||
|
|
|
@ -1,7 +1,11 @@
|
||||||
package layer
|
package handler
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
|
"crypto/rand"
|
||||||
|
"crypto/sha256"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
|
"io"
|
||||||
"net/http"
|
"net/http"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
@ -151,3 +155,21 @@ func TestTryDirectory(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestWrapReader(t *testing.T) {
|
||||||
|
src := make([]byte, 1024*1024+1)
|
||||||
|
_, err := rand.Read(src)
|
||||||
|
require.NoError(t, err)
|
||||||
|
h := sha256.Sum256(src)
|
||||||
|
|
||||||
|
streamHash := sha256.New()
|
||||||
|
reader := bytes.NewReader(src)
|
||||||
|
wrappedReader := wrapReader(reader, 64*1024, func(buf []byte) {
|
||||||
|
streamHash.Write(buf)
|
||||||
|
})
|
||||||
|
|
||||||
|
dst, err := io.ReadAll(wrappedReader)
|
||||||
|
require.NoError(t, err)
|
||||||
|
require.Equal(t, src, dst)
|
||||||
|
require.Equal(t, h[:], streamHash.Sum(nil))
|
||||||
|
}
|
|
@ -7,7 +7,6 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
|
func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Request) {
|
||||||
|
@ -25,7 +24,7 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
settings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
h.logAndSendError(w, "couldn't get bucket settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
@ -40,7 +39,7 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
|
||||||
newSettings := *settings
|
newSettings := *settings
|
||||||
newSettings.Versioning = configuration.Status
|
newSettings.Versioning = configuration.Status
|
||||||
|
|
||||||
p := &layer.PutSettingsParams{
|
p := &PutSettingsParams{
|
||||||
BktInfo: bktInfo,
|
BktInfo: bktInfo,
|
||||||
Settings: &newSettings,
|
Settings: &newSettings,
|
||||||
}
|
}
|
||||||
|
@ -50,7 +49,7 @@ func (h *handler) PutBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if err = h.obj.PutBucketSettings(r.Context(), p); err != nil {
|
if err = h.putBucketSettings(r.Context(), p); err != nil {
|
||||||
h.logAndSendError(w, "couldn't put update versioning settings", reqInfo, err)
|
h.logAndSendError(w, "couldn't put update versioning settings", reqInfo, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -65,7 +64,7 @@ func (h *handler) GetBucketVersioningHandler(w http.ResponseWriter, r *http.Requ
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
settings, err := h.obj.GetBucketSettings(r.Context(), bktInfo)
|
settings, err := h.getBucketSettings(r.Context(), bktInfo)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
h.logAndSendError(w, "couldn't get version settings", reqInfo, err)
|
h.logAndSendError(w, "couldn't get version settings", reqInfo, err)
|
||||||
return
|
return
|
||||||
|
|
145
api/handler/versioning_test.go
Normal file
145
api/handler/versioning_test.go
Normal file
|
@ -0,0 +1,145 @@
|
||||||
|
package handler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/url"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestSimpleVersioning(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
bktName, objName := "bkt-name", "obj-name"
|
||||||
|
|
||||||
|
createTestBucket(hc, bktName)
|
||||||
|
putBucketVersioning(t, hc, bktName, true)
|
||||||
|
|
||||||
|
obj1Content1 := "content obj1 v1"
|
||||||
|
version1, _ := putObjectContent(hc, bktName, objName, obj1Content1)
|
||||||
|
|
||||||
|
obj1Content2 := "content obj1 v2"
|
||||||
|
version2, etag2 := putObjectContent(hc, bktName, objName, obj1Content2)
|
||||||
|
|
||||||
|
buffer2 := getObject(hc, bktName, objName, "")
|
||||||
|
require.Equal(t, []byte(obj1Content2), buffer2)
|
||||||
|
|
||||||
|
buffer1 := getObject(hc, bktName, objName, version1)
|
||||||
|
require.Equal(t, []byte(obj1Content1), buffer1)
|
||||||
|
|
||||||
|
checkLastObject(hc, bktName, objName, version2, etag2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSimpleNoVersioning(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
bktName, objName := "bkt-name", "obj-name"
|
||||||
|
createTestBucket(hc, bktName)
|
||||||
|
|
||||||
|
obj1Content1 := "content obj1 v1"
|
||||||
|
version1, _ := putObjectContent(hc, bktName, objName, obj1Content1)
|
||||||
|
|
||||||
|
obj1Content2 := "content obj1 v2"
|
||||||
|
version2, etag2 := putObjectContent(hc, bktName, objName, obj1Content2)
|
||||||
|
|
||||||
|
buffer2 := getObject(hc, bktName, objName, "")
|
||||||
|
require.Equal(t, []byte(obj1Content2), buffer2)
|
||||||
|
|
||||||
|
checkNotFound(hc.t, hc, bktName, objName, version1)
|
||||||
|
checkLastObject(hc, bktName, objName, version2, etag2)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestGetUnversioned(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
bktName, objName := "bkt-name", "obj-name"
|
||||||
|
|
||||||
|
createTestBucket(hc, bktName)
|
||||||
|
|
||||||
|
objContent := "content obj1 v1"
|
||||||
|
putObjectContent(hc, bktName, objName, objContent)
|
||||||
|
|
||||||
|
putBucketVersioning(hc.t, hc, bktName, true)
|
||||||
|
|
||||||
|
buffer := getObject(hc, bktName, objName, data.UnversionedObjectVersionID)
|
||||||
|
require.Equal(t, objContent, string(buffer))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestVersioningDeleteSpecificObjectVersion(t *testing.T) {
|
||||||
|
hc := prepareHandlerContext(t)
|
||||||
|
bktName, objName := "bkt-name", "obj-name"
|
||||||
|
|
||||||
|
createTestBucket(hc, bktName)
|
||||||
|
putBucketVersioning(t, hc, bktName, true)
|
||||||
|
|
||||||
|
putObjectContent(hc, bktName, objName, "content obj1 v1")
|
||||||
|
version2, _ := putObjectContent(hc, bktName, objName, "content obj1 v2")
|
||||||
|
objV3Content := "content obj1 v3"
|
||||||
|
putObjectContent(hc, bktName, objName, objV3Content)
|
||||||
|
|
||||||
|
deleteObject(t, hc, bktName, objName, version2)
|
||||||
|
checkNotFound(t, hc, bktName, objName, version2)
|
||||||
|
|
||||||
|
buffer3 := getObject(hc, bktName, objName, "")
|
||||||
|
require.Equal(t, []byte(objV3Content), buffer3)
|
||||||
|
|
||||||
|
deleteObject(t, hc, bktName, objName, "")
|
||||||
|
checkNotFound(t, hc, bktName, objName, "")
|
||||||
|
|
||||||
|
versions := listVersions(t, hc, bktName)
|
||||||
|
for _, ver := range versions.DeleteMarker {
|
||||||
|
if ver.IsLatest {
|
||||||
|
deleteObject(t, hc, bktName, objName, ver.VersionID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
buffer3 = getObject(hc, bktName, objName, "")
|
||||||
|
require.Equal(t, []byte(objV3Content), buffer3)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getObject(hc *handlerContext, bktName, objName, versionID string) []byte {
|
||||||
|
query := make(url.Values)
|
||||||
|
query.Add(api.QueryVersionID, versionID)
|
||||||
|
|
||||||
|
w, r := prepareTestFullRequest(hc, bktName, objName, query, nil)
|
||||||
|
hc.Handler().GetObjectHandler(w, r)
|
||||||
|
assertStatus(hc.t, w, http.StatusOK)
|
||||||
|
|
||||||
|
respData, err := io.ReadAll(w.Body)
|
||||||
|
require.NoError(hc.t, err)
|
||||||
|
|
||||||
|
return respData
|
||||||
|
}
|
||||||
|
|
||||||
|
func checkLastObject(hc *handlerContext, bktName, objName, versionID, etag string) {
|
||||||
|
respV1 := listObjectsV1(hc.t, hc, bktName, "", "", "", -1)
|
||||||
|
existed := false
|
||||||
|
for _, obj := range respV1.Contents {
|
||||||
|
if obj.Key == objName {
|
||||||
|
existed = true
|
||||||
|
require.Equal(hc.t, etag, obj.ETag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.True(hc.t, existed)
|
||||||
|
|
||||||
|
respV2 := listObjectsV2(hc.t, hc, bktName, "", "", "", "", -1)
|
||||||
|
existed = false
|
||||||
|
for _, obj := range respV2.Contents {
|
||||||
|
if obj.Key == objName {
|
||||||
|
existed = true
|
||||||
|
require.Equal(hc.t, etag, obj.ETag)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.True(hc.t, existed)
|
||||||
|
|
||||||
|
versions := listVersions(hc.t, hc, bktName)
|
||||||
|
existed = false
|
||||||
|
for _, obj := range versions.Version {
|
||||||
|
if obj.Key == objName && obj.IsLatest {
|
||||||
|
existed = true
|
||||||
|
require.Equal(hc.t, etag, obj.ETag)
|
||||||
|
require.Equal(hc.t, versionID, obj.VersionID)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
require.True(hc.t, existed)
|
||||||
|
}
|
|
@ -1,41 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
errorsStd "errors"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (n *layer) GetObjectTaggingAndLock(ctx context.Context, objVersion *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error) {
|
|
||||||
var err error
|
|
||||||
owner := n.Owner(ctx)
|
|
||||||
|
|
||||||
tags := n.cache.GetTagging(owner, objectTaggingCacheKey(objVersion))
|
|
||||||
lockInfo := n.cache.GetLockInfo(owner, lockObjectKey(objVersion))
|
|
||||||
|
|
||||||
if tags != nil && lockInfo != nil {
|
|
||||||
return tags, lockInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if nodeVersion == nil {
|
|
||||||
nodeVersion, err = n.getNodeVersion(ctx, objVersion)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
tags, lockInfo, err = n.treeService.GetObjectTaggingAndLock(ctx, objVersion.BktInfo, nodeVersion)
|
|
||||||
if err != nil {
|
|
||||||
if errorsStd.Is(err, ErrNodeNotFound) {
|
|
||||||
return nil, nil, errors.GetAPIError(errors.ErrNoSuchKey)
|
|
||||||
}
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
n.cache.PutTagging(owner, objectTaggingCacheKey(objVersion), tags)
|
|
||||||
n.cache.PutLockInfo(owner, lockObjectKey(objVersion), lockInfo)
|
|
||||||
|
|
||||||
return tags, lockInfo, nil
|
|
||||||
}
|
|
|
@ -1,166 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/client"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/container"
|
|
||||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/eacl"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/session"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
// BucketACL extends BucketInfo by eacl.Table.
|
|
||||||
BucketACL struct {
|
|
||||||
Info *data.BucketInfo
|
|
||||||
EACL *eacl.Table
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// attributeLocationConstraint is the container attribute that stores the
	// S3 location constraint of the bucket.
	attributeLocationConstraint = ".s3-location-constraint"
	// AttributeLockEnabled is the container attribute marking object-lock support.
	AttributeLockEnabled = "LockEnabled"
)
|
|
||||||
|
|
||||||
func (n *layer) containerInfo(ctx context.Context, idCnr cid.ID) (*data.BucketInfo, error) {
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
res *container.Container
|
|
||||||
rid = api.GetRequestID(ctx)
|
|
||||||
log = n.log.With(zap.Stringer("cid", idCnr), zap.String("request_id", rid))
|
|
||||||
|
|
||||||
info = &data.BucketInfo{
|
|
||||||
CID: idCnr,
|
|
||||||
Name: idCnr.EncodeToString(),
|
|
||||||
}
|
|
||||||
)
|
|
||||||
res, err = n.neoFS.Container(ctx, idCnr)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("could not fetch container", zap.Error(err))
|
|
||||||
|
|
||||||
if client.IsErrContainerNotFound(err) {
|
|
||||||
return nil, errors.GetAPIError(errors.ErrNoSuchBucket)
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("get neofs container: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
cnr := *res
|
|
||||||
|
|
||||||
info.Owner = cnr.Owner()
|
|
||||||
if domain := container.ReadDomain(cnr); domain.Name() != "" {
|
|
||||||
info.Name = domain.Name()
|
|
||||||
}
|
|
||||||
info.Created = container.CreatedAt(cnr)
|
|
||||||
info.LocationConstraint = cnr.Attribute(attributeLocationConstraint)
|
|
||||||
|
|
||||||
attrLockEnabled := cnr.Attribute(AttributeLockEnabled)
|
|
||||||
if len(attrLockEnabled) > 0 {
|
|
||||||
info.ObjectLockEnabled, err = strconv.ParseBool(attrLockEnabled)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("could not parse container object lock enabled attribute",
|
|
||||||
zap.String("lock_enabled", attrLockEnabled),
|
|
||||||
zap.Error(err),
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
n.cache.PutBucket(info)
|
|
||||||
|
|
||||||
return info, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) containerList(ctx context.Context) ([]*data.BucketInfo, error) {
|
|
||||||
var (
|
|
||||||
err error
|
|
||||||
own = n.Owner(ctx)
|
|
||||||
res []cid.ID
|
|
||||||
rid = api.GetRequestID(ctx)
|
|
||||||
)
|
|
||||||
res, err = n.neoFS.UserContainers(ctx, own)
|
|
||||||
if err != nil {
|
|
||||||
n.log.Error("could not list user containers",
|
|
||||||
zap.String("request_id", rid),
|
|
||||||
zap.Error(err))
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
list := make([]*data.BucketInfo, 0, len(res))
|
|
||||||
for i := range res {
|
|
||||||
info, err := n.containerInfo(ctx, res[i])
|
|
||||||
if err != nil {
|
|
||||||
n.log.Error("could not fetch container info",
|
|
||||||
zap.String("request_id", rid),
|
|
||||||
zap.Error(err))
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
list = append(list, info)
|
|
||||||
}
|
|
||||||
|
|
||||||
return list, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) createContainer(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error) {
|
|
||||||
ownerID := n.Owner(ctx)
|
|
||||||
if p.LocationConstraint == "" {
|
|
||||||
p.LocationConstraint = api.DefaultLocationConstraint // s3tests_boto3.functional.test_s3:test_bucket_get_location
|
|
||||||
}
|
|
||||||
bktInfo := &data.BucketInfo{
|
|
||||||
Name: p.Name,
|
|
||||||
Owner: ownerID,
|
|
||||||
Created: TimeNow(ctx),
|
|
||||||
LocationConstraint: p.LocationConstraint,
|
|
||||||
ObjectLockEnabled: p.ObjectLockEnabled,
|
|
||||||
}
|
|
||||||
|
|
||||||
var attributes [][2]string
|
|
||||||
|
|
||||||
attributes = append(attributes, [2]string{
|
|
||||||
attributeLocationConstraint, p.LocationConstraint,
|
|
||||||
})
|
|
||||||
|
|
||||||
if p.ObjectLockEnabled {
|
|
||||||
attributes = append(attributes, [2]string{
|
|
||||||
AttributeLockEnabled, "true",
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
idCnr, err := n.neoFS.CreateContainer(ctx, PrmContainerCreate{
|
|
||||||
Creator: bktInfo.Owner,
|
|
||||||
Policy: p.Policy,
|
|
||||||
Name: p.Name,
|
|
||||||
SessionToken: p.SessionContainerCreation,
|
|
||||||
CreationTime: bktInfo.Created,
|
|
||||||
AdditionalAttributes: attributes,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("create container: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo.CID = idCnr
|
|
||||||
|
|
||||||
if err = n.setContainerEACLTable(ctx, bktInfo.CID, p.EACL, p.SessionEACL); err != nil {
|
|
||||||
return nil, fmt.Errorf("set container eacl: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
n.cache.PutBucket(bktInfo)
|
|
||||||
|
|
||||||
return bktInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) setContainerEACLTable(ctx context.Context, idCnr cid.ID, table *eacl.Table, sessionToken *session.Container) error {
|
|
||||||
table.SetCID(idCnr)
|
|
||||||
|
|
||||||
return n.neoFS.SetContainerEACL(ctx, *table, sessionToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) GetContainerEACL(ctx context.Context, idCnr cid.ID) (*eacl.Table, error) {
|
|
||||||
return n.neoFS.ContainerEACL(ctx, idCnr)
|
|
||||||
}
|
|
|
@ -1,116 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/xml"
|
|
||||||
errorsStd "errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// wildcard matches any value in CORS expose-header checks.
const wildcard = "*"

// supportedMethods is the set of HTTP methods accepted in CORS rules.
var supportedMethods = map[string]struct{}{"GET": {}, "HEAD": {}, "POST": {}, "PUT": {}, "DELETE": {}}
|
|
||||||
|
|
||||||
func (n *layer) PutBucketCORS(ctx context.Context, p *PutCORSParams) error {
|
|
||||||
var (
|
|
||||||
buf bytes.Buffer
|
|
||||||
tee = io.TeeReader(p.Reader, &buf)
|
|
||||||
cors = &data.CORSConfiguration{}
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := xml.NewDecoder(tee).Decode(cors); err != nil {
|
|
||||||
return fmt.Errorf("xml decode cors: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if cors.CORSRules == nil {
|
|
||||||
return errors.GetAPIError(errors.ErrMalformedXML)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := checkCORS(cors); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
prm := PrmObjectCreate{
|
|
||||||
Container: p.BktInfo.CID,
|
|
||||||
Creator: p.BktInfo.Owner,
|
|
||||||
Payload: p.Reader,
|
|
||||||
Filepath: p.BktInfo.CORSObjectName(),
|
|
||||||
CreationTime: TimeNow(ctx),
|
|
||||||
CopiesNumber: p.CopiesNumber,
|
|
||||||
}
|
|
||||||
|
|
||||||
objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("put system object: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
objIDToDelete, err := n.treeService.PutBucketCORS(ctx, p.BktInfo, objID)
|
|
||||||
objIDToDeleteNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
|
|
||||||
if err != nil && !objIDToDeleteNotFound {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !objIDToDeleteNotFound {
|
|
||||||
if err = n.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
|
|
||||||
n.log.Error("couldn't delete cors object", zap.Error(err),
|
|
||||||
zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
|
|
||||||
zap.String("bucket name", p.BktInfo.Name),
|
|
||||||
zap.String("objID", objIDToDelete.EncodeToString()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
n.cache.PutCORS(n.Owner(ctx), p.BktInfo, cors)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*data.CORSConfiguration, error) {
|
|
||||||
cors, err := n.getCORS(ctx, bktInfo)
|
|
||||||
if err != nil {
|
|
||||||
if errorsStd.Is(err, ErrNodeNotFound) {
|
|
||||||
return nil, errors.GetAPIError(errors.ErrNoSuchCORSConfiguration)
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return cors, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) error {
|
|
||||||
objID, err := n.treeService.DeleteBucketCORS(ctx, bktInfo)
|
|
||||||
objIDNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
|
|
||||||
if err != nil && !objIDNotFound {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if !objIDNotFound {
|
|
||||||
if err = n.objectDelete(ctx, bktInfo, objID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
n.cache.DeleteCORS(bktInfo)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func checkCORS(cors *data.CORSConfiguration) error {
|
|
||||||
for _, r := range cors.CORSRules {
|
|
||||||
for _, m := range r.AllowedMethods {
|
|
||||||
if _, ok := supportedMethods[m]; !ok {
|
|
||||||
return errors.GetAPIErrorWithError(errors.ErrCORSUnsupportedMethod, fmt.Errorf("unsupported method is %s", m))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, h := range r.ExposeHeaders {
|
|
||||||
if h == wildcard {
|
|
||||||
return errors.GetAPIError(errors.ErrCORSWildcardExposeHeaders)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,665 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/ecdsa"
|
|
||||||
"crypto/rand"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/nats-io/nats.go"
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer/encryption"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/bearer"
|
|
||||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/eacl"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/session"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/user"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
|
|
||||||
EventListener interface {
|
|
||||||
Subscribe(context.Context, string, MsgHandler) error
|
|
||||||
Listen(context.Context)
|
|
||||||
}
|
|
||||||
|
|
||||||
MsgHandler interface {
|
|
||||||
HandleMessage(context.Context, *nats.Msg) error
|
|
||||||
}
|
|
||||||
|
|
||||||
MsgHandlerFunc func(context.Context, *nats.Msg) error
|
|
||||||
|
|
||||||
BucketResolver interface {
|
|
||||||
Resolve(ctx context.Context, name string) (cid.ID, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
layer struct {
|
|
||||||
neoFS NeoFS
|
|
||||||
log *zap.Logger
|
|
||||||
anonKey AnonymousKey
|
|
||||||
resolver BucketResolver
|
|
||||||
ncontroller EventListener
|
|
||||||
cache *Cache
|
|
||||||
treeService TreeService
|
|
||||||
}
|
|
||||||
|
|
||||||
Config struct {
|
|
||||||
ChainAddress string
|
|
||||||
Caches *CachesConfig
|
|
||||||
AnonKey AnonymousKey
|
|
||||||
Resolver BucketResolver
|
|
||||||
TreeService TreeService
|
|
||||||
}
|
|
||||||
|
|
||||||
// AnonymousKey contains data for anonymous requests.
|
|
||||||
AnonymousKey struct {
|
|
||||||
Key *keys.PrivateKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetObjectParams stores object get request parameters.
|
|
||||||
GetObjectParams struct {
|
|
||||||
Range *RangeParams
|
|
||||||
ObjectInfo *data.ObjectInfo
|
|
||||||
BucketInfo *data.BucketInfo
|
|
||||||
Writer io.Writer
|
|
||||||
Encryption encryption.Params
|
|
||||||
}
|
|
||||||
|
|
||||||
// HeadObjectParams stores object head request parameters.
|
|
||||||
HeadObjectParams struct {
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
Object string
|
|
||||||
VersionID string
|
|
||||||
}
|
|
||||||
|
|
||||||
// ObjectVersion stores object version info.
|
|
||||||
ObjectVersion struct {
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
ObjectName string
|
|
||||||
VersionID string
|
|
||||||
NoErrorOnDeleteMarker bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// RangeParams stores range header request parameters.
|
|
||||||
RangeParams struct {
|
|
||||||
Start uint64
|
|
||||||
End uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutObjectParams stores object put request parameters.
|
|
||||||
PutObjectParams struct {
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
Object string
|
|
||||||
Size int64
|
|
||||||
Reader io.Reader
|
|
||||||
Header map[string]string
|
|
||||||
Lock *data.ObjectLock
|
|
||||||
Encryption encryption.Params
|
|
||||||
CopiesNumber uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
DeleteObjectParams struct {
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
Objects []*VersionedObject
|
|
||||||
Settings *data.BucketSettings
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutSettingsParams stores object copy request parameters.
|
|
||||||
PutSettingsParams struct {
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
Settings *data.BucketSettings
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutCORSParams stores PutCORS request parameters.
|
|
||||||
PutCORSParams struct {
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
Reader io.Reader
|
|
||||||
CopiesNumber uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyObjectParams stores object copy request parameters.
|
|
||||||
CopyObjectParams struct {
|
|
||||||
SrcObject *data.ObjectInfo
|
|
||||||
ScrBktInfo *data.BucketInfo
|
|
||||||
DstBktInfo *data.BucketInfo
|
|
||||||
DstObject string
|
|
||||||
SrcSize int64
|
|
||||||
Header map[string]string
|
|
||||||
Range *RangeParams
|
|
||||||
Lock *data.ObjectLock
|
|
||||||
Encryption encryption.Params
|
|
||||||
CopiesNuber uint32
|
|
||||||
}
|
|
||||||
// CreateBucketParams stores bucket create request parameters.
|
|
||||||
CreateBucketParams struct {
|
|
||||||
Name string
|
|
||||||
Policy netmap.PlacementPolicy
|
|
||||||
EACL *eacl.Table
|
|
||||||
SessionContainerCreation *session.Container
|
|
||||||
SessionEACL *session.Container
|
|
||||||
LocationConstraint string
|
|
||||||
ObjectLockEnabled bool
|
|
||||||
}
|
|
||||||
// PutBucketACLParams stores put bucket acl request parameters.
|
|
||||||
PutBucketACLParams struct {
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
EACL *eacl.Table
|
|
||||||
SessionToken *session.Container
|
|
||||||
}
|
|
||||||
// DeleteBucketParams stores delete bucket request parameters.
|
|
||||||
DeleteBucketParams struct {
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
SessionToken *session.Container
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListObjectVersionsParams stores list objects versions parameters.
|
|
||||||
ListObjectVersionsParams struct {
|
|
||||||
BktInfo *data.BucketInfo
|
|
||||||
Delimiter string
|
|
||||||
KeyMarker string
|
|
||||||
MaxKeys int
|
|
||||||
Prefix string
|
|
||||||
VersionIDMarker string
|
|
||||||
Encode string
|
|
||||||
}
|
|
||||||
|
|
||||||
// VersionedObject stores info about objects to delete.
|
|
||||||
VersionedObject struct {
|
|
||||||
Name string
|
|
||||||
VersionID string
|
|
||||||
DeleteMarkVersion string
|
|
||||||
DeleteMarkerEtag string
|
|
||||||
Error error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Client provides S3 API client interface.
|
|
||||||
Client interface {
|
|
||||||
Initialize(ctx context.Context, c EventListener) error
|
|
||||||
EphemeralKey() *keys.PublicKey
|
|
||||||
|
|
||||||
GetBucketSettings(ctx context.Context, bktInfo *data.BucketInfo) (*data.BucketSettings, error)
|
|
||||||
PutBucketSettings(ctx context.Context, p *PutSettingsParams) error
|
|
||||||
|
|
||||||
PutBucketCORS(ctx context.Context, p *PutCORSParams) error
|
|
||||||
GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (*data.CORSConfiguration, error)
|
|
||||||
DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) error
|
|
||||||
|
|
||||||
ListBuckets(ctx context.Context) ([]*data.BucketInfo, error)
|
|
||||||
GetBucketInfo(ctx context.Context, name string) (*data.BucketInfo, error)
|
|
||||||
GetBucketACL(ctx context.Context, bktInfo *data.BucketInfo) (*BucketACL, error)
|
|
||||||
PutBucketACL(ctx context.Context, p *PutBucketACLParams) error
|
|
||||||
CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error)
|
|
||||||
DeleteBucket(ctx context.Context, p *DeleteBucketParams) error
|
|
||||||
|
|
||||||
GetObject(ctx context.Context, p *GetObjectParams) error
|
|
||||||
GetObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ObjectInfo, error)
|
|
||||||
GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ExtendedObjectInfo, error)
|
|
||||||
|
|
||||||
GetLockInfo(ctx context.Context, obj *ObjectVersion) (*data.LockInfo, error)
|
|
||||||
PutLockInfo(ctx context.Context, p *PutLockInfoParams) error
|
|
||||||
|
|
||||||
GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error)
|
|
||||||
PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, tagSet map[string]string) error
|
|
||||||
DeleteBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) error
|
|
||||||
|
|
||||||
GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams) (string, map[string]string, error)
|
|
||||||
PutObjectTagging(ctx context.Context, p *PutObjectTaggingParams) (*data.NodeVersion, error)
|
|
||||||
DeleteObjectTagging(ctx context.Context, p *ObjectVersion) (*data.NodeVersion, error)
|
|
||||||
|
|
||||||
PutObject(ctx context.Context, p *PutObjectParams) (*data.ExtendedObjectInfo, error)
|
|
||||||
|
|
||||||
CopyObject(ctx context.Context, p *CopyObjectParams) (*data.ExtendedObjectInfo, error)
|
|
||||||
|
|
||||||
ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*ListObjectsInfoV1, error)
|
|
||||||
ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*ListObjectsInfoV2, error)
|
|
||||||
ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error)
|
|
||||||
|
|
||||||
DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*VersionedObject
|
|
||||||
|
|
||||||
CreateMultipartUpload(ctx context.Context, p *CreateMultipartParams) error
|
|
||||||
CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*UploadData, *data.ExtendedObjectInfo, error)
|
|
||||||
UploadPart(ctx context.Context, p *UploadPartParams) (string, error)
|
|
||||||
UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error)
|
|
||||||
ListMultipartUploads(ctx context.Context, p *ListMultipartUploadsParams) (*ListMultipartUploadsInfo, error)
|
|
||||||
AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) error
|
|
||||||
ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsInfo, error)
|
|
||||||
|
|
||||||
PutBucketNotificationConfiguration(ctx context.Context, p *PutBucketNotificationConfigurationParams) error
|
|
||||||
GetBucketNotificationConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.NotificationConfiguration, error)
|
|
||||||
|
|
||||||
// Compound methods for optimizations
|
|
||||||
|
|
||||||
// GetObjectTaggingAndLock unifies GetObjectTagging and GetLock methods in single tree service invocation.
|
|
||||||
GetObjectTaggingAndLock(ctx context.Context, p *ObjectVersion, nodeVersion *data.NodeVersion) (map[string]string, *data.LockInfo, error)
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
tagPrefix = "S3-Tag-"
|
|
||||||
|
|
||||||
AESEncryptionAlgorithm = "AES256"
|
|
||||||
AESKeySize = 32
|
|
||||||
AttributeEncryptionAlgorithm = api.NeoFSSystemMetadataPrefix + "Algorithm"
|
|
||||||
AttributeDecryptedSize = api.NeoFSSystemMetadataPrefix + "Decrypted-Size"
|
|
||||||
AttributeHMACSalt = api.NeoFSSystemMetadataPrefix + "HMAC-Salt"
|
|
||||||
AttributeHMACKey = api.NeoFSSystemMetadataPrefix + "HMAC-Key"
|
|
||||||
|
|
||||||
AttributeNeofsCopiesNumber = "neofs-copies-number" // such formate to match X-Amz-Meta-Neofs-Copies-Number header
|
|
||||||
)
|
|
||||||
|
|
||||||
func (t *VersionedObject) String() string {
|
|
||||||
return t.Name + ":" + t.VersionID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f MsgHandlerFunc) HandleMessage(ctx context.Context, msg *nats.Msg) error {
|
|
||||||
return f(ctx, msg)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewLayer creates an instance of a layer. It checks credentials
|
|
||||||
// and establishes gRPC connection with the node.
|
|
||||||
func NewLayer(log *zap.Logger, neoFS NeoFS, config *Config) Client {
|
|
||||||
return &layer{
|
|
||||||
neoFS: neoFS,
|
|
||||||
log: log,
|
|
||||||
anonKey: config.AnonKey,
|
|
||||||
resolver: config.Resolver,
|
|
||||||
cache: NewCache(config.Caches),
|
|
||||||
treeService: config.TreeService,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) EphemeralKey() *keys.PublicKey {
|
|
||||||
return n.anonKey.Key.PublicKey()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) Initialize(ctx context.Context, c EventListener) error {
|
|
||||||
if n.IsNotificationEnabled() {
|
|
||||||
return fmt.Errorf("already initialized")
|
|
||||||
}
|
|
||||||
|
|
||||||
// todo add notification handlers (e.g. for lifecycles)
|
|
||||||
|
|
||||||
c.Listen(ctx)
|
|
||||||
|
|
||||||
n.ncontroller = c
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) IsNotificationEnabled() bool {
|
|
||||||
return n.ncontroller != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsAuthenticatedRequest checks if access box exists in the current request.
|
|
||||||
func IsAuthenticatedRequest(ctx context.Context) bool {
|
|
||||||
_, ok := ctx.Value(api.BoxData).(*accessbox.Box)
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
// TimeNow returns client time from request or time.Now().
|
|
||||||
func TimeNow(ctx context.Context) time.Time {
|
|
||||||
if now, ok := ctx.Value(api.ClientTime).(time.Time); ok {
|
|
||||||
return now
|
|
||||||
}
|
|
||||||
|
|
||||||
return time.Now()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Owner returns owner id from BearerToken (context) or from client owner.
|
|
||||||
func (n *layer) Owner(ctx context.Context) user.ID {
|
|
||||||
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
|
|
||||||
return bearer.ResolveIssuer(*bd.Gate.BearerToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
var ownerID user.ID
|
|
||||||
user.IDFromKey(&ownerID, (ecdsa.PublicKey)(*n.EphemeralKey()))
|
|
||||||
|
|
||||||
return ownerID
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) prepareAuthParameters(ctx context.Context, prm *PrmAuth, bktOwner user.ID) {
|
|
||||||
if bd, ok := ctx.Value(api.BoxData).(*accessbox.Box); ok && bd != nil && bd.Gate != nil && bd.Gate.BearerToken != nil {
|
|
||||||
if bktOwner.Equals(bearer.ResolveIssuer(*bd.Gate.BearerToken)) {
|
|
||||||
prm.BearerToken = bd.Gate.BearerToken
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
prm.PrivateKey = &n.anonKey.Key.PrivateKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBucketInfo returns bucket info by name.
|
|
||||||
func (n *layer) GetBucketInfo(ctx context.Context, name string) (*data.BucketInfo, error) {
|
|
||||||
name, err := url.QueryUnescape(name)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("unescape bucket name: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if bktInfo := n.cache.GetBucket(name); bktInfo != nil {
|
|
||||||
return bktInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
containerID, err := n.ResolveBucket(ctx, name)
|
|
||||||
if err != nil {
|
|
||||||
n.log.Debug("bucket not found", zap.Error(err))
|
|
||||||
return nil, errors.GetAPIError(errors.ErrNoSuchBucket)
|
|
||||||
}
|
|
||||||
|
|
||||||
return n.containerInfo(ctx, containerID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBucketACL returns bucket acl info by name.
|
|
||||||
func (n *layer) GetBucketACL(ctx context.Context, bktInfo *data.BucketInfo) (*BucketACL, error) {
|
|
||||||
eACL, err := n.GetContainerEACL(ctx, bktInfo.CID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("get container eacl: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &BucketACL{
|
|
||||||
Info: bktInfo,
|
|
||||||
EACL: eACL,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutBucketACL puts bucket acl by name.
|
|
||||||
func (n *layer) PutBucketACL(ctx context.Context, param *PutBucketACLParams) error {
|
|
||||||
return n.setContainerEACLTable(ctx, param.BktInfo.CID, param.EACL, param.SessionToken)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListBuckets returns all user containers. The name of the bucket is a container
|
|
||||||
// id. Timestamp is omitted since it is not saved in neofs container.
|
|
||||||
func (n *layer) ListBuckets(ctx context.Context) ([]*data.BucketInfo, error) {
|
|
||||||
return n.containerList(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetObject from storage.
|
|
||||||
func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
|
|
||||||
var params getParams
|
|
||||||
|
|
||||||
params.oid = p.ObjectInfo.ID
|
|
||||||
params.bktInfo = p.BucketInfo
|
|
||||||
|
|
||||||
var decReader *encryption.Decrypter
|
|
||||||
if p.Encryption.Enabled() {
|
|
||||||
var err error
|
|
||||||
decReader, err = getDecrypter(p)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("creating decrypter: %w", err)
|
|
||||||
}
|
|
||||||
params.off = decReader.EncryptedOffset()
|
|
||||||
params.ln = decReader.EncryptedLength()
|
|
||||||
} else {
|
|
||||||
if p.Range != nil {
|
|
||||||
if p.Range.Start > p.Range.End {
|
|
||||||
panic("invalid range")
|
|
||||||
}
|
|
||||||
params.ln = p.Range.End - p.Range.Start + 1
|
|
||||||
params.off = p.Range.Start
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
payload, err := n.initObjectPayloadReader(ctx, params)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("init object payload reader: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
bufSize := uint64(32 * 1024) // configure?
|
|
||||||
if params.ln != 0 && params.ln < bufSize {
|
|
||||||
bufSize = params.ln
|
|
||||||
}
|
|
||||||
|
|
||||||
// alloc buffer for copying
|
|
||||||
buf := make([]byte, bufSize) // sync-pool it?
|
|
||||||
|
|
||||||
r := payload
|
|
||||||
if decReader != nil {
|
|
||||||
if err = decReader.SetReader(payload); err != nil {
|
|
||||||
return fmt.Errorf("set reader to decrypter: %w", err)
|
|
||||||
}
|
|
||||||
r = io.LimitReader(decReader, int64(decReader.DecryptedLength()))
|
|
||||||
}
|
|
||||||
|
|
||||||
// copy full payload
|
|
||||||
written, err := io.CopyBuffer(p.Writer, r, buf)
|
|
||||||
if err != nil {
|
|
||||||
if decReader != nil {
|
|
||||||
return fmt.Errorf("copy object payload written: '%d', decLength: '%d', params.ln: '%d' : %w", written, decReader.DecryptedLength(), params.ln, err)
|
|
||||||
}
|
|
||||||
return fmt.Errorf("copy object payload written: '%d': %w", written, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getDecrypter(p *GetObjectParams) (*encryption.Decrypter, error) {
|
|
||||||
var encRange *encryption.Range
|
|
||||||
if p.Range != nil {
|
|
||||||
encRange = &encryption.Range{Start: p.Range.Start, End: p.Range.End}
|
|
||||||
}
|
|
||||||
|
|
||||||
header := p.ObjectInfo.Headers[UploadCompletedParts]
|
|
||||||
if len(header) == 0 {
|
|
||||||
return encryption.NewDecrypter(p.Encryption, uint64(p.ObjectInfo.Size), encRange)
|
|
||||||
}
|
|
||||||
|
|
||||||
decryptedObjectSize, err := strconv.ParseUint(p.ObjectInfo.Headers[AttributeDecryptedSize], 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("parse decrypted size: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
splits := strings.Split(header, ",")
|
|
||||||
sizes := make([]uint64, len(splits))
|
|
||||||
for i, splitInfo := range splits {
|
|
||||||
part, err := ParseCompletedPartHeader(splitInfo)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("parse completed part: %w", err)
|
|
||||||
}
|
|
||||||
sizes[i] = uint64(part.Size)
|
|
||||||
}
|
|
||||||
|
|
||||||
return encryption.NewMultipartDecrypter(p.Encryption, decryptedObjectSize, sizes, encRange)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetObjectInfo returns meta information about the object.
|
|
||||||
func (n *layer) GetObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ObjectInfo, error) {
|
|
||||||
extendedObjectInfo, err := n.GetExtendedObjectInfo(ctx, p)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return extendedObjectInfo.ObjectInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetExtendedObjectInfo returns meta information and corresponding info from the tree service about the object.
|
|
||||||
func (n *layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ExtendedObjectInfo, error) {
|
|
||||||
if len(p.VersionID) == 0 {
|
|
||||||
return n.headLastVersionIfNotDeleted(ctx, p.BktInfo, p.Object)
|
|
||||||
}
|
|
||||||
|
|
||||||
return n.headVersion(ctx, p.BktInfo, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyObject from one bucket into another bucket.
|
|
||||||
func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.ExtendedObjectInfo, error) {
|
|
||||||
pr, pw := io.Pipe()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
err := n.GetObject(ctx, &GetObjectParams{
|
|
||||||
ObjectInfo: p.SrcObject,
|
|
||||||
Writer: pw,
|
|
||||||
Range: p.Range,
|
|
||||||
BucketInfo: p.ScrBktInfo,
|
|
||||||
Encryption: p.Encryption,
|
|
||||||
})
|
|
||||||
|
|
||||||
if err = pw.CloseWithError(err); err != nil {
|
|
||||||
n.log.Error("could not get object", zap.Error(err))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return n.PutObject(ctx, &PutObjectParams{
|
|
||||||
BktInfo: p.DstBktInfo,
|
|
||||||
Object: p.DstObject,
|
|
||||||
Size: p.SrcSize,
|
|
||||||
Reader: pr,
|
|
||||||
Header: p.Header,
|
|
||||||
Encryption: p.Encryption,
|
|
||||||
CopiesNumber: p.CopiesNuber,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func getRandomOID() (oid.ID, error) {
|
|
||||||
b := [32]byte{}
|
|
||||||
if _, err := rand.Read(b[:]); err != nil {
|
|
||||||
return oid.ID{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var objID oid.ID
|
|
||||||
objID.SetSHA256(b)
|
|
||||||
return objID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// deleteObject removes one object version, or records a delete marker,
// depending on the bucket's versioning settings. Failures are reported
// through obj.Error (not a return value) so batch deletion can continue;
// the (possibly annotated) obj is always returned.
func (n *layer) deleteObject(ctx context.Context, bkt *data.BucketInfo, settings *data.BucketSettings, obj *VersionedObject) *VersionedObject {
	// Case 1: a concrete version was requested, or the bucket has never had
	// versioning enabled — actually delete the payload and its tree node.
	if len(obj.VersionID) != 0 || settings.Unversioned() {
		var nodeVersion *data.NodeVersion
		if nodeVersion, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
			// deleting a missing key/version is not an error (S3 semantics)
			return dismissNotFoundError(obj)
		}

		if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
			return obj
		}

		obj.Error = n.treeService.RemoveVersion(ctx, bkt, nodeVersion.ID)
		n.cache.CleanListCacheEntriesContainingObject(obj.Name, bkt.CID)
		return obj
	}

	var newVersion *data.NodeVersion

	// Case 2: versioning suspended — the delete targets the special
	// "null" (unversioned) version, removing any previous one first.
	if settings.VersioningSuspended() {
		obj.VersionID = data.UnversionedObjectVersionID

		var nodeVersion *data.NodeVersion
		if nodeVersion, obj.Error = n.getNodeVersionToDelete(ctx, bkt, obj); obj.Error != nil {
			return dismissNotFoundError(obj)
		}

		if obj.DeleteMarkVersion, obj.Error = n.removeOldVersion(ctx, bkt, nodeVersion, obj); obj.Error != nil {
			return obj
		}
	}

	// Case 3 (also falls through from case 2): record a delete marker
	// instead of removing data. The marker has no payload, so it gets a
	// random OID rather than one derived from stored content.
	randOID, err := getRandomOID()
	if err != nil {
		obj.Error = fmt.Errorf("couldn't get random oid: %w", err)
		return obj
	}

	obj.DeleteMarkVersion = randOID.EncodeToString()

	newVersion = &data.NodeVersion{
		BaseNodeVersion: data.BaseNodeVersion{
			OID:      randOID,
			FilePath: obj.Name,
		},
		DeleteMarker: &data.DeleteMarkerInfo{
			Created: TimeNow(ctx),
			Owner:   n.Owner(ctx),
		},
		IsUnversioned: settings.VersioningSuspended(),
	}

	if _, obj.Error = n.treeService.AddVersion(ctx, bkt, newVersion); obj.Error != nil {
		return obj
	}

	n.cache.DeleteObjectName(bkt.CID, bkt.Name, obj.Name)

	return obj
}
|
|
||||||
|
|
||||||
func dismissNotFoundError(obj *VersionedObject) *VersionedObject {
|
|
||||||
if errors.IsS3Error(obj.Error, errors.ErrNoSuchKey) ||
|
|
||||||
errors.IsS3Error(obj.Error, errors.ErrNoSuchVersion) {
|
|
||||||
obj.Error = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) getNodeVersionToDelete(ctx context.Context, bkt *data.BucketInfo, obj *VersionedObject) (*data.NodeVersion, error) {
|
|
||||||
objVersion := &ObjectVersion{
|
|
||||||
BktInfo: bkt,
|
|
||||||
ObjectName: obj.Name,
|
|
||||||
VersionID: obj.VersionID,
|
|
||||||
NoErrorOnDeleteMarker: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
return n.getNodeVersion(ctx, objVersion)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) removeOldVersion(ctx context.Context, bkt *data.BucketInfo, nodeVersion *data.NodeVersion, obj *VersionedObject) (string, error) {
|
|
||||||
if nodeVersion.IsDeleteMarker() {
|
|
||||||
return obj.VersionID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", n.objectDelete(ctx, bkt, nodeVersion.OID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeleteObjects from the storage.
|
|
||||||
func (n *layer) DeleteObjects(ctx context.Context, p *DeleteObjectParams) []*VersionedObject {
|
|
||||||
for i, obj := range p.Objects {
|
|
||||||
p.Objects[i] = n.deleteObject(ctx, p.BktInfo, p.Settings, obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
return p.Objects
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.BucketInfo, error) {
|
|
||||||
bktInfo, err := n.GetBucketInfo(ctx, p.Name)
|
|
||||||
if err != nil {
|
|
||||||
if errors.IsS3Error(err, errors.ErrNoSuchBucket) {
|
|
||||||
return n.createContainer(ctx, p)
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.SessionContainerCreation != nil && session.IssuedBy(*p.SessionContainerCreation, bktInfo.Owner) {
|
|
||||||
return nil, errors.GetAPIError(errors.ErrBucketAlreadyOwnedByYou)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, errors.GetAPIError(errors.ErrBucketAlreadyExists)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error) {
|
|
||||||
var cnrID cid.ID
|
|
||||||
if err := cnrID.DecodeString(name); err != nil {
|
|
||||||
return n.resolver.Resolve(ctx, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
return cnrID, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
|
|
||||||
nodeVersions, err := n.bucketNodeVersions(ctx, p.BktInfo, "")
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(nodeVersions) != 0 {
|
|
||||||
return errors.GetAPIError(errors.ErrBucketNotEmpty)
|
|
||||||
}
|
|
||||||
|
|
||||||
n.cache.DeleteBucket(p.BktInfo.Name)
|
|
||||||
return n.neoFS.DeleteContainer(ctx, p.BktInfo.CID, p.SessionToken)
|
|
||||||
}
|
|
|
@ -1,52 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestObjectLockAttributes checks that putting a retention lock on an object
// version stores a lock object that carries the __NEOFS__EXPIRATION_EPOCH
// system attribute (so the lock actually expires on the NeoFS side).
func TestObjectLockAttributes(t *testing.T) {
	tc := prepareContext(t)
	// object locks require a versioned bucket
	err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
		BktInfo:  tc.bktInfo,
		Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
	})
	require.NoError(t, err)

	obj := tc.putObject([]byte("content obj1 v1"))

	p := &PutLockInfoParams{
		ObjVersion: &ObjectVersion{
			BktInfo:    tc.bktInfo,
			ObjectName: obj.Name,
			VersionID:  obj.VersionID(),
		},
		NewLock: &data.ObjectLock{
			Retention: &data.RetentionLock{
				Until: time.Now(),
			},
		},
		CopiesNumber: 0,
	}

	err = tc.layer.PutLockInfo(tc.ctx, p)
	require.NoError(t, err)

	foundLock, err := tc.layer.GetLockInfo(tc.ctx, p.ObjVersion)
	require.NoError(t, err)

	// fetch the underlying lock object to inspect its attributes
	lockObj := tc.getObjectByID(foundLock.Retention())
	require.NotNil(t, lockObj)

	expEpoch := false
	for _, attr := range lockObj.Attributes() {
		if attr.Key() == AttributeExpirationEpoch {
			expEpoch = true
		}
	}

	require.Truef(t, expEpoch, "system header __NEOFS__EXPIRATION_EPOCH presence")
}
|
|
|
@ -1,673 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
stderrors "errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/minio/sio"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer/encryption"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/user"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// NeoFS object attributes that tag stored objects as parts of a
	// multipart upload, and the header listing the completed parts.
	UploadIDAttributeName         = "S3-Upload-Id"
	UploadPartNumberAttributeName = "S3-Upload-Part-Number"
	UploadCompletedParts          = "S3-Completed-Parts"

	// Key prefixes distinguishing entry kinds inside MultipartInfo.Meta.
	metaPrefix = "meta-"
	aclPrefix  = "acl-"

	// S3 listing page limits and valid part-number range.
	MaxSizeUploadsList  = 1000
	MaxSizePartsList    = 1000
	UploadMinPartNumber = 1
	UploadMaxPartNumber = 10000
	// S3 part size bounds; the last part of an upload may be smaller than
	// uploadMinSize.
	uploadMinSize = 5 * 1048576    // 5MB
	uploadMaxSize = 5 * 1073741824 // 5GB
)
|
|
||||||
|
|
||||||
type (
	// UploadInfoParams identifies one multipart upload within a bucket.
	UploadInfoParams struct {
		UploadID   string
		Bkt        *data.BucketInfo
		Key        string
		Encryption encryption.Params
	}

	// CreateMultipartParams groups the arguments of CreateMultipartUpload.
	CreateMultipartParams struct {
		Info         *UploadInfoParams
		Header       map[string]string
		Data         *UploadData
		CopiesNumber uint32
	}

	// UploadData carries tag and ACL headers supplied when the upload was
	// initiated; they are applied to the final object on completion.
	UploadData struct {
		TagSet     map[string]string
		ACLHeaders map[string]string
	}

	// UploadPartParams groups the arguments of UploadPart.
	UploadPartParams struct {
		Info       *UploadInfoParams
		PartNumber int
		Size       int64
		Reader     io.Reader
	}

	// UploadCopyParams groups the arguments of UploadPartCopy.
	UploadCopyParams struct {
		Info       *UploadInfoParams
		SrcObjInfo *data.ObjectInfo
		SrcBktInfo *data.BucketInfo
		PartNumber int
		Range      *RangeParams
	}

	// CompleteMultipartParams groups the arguments of CompleteMultipartUpload.
	CompleteMultipartParams struct {
		Info  *UploadInfoParams
		Parts []*CompletedPart
	}

	// CompletedPart is a part as declared by the client on completion.
	CompletedPart struct {
		ETag       string
		PartNumber int
	}

	// EncryptedPart is a Part together with its size after encryption.
	EncryptedPart struct {
		Part
		EncryptedSize int64
	}

	// Part describes one stored part as reported by ListParts.
	Part struct {
		ETag         string
		LastModified string
		PartNumber   int
		Size         int64
	}

	// ListMultipartUploadsParams groups the arguments of ListMultipartUploads.
	ListMultipartUploadsParams struct {
		Bkt            *data.BucketInfo
		Delimiter      string
		EncodingType   string
		KeyMarker      string
		MaxUploads     int
		Prefix         string
		UploadIDMarker string
	}

	// ListPartsParams groups the arguments of ListParts.
	ListPartsParams struct {
		Info             *UploadInfoParams
		MaxParts         int
		PartNumberMarker int
	}

	// ListPartsInfo is the paginated result of ListParts.
	ListPartsInfo struct {
		Parts                []*Part
		Owner                user.ID
		NextPartNumberMarker int
		IsTruncated          bool
	}

	// ListMultipartUploadsInfo is the paginated result of ListMultipartUploads.
	ListMultipartUploadsInfo struct {
		Prefixes           []string
		Uploads            []*UploadInfo
		IsTruncated        bool
		NextKeyMarker      string
		NextUploadIDMarker string
	}
	// UploadInfo is one entry of a multipart-uploads listing; IsDir marks a
	// common-prefix ("directory") entry produced by delimiter grouping.
	UploadInfo struct {
		IsDir    bool
		Key      string
		UploadID string
		Owner    user.ID
		Created  time.Time
	}
)
|
|
||||||
|
|
||||||
// CreateMultipartUpload registers a new multipart upload in the tree service.
// User headers, ACL headers and tags are flattened into one meta map using
// distinguishing key prefixes; encryption parameters are stored alongside
// when enabled so later part uploads can be validated against them.
func (n *layer) CreateMultipartUpload(ctx context.Context, p *CreateMultipartParams) error {
	// pre-size the meta map: user headers plus optional ACL/tag entries
	metaSize := len(p.Header)
	if p.Data != nil {
		metaSize += len(p.Data.ACLHeaders)
		metaSize += len(p.Data.TagSet)
	}

	info := &data.MultipartInfo{
		Key:          p.Info.Key,
		UploadID:     p.Info.UploadID,
		Owner:        n.Owner(ctx),
		Created:      TimeNow(ctx),
		Meta:         make(map[string]string, metaSize),
		CopiesNumber: p.CopiesNumber,
	}

	for key, val := range p.Header {
		info.Meta[metaPrefix+key] = val
	}

	if p.Data != nil {
		for key, val := range p.Data.ACLHeaders {
			info.Meta[aclPrefix+key] = val
		}

		for key, val := range p.Data.TagSet {
			info.Meta[tagPrefix+key] = val
		}
	}

	if p.Info.Encryption.Enabled() {
		if err := addEncryptionHeaders(info.Meta, p.Info.Encryption); err != nil {
			return fmt.Errorf("add encryption header: %w", err)
		}
	}

	return n.treeService.CreateMultipartUpload(ctx, p.Info.Bkt, info)
}
|
|
||||||
|
|
||||||
func (n *layer) UploadPart(ctx context.Context, p *UploadPartParams) (string, error) {
|
|
||||||
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
|
|
||||||
if err != nil {
|
|
||||||
if stderrors.Is(err, ErrNodeNotFound) {
|
|
||||||
return "", errors.GetAPIError(errors.ErrNoSuchUpload)
|
|
||||||
}
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if p.Size > uploadMaxSize {
|
|
||||||
return "", errors.GetAPIError(errors.ErrEntityTooLarge)
|
|
||||||
}
|
|
||||||
|
|
||||||
objInfo, err := n.uploadPart(ctx, multipartInfo, p)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return objInfo.HashSum, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInfo, p *UploadPartParams) (*data.ObjectInfo, error) {
|
|
||||||
encInfo := FormEncryptionInfo(multipartInfo.Meta)
|
|
||||||
if err := p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
|
|
||||||
n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
|
|
||||||
return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
|
|
||||||
}
|
|
||||||
|
|
||||||
bktInfo := p.Info.Bkt
|
|
||||||
prm := PrmObjectCreate{
|
|
||||||
Container: bktInfo.CID,
|
|
||||||
Creator: bktInfo.Owner,
|
|
||||||
Attributes: make([][2]string, 2),
|
|
||||||
Payload: p.Reader,
|
|
||||||
CreationTime: TimeNow(ctx),
|
|
||||||
CopiesNumber: multipartInfo.CopiesNumber,
|
|
||||||
}
|
|
||||||
|
|
||||||
decSize := p.Size
|
|
||||||
if p.Info.Encryption.Enabled() {
|
|
||||||
r, encSize, err := encryptionReader(p.Reader, uint64(p.Size), p.Info.Encryption.Key())
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to create ecnrypted reader: %w", err)
|
|
||||||
}
|
|
||||||
prm.Attributes = append(prm.Attributes, [2]string{AttributeDecryptedSize, strconv.FormatInt(p.Size, 10)})
|
|
||||||
prm.Payload = r
|
|
||||||
p.Size = int64(encSize)
|
|
||||||
}
|
|
||||||
|
|
||||||
prm.Attributes[0][0], prm.Attributes[0][1] = UploadIDAttributeName, p.Info.UploadID
|
|
||||||
prm.Attributes[1][0], prm.Attributes[1][1] = UploadPartNumberAttributeName, strconv.Itoa(p.PartNumber)
|
|
||||||
|
|
||||||
id, hash, err := n.objectPutAndHash(ctx, prm, bktInfo)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
partInfo := &data.PartInfo{
|
|
||||||
Key: p.Info.Key,
|
|
||||||
UploadID: p.Info.UploadID,
|
|
||||||
Number: p.PartNumber,
|
|
||||||
OID: id,
|
|
||||||
Size: decSize,
|
|
||||||
ETag: hex.EncodeToString(hash),
|
|
||||||
Created: prm.CreationTime,
|
|
||||||
}
|
|
||||||
|
|
||||||
oldPartID, err := n.treeService.AddPart(ctx, bktInfo, multipartInfo.ID, partInfo)
|
|
||||||
oldPartIDNotFound := stderrors.Is(err, ErrNoNodeToRemove)
|
|
||||||
if err != nil && !oldPartIDNotFound {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if !oldPartIDNotFound {
|
|
||||||
if err = n.objectDelete(ctx, bktInfo, oldPartID); err != nil {
|
|
||||||
n.log.Error("couldn't delete old part object", zap.Error(err),
|
|
||||||
zap.String("cnrID", bktInfo.CID.EncodeToString()),
|
|
||||||
zap.String("bucket name", bktInfo.Name),
|
|
||||||
zap.String("objID", oldPartID.EncodeToString()))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
objInfo := &data.ObjectInfo{
|
|
||||||
ID: id,
|
|
||||||
CID: bktInfo.CID,
|
|
||||||
|
|
||||||
Owner: bktInfo.Owner,
|
|
||||||
Bucket: bktInfo.Name,
|
|
||||||
Size: partInfo.Size,
|
|
||||||
Created: partInfo.Created,
|
|
||||||
HashSum: partInfo.ETag,
|
|
||||||
}
|
|
||||||
|
|
||||||
return objInfo, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) UploadPartCopy(ctx context.Context, p *UploadCopyParams) (*data.ObjectInfo, error) {
|
|
||||||
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Info.Bkt, p.Info.Key, p.Info.UploadID)
|
|
||||||
if err != nil {
|
|
||||||
if stderrors.Is(err, ErrNodeNotFound) {
|
|
||||||
return nil, errors.GetAPIError(errors.ErrNoSuchUpload)
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
size := p.SrcObjInfo.Size
|
|
||||||
if p.Range != nil {
|
|
||||||
size = int64(p.Range.End - p.Range.Start + 1)
|
|
||||||
if p.Range.End > uint64(p.SrcObjInfo.Size) {
|
|
||||||
return nil, errors.GetAPIError(errors.ErrInvalidCopyPartRangeSource)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if size > uploadMaxSize {
|
|
||||||
return nil, errors.GetAPIError(errors.ErrEntityTooLarge)
|
|
||||||
}
|
|
||||||
|
|
||||||
pr, pw := io.Pipe()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
err = n.GetObject(ctx, &GetObjectParams{
|
|
||||||
ObjectInfo: p.SrcObjInfo,
|
|
||||||
Writer: pw,
|
|
||||||
Range: p.Range,
|
|
||||||
BucketInfo: p.SrcBktInfo,
|
|
||||||
})
|
|
||||||
|
|
||||||
if err = pw.CloseWithError(err); err != nil {
|
|
||||||
n.log.Error("could not get object", zap.Error(err))
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
params := &UploadPartParams{
|
|
||||||
Info: p.Info,
|
|
||||||
PartNumber: p.PartNumber,
|
|
||||||
Size: size,
|
|
||||||
Reader: pr,
|
|
||||||
}
|
|
||||||
|
|
||||||
return n.uploadPart(ctx, multipartInfo, params)
|
|
||||||
}
|
|
||||||
|
|
||||||
// implements io.Reader of payloads of the object list stored in the NeoFS network.
type multiObjectReader struct {
	ctx context.Context

	// layer provides initObjectPayloadReader for opening each part.
	layer *layer

	// prm carries the shared read parameters; prm.oid is rewritten to each
	// part's OID as reading progresses.
	prm getParams

	// curReader streams the part currently being read; nil before the first
	// part is opened.
	curReader io.Reader

	// parts are the remaining (not yet opened) parts, in order.
	parts []*data.PartInfo
}
|
|
||||||
|
|
||||||
// Read implements io.Reader over the concatenated part payloads. It drains
// the current part's reader; on its EOF it opens a reader for the next part
// and recurses to keep filling p. io.EOF is returned only once all parts are
// exhausted.
func (x *multiObjectReader) Read(p []byte) (n int, err error) {
	if x.curReader != nil {
		n, err = x.curReader.Read(p)
		// any error other than end-of-part is propagated as-is
		if !stderrors.Is(err, io.EOF) {
			return n, err
		}
	}

	if len(x.parts) == 0 {
		return n, io.EOF
	}

	// switch to the next part
	x.prm.oid = x.parts[0].OID

	x.curReader, err = x.layer.initObjectPayloadReader(x.ctx, x.prm)
	if err != nil {
		return n, fmt.Errorf("init payload reader for the next part: %w", err)
	}

	x.parts = x.parts[1:]

	// continue filling the remainder of p from the newly opened part
	next, err := x.Read(p[n:])

	return n + next, err
}
|
|
||||||
|
|
||||||
// CompleteMultipartUpload assembles the uploaded parts into the final object:
// it validates the client-supplied part list against stored parts, streams
// all parts as one payload via PutObject, then deletes the individual part
// objects and the multipart record. Returns the tags/ACL headers collected
// at upload creation together with the stored object's info.
func (n *layer) CompleteMultipartUpload(ctx context.Context, p *CompleteMultipartParams) (*UploadData, *data.ExtendedObjectInfo, error) {
	// client-declared parts must come in strictly ascending number order
	for i := 1; i < len(p.Parts); i++ {
		if p.Parts[i].PartNumber <= p.Parts[i-1].PartNumber {
			return nil, nil, errors.GetAPIError(errors.ErrInvalidPartOrder)
		}
	}

	multipartInfo, partsInfo, err := n.getUploadParts(ctx, p.Info)
	if err != nil {
		return nil, nil, err
	}
	encInfo := FormEncryptionInfo(multipartInfo.Meta)

	if len(partsInfo) < len(p.Parts) {
		return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
	}

	var multipartObjetSize int64
	var encMultipartObjectSize uint64
	parts := make([]*data.PartInfo, 0, len(p.Parts))

	// completedPartsHeader records per-part sizes as a comma-separated
	// header so GET can later rebuild part boundaries for decryption
	var completedPartsHeader strings.Builder
	for i, part := range p.Parts {
		partInfo := partsInfo[part.PartNumber]
		// a declared part must exist and its ETag must match
		if partInfo == nil || part.ETag != partInfo.ETag {
			return nil, nil, errors.GetAPIError(errors.ErrInvalidPart)
		}
		// for the last part we have no minimum size limit
		if i != len(p.Parts)-1 && partInfo.Size < uploadMinSize {
			return nil, nil, errors.GetAPIError(errors.ErrEntityTooSmall)
		}
		parts = append(parts, partInfo)
		multipartObjetSize += partInfo.Size // even if encryption is enabled size is actual (decrypted)

		if encInfo.Enabled {
			encPartSize, err := sio.EncryptedSize(uint64(partInfo.Size))
			if err != nil {
				return nil, nil, fmt.Errorf("compute encrypted size: %w", err)
			}
			encMultipartObjectSize += encPartSize
		}

		partInfoStr := partInfo.ToHeaderString()
		if i != len(p.Parts)-1 {
			partInfoStr += ","
		}
		if _, err = completedPartsHeader.WriteString(partInfoStr); err != nil {
			return nil, nil, err
		}
	}

	initMetadata := make(map[string]string, len(multipartInfo.Meta)+1)
	initMetadata[UploadCompletedParts] = completedPartsHeader.String()

	// split the stored meta back into user headers, tags and ACL headers
	// by their key prefixes
	uploadData := &UploadData{
		TagSet:     make(map[string]string),
		ACLHeaders: make(map[string]string),
	}
	for key, val := range multipartInfo.Meta {
		if strings.HasPrefix(key, metaPrefix) {
			initMetadata[strings.TrimPrefix(key, metaPrefix)] = val
		} else if strings.HasPrefix(key, tagPrefix) {
			uploadData.TagSet[strings.TrimPrefix(key, tagPrefix)] = val
		} else if strings.HasPrefix(key, aclPrefix) {
			uploadData.ACLHeaders[strings.TrimPrefix(key, aclPrefix)] = val
		}
	}

	if encInfo.Enabled {
		initMetadata[AttributeEncryptionAlgorithm] = encInfo.Algorithm
		initMetadata[AttributeHMACKey] = encInfo.HMACKey
		initMetadata[AttributeHMACSalt] = encInfo.HMACSalt
		initMetadata[AttributeDecryptedSize] = strconv.FormatInt(multipartObjetSize, 10)
		// the stored object's size is the encrypted size
		multipartObjetSize = int64(encMultipartObjectSize)
	}

	// stream all parts sequentially as the final object's payload
	r := &multiObjectReader{
		ctx:   ctx,
		layer: n,
		parts: parts,
	}

	r.prm.bktInfo = p.Info.Bkt

	extObjInfo, err := n.PutObject(ctx, &PutObjectParams{
		BktInfo:      p.Info.Bkt,
		Object:       p.Info.Key,
		Reader:       r,
		Header:       initMetadata,
		Size:         multipartObjetSize,
		Encryption:   p.Info.Encryption,
		CopiesNumber: multipartInfo.CopiesNumber,
	})
	if err != nil {
		n.log.Error("could not put a completed object (multipart upload)",
			zap.String("uploadID", p.Info.UploadID),
			zap.String("uploadKey", p.Info.Key),
			zap.Error(err))

		return nil, nil, errors.GetAPIError(errors.ErrInternalError)
	}

	// the final object is stored: the individual part objects are no longer
	// needed; delete them best-effort and drop their cache entries
	var addr oid.Address
	addr.SetContainer(p.Info.Bkt.CID)
	for _, partInfo := range partsInfo {
		if err = n.objectDelete(ctx, p.Info.Bkt, partInfo.OID); err != nil {
			n.log.Warn("could not delete upload part",
				zap.Stringer("object id", &partInfo.OID),
				zap.Stringer("bucket id", p.Info.Bkt.CID),
				zap.Error(err))
		}
		addr.SetObject(partInfo.OID)
		n.cache.DeleteObject(addr)
	}

	return uploadData, extObjInfo, n.treeService.DeleteMultipartUpload(ctx, p.Info.Bkt, multipartInfo.ID)
}
|
|
||||||
|
|
||||||
// ListMultipartUploads lists in-progress multipart uploads of a bucket with
// S3 ListMultipartUploads semantics: prefix filtering, delimiter grouping
// into common prefixes, key/upload-id marker pagination and a MaxUploads
// page limit.
func (n *layer) ListMultipartUploads(ctx context.Context, p *ListMultipartUploadsParams) (*ListMultipartUploadsInfo, error) {
	var result ListMultipartUploadsInfo
	if p.MaxUploads == 0 {
		return &result, nil
	}

	multipartInfos, err := n.treeService.GetMultipartUploadsByPrefix(ctx, p.Bkt, p.Prefix)
	if err != nil {
		return nil, err
	}

	uploads := make([]*UploadInfo, 0, len(multipartInfos))
	uniqDirs := make(map[string]struct{})

	for _, multipartInfo := range multipartInfos {
		info := uploadInfoFromMultipartInfo(multipartInfo, p.Prefix, p.Delimiter)
		if info != nil {
			if info.IsDir {
				// emit each common prefix ("directory") only once
				if _, ok := uniqDirs[info.Key]; ok {
					continue
				}
				uniqDirs[info.Key] = struct{}{}
			}
			uploads = append(uploads, info)
		}
	}

	// stable listing order: by key, then by upload id
	sort.Slice(uploads, func(i, j int) bool {
		if uploads[i].Key == uploads[j].Key {
			return uploads[i].UploadID < uploads[j].UploadID
		}
		return uploads[i].Key < uploads[j].Key
	})

	// apply pagination markers from the previous page
	if p.KeyMarker != "" {
		if p.UploadIDMarker != "" {
			uploads = trimAfterUploadIDAndKey(p.KeyMarker, p.UploadIDMarker, uploads)
		} else {
			uploads = trimAfterUploadKey(p.KeyMarker, uploads)
		}
	}

	if len(uploads) > p.MaxUploads {
		result.IsTruncated = true
		uploads = uploads[:p.MaxUploads]
		// the last returned entry becomes the next page's marker
		result.NextUploadIDMarker = uploads[len(uploads)-1].UploadID
		result.NextKeyMarker = uploads[len(uploads)-1].Key
	}

	// split directory entries from real uploads for the response
	for _, ov := range uploads {
		if ov.IsDir {
			result.Prefixes = append(result.Prefixes, ov.Key)
		} else {
			result.Uploads = append(result.Uploads, ov)
		}
	}

	return &result, nil
}
|
|
||||||
|
|
||||||
func (n *layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) error {
|
|
||||||
multipartInfo, parts, err := n.getUploadParts(ctx, p)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, info := range parts {
|
|
||||||
if err = n.objectDelete(ctx, p.Bkt, info.OID); err != nil {
|
|
||||||
n.log.Warn("couldn't delete part", zap.String("cid", p.Bkt.CID.EncodeToString()),
|
|
||||||
zap.String("oid", info.OID.EncodeToString()), zap.Int("part number", info.Number), zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return n.treeService.DeleteMultipartUpload(ctx, p.Bkt, multipartInfo.ID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) ListParts(ctx context.Context, p *ListPartsParams) (*ListPartsInfo, error) {
|
|
||||||
var res ListPartsInfo
|
|
||||||
multipartInfo, partsInfo, err := n.getUploadParts(ctx, p.Info)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
encInfo := FormEncryptionInfo(multipartInfo.Meta)
|
|
||||||
if err = p.Info.Encryption.MatchObjectEncryption(encInfo); err != nil {
|
|
||||||
n.log.Warn("mismatched obj encryptionInfo", zap.Error(err))
|
|
||||||
return nil, errors.GetAPIError(errors.ErrInvalidEncryptionParameters)
|
|
||||||
}
|
|
||||||
|
|
||||||
res.Owner = multipartInfo.Owner
|
|
||||||
|
|
||||||
parts := make([]*Part, 0, len(partsInfo))
|
|
||||||
|
|
||||||
for _, partInfo := range partsInfo {
|
|
||||||
parts = append(parts, &Part{
|
|
||||||
ETag: partInfo.ETag,
|
|
||||||
LastModified: partInfo.Created.UTC().Format(time.RFC3339),
|
|
||||||
PartNumber: partInfo.Number,
|
|
||||||
Size: partInfo.Size,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Slice(parts, func(i, j int) bool {
|
|
||||||
return parts[i].PartNumber < parts[j].PartNumber
|
|
||||||
})
|
|
||||||
|
|
||||||
if p.PartNumberMarker != 0 {
|
|
||||||
for i, part := range parts {
|
|
||||||
if part.PartNumber > p.PartNumberMarker {
|
|
||||||
parts = parts[i:]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(parts) > p.MaxParts {
|
|
||||||
res.IsTruncated = true
|
|
||||||
res.NextPartNumberMarker = parts[p.MaxParts-1].PartNumber
|
|
||||||
parts = parts[:p.MaxParts]
|
|
||||||
}
|
|
||||||
|
|
||||||
res.Parts = parts
|
|
||||||
|
|
||||||
return &res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.MultipartInfo, map[int]*data.PartInfo, error) {
|
|
||||||
multipartInfo, err := n.treeService.GetMultipartUpload(ctx, p.Bkt, p.Key, p.UploadID)
|
|
||||||
if err != nil {
|
|
||||||
if stderrors.Is(err, ErrNodeNotFound) {
|
|
||||||
return nil, nil, errors.GetAPIError(errors.ErrNoSuchUpload)
|
|
||||||
}
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
parts, err := n.treeService.GetParts(ctx, p.Bkt, multipartInfo.ID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
res := make(map[int]*data.PartInfo, len(parts))
|
|
||||||
for _, part := range parts {
|
|
||||||
res[part.Number] = part
|
|
||||||
}
|
|
||||||
|
|
||||||
return multipartInfo, res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func trimAfterUploadIDAndKey(key, id string, uploads []*UploadInfo) []*UploadInfo {
|
|
||||||
var res []*UploadInfo
|
|
||||||
if len(uploads) != 0 && uploads[len(uploads)-1].Key < key {
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, obj := range uploads {
|
|
||||||
if obj.Key >= key && obj.UploadID > id {
|
|
||||||
res = append(res, obj)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func trimAfterUploadKey(key string, objects []*UploadInfo) []*UploadInfo {
|
|
||||||
var result []*UploadInfo
|
|
||||||
if len(objects) != 0 && objects[len(objects)-1].Key <= key {
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
for i, obj := range objects {
|
|
||||||
if obj.Key > key {
|
|
||||||
result = objects[i:]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
func uploadInfoFromMultipartInfo(uploadInfo *data.MultipartInfo, prefix, delimiter string) *UploadInfo {
|
|
||||||
var isDir bool
|
|
||||||
key := uploadInfo.Key
|
|
||||||
|
|
||||||
if !strings.HasPrefix(key, prefix) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(delimiter) > 0 {
|
|
||||||
tail := strings.TrimPrefix(key, prefix)
|
|
||||||
index := strings.Index(tail, delimiter)
|
|
||||||
if index >= 0 {
|
|
||||||
isDir = true
|
|
||||||
key = prefix + tail[:index+1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &UploadInfo{
|
|
||||||
IsDir: isDir,
|
|
||||||
Key: key,
|
|
||||||
UploadID: uploadInfo.UploadID,
|
|
||||||
Owner: uploadInfo.Owner,
|
|
||||||
Created: uploadInfo.Created,
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,90 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/xml"
|
|
||||||
errorsStd "errors"
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
// PutBucketNotificationConfigurationParams groups the arguments of
// layer.PutBucketNotificationConfiguration.
type PutBucketNotificationConfigurationParams struct {
	RequestInfo   *api.ReqInfo // request metadata; not read by the layer method itself
	BktInfo       *data.BucketInfo
	Configuration *data.NotificationConfiguration
	CopiesNumber  uint32 // number of object copies to store in NeoFS
}
|
|
||||||
|
|
||||||
// PutBucketNotificationConfiguration marshals the notification configuration
// to XML, stores it as a NeoFS object, registers the object in the tree
// service, best-effort-deletes the previously registered configuration object
// (if any), and refreshes the cache.
func (n *layer) PutBucketNotificationConfiguration(ctx context.Context, p *PutBucketNotificationConfigurationParams) error {
	confXML, err := xml.Marshal(p.Configuration)
	if err != nil {
		return fmt.Errorf("marshal notify configuration: %w", err)
	}

	prm := PrmObjectCreate{
		Container:    p.BktInfo.CID,
		Creator:      p.BktInfo.Owner,
		Payload:      bytes.NewReader(confXML),
		Filepath:     p.BktInfo.NotificationConfigurationObjectName(),
		CreationTime: TimeNow(ctx),
		CopiesNumber: p.CopiesNumber,
	}

	objID, _, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
	if err != nil {
		return err
	}

	// Registering the new object returns the ID of the previously registered
	// one so the stale object can be removed; ErrNoNodeToRemove means there
	// was nothing registered before.
	objIDToDelete, err := n.treeService.PutNotificationConfigurationNode(ctx, p.BktInfo, objID)
	objIDToDeleteNotFound := errorsStd.Is(err, ErrNoNodeToRemove)
	if err != nil && !objIDToDeleteNotFound {
		return err
	}

	if !objIDToDeleteNotFound {
		// Best effort: failing to delete only leaves garbage behind, so the
		// error is logged rather than returned.
		if err = n.objectDelete(ctx, p.BktInfo, objIDToDelete); err != nil {
			n.log.Error("couldn't delete notification configuration object", zap.Error(err),
				zap.String("cnrID", p.BktInfo.CID.EncodeToString()),
				zap.String("bucket name", p.BktInfo.Name),
				zap.String("objID", objIDToDelete.EncodeToString()))
		}
	}

	n.cache.PutNotificationConfiguration(n.Owner(ctx), p.BktInfo, p.Configuration)

	return nil
}
|
|
||||||
|
|
||||||
// GetBucketNotificationConfiguration returns the bucket's notification
// configuration, preferring the per-owner cache. When the tree service has no
// configuration node, an empty configuration is returned (and cached).
func (n *layer) GetBucketNotificationConfiguration(ctx context.Context, bktInfo *data.BucketInfo) (*data.NotificationConfiguration, error) {
	owner := n.Owner(ctx)
	if conf := n.cache.GetNotificationConfiguration(owner, bktInfo); conf != nil {
		return conf, nil
	}

	objID, err := n.treeService.GetNotificationConfigurationNode(ctx, bktInfo)
	objIDNotFound := errorsStd.Is(err, ErrNodeNotFound)
	if err != nil && !objIDNotFound {
		return nil, err
	}

	conf := &data.NotificationConfiguration{}

	if !objIDNotFound {
		obj, err := n.objectGet(ctx, bktInfo, objID)
		if err != nil {
			return nil, err
		}

		if err = xml.Unmarshal(obj.Payload(), &conf); err != nil {
			return nil, fmt.Errorf("unmarshal notify configuration: %w", err)
		}
	}

	n.cache.PutNotificationConfiguration(owner, bktInfo, conf)

	return conf, nil
}
|
|
|
@ -1,813 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"mime"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/minio/sio"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/cache"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
apiErrors "github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/client"
|
|
||||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
|
||||||
"github.com/panjf2000/ants/v2"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
	// getParams describes a single payload read from NeoFS.
	getParams struct {
		// payload range
		off, ln uint64

		oid     oid.ID           // object to read
		bktInfo *data.BucketInfo // container the object lives in
	}

	// ListObjectsParamsCommon contains common parameters for ListObjectsV1 and ListObjectsV2.
	ListObjectsParamsCommon struct {
		BktInfo   *data.BucketInfo
		Delimiter string
		Encode    string
		MaxKeys   int
		Prefix    string
	}

	// ListObjectsParamsV1 contains params for ListObjectsV1.
	ListObjectsParamsV1 struct {
		ListObjectsParamsCommon
		Marker string
	}

	// ListObjectsParamsV2 contains params for ListObjectsV2.
	ListObjectsParamsV2 struct {
		ListObjectsParamsCommon
		ContinuationToken string
		StartAfter        string
		FetchOwner        bool
	}

	// allObjectParams is the internal, normalized form of the listing
	// parameters shared by the V1 and V2 implementations.
	allObjectParams struct {
		Bucket            *data.BucketInfo
		Delimiter         string
		Prefix            string
		MaxKeys           int
		Marker            string
		ContinuationToken string
	}
)
|
|
||||||
|
|
||||||
const (
	// continuationToken is a sentinel key stored in the "existed" set by
	// shouldSkip to remember that the continuation token has been reached.
	continuationToken = "<continuation-token>"
)
|
|
||||||
|
|
||||||
func newAddress(cnr cid.ID, obj oid.ID) oid.Address {
|
|
||||||
var addr oid.Address
|
|
||||||
addr.SetContainer(cnr)
|
|
||||||
addr.SetObject(obj)
|
|
||||||
return addr
|
|
||||||
}
|
|
||||||
|
|
||||||
// objectHead returns all object's headers.
func (n *layer) objectHead(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) (*object.Object, error) {
	prm := PrmObjectRead{
		Container:  bktInfo.CID,
		Object:     idObj,
		WithHeader: true,
	}

	// Sign the request on behalf of the bucket owner.
	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

	res, err := n.neoFS.ReadObject(ctx, prm)
	if err != nil {
		return nil, err
	}

	return res.Head, nil
}
|
|
||||||
|
|
||||||
// initializes payload reader of the NeoFS object.
// Zero range corresponds to full payload (panics if only offset is set).
func (n *layer) initObjectPayloadReader(ctx context.Context, p getParams) (io.Reader, error) {
	prm := PrmObjectRead{
		Container:    p.bktInfo.CID,
		Object:       p.oid,
		WithPayload:  true,
		PayloadRange: [2]uint64{p.off, p.ln},
	}

	// Sign the request on behalf of the bucket owner.
	n.prepareAuthParameters(ctx, &prm.PrmAuth, p.bktInfo.Owner)

	res, err := n.neoFS.ReadObject(ctx, prm)
	if err != nil {
		return nil, err
	}

	return res.Payload, nil
}
|
|
||||||
|
|
||||||
// objectGet returns an object with payload in the object.
func (n *layer) objectGet(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (*object.Object, error) {
	prm := PrmObjectRead{
		Container:   bktInfo.CID,
		Object:      objID,
		WithHeader:  true,
		WithPayload: true,
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

	res, err := n.neoFS.ReadObject(ctx, prm)
	if err != nil {
		return nil, err
	}

	// NOTE(review): res.Head is returned; since WithPayload is set it is
	// presumably populated with the payload too — confirm against the
	// neoFS.ReadObject implementation.
	return res.Head, nil
}
|
|
||||||
|
|
||||||
// MimeByFilePath detect mime type by file path extension.
func MimeByFilePath(path string) string {
	if ext := filepath.Ext(path); ext != "" {
		return mime.TypeByExtension(ext)
	}
	return ""
}
|
|
||||||
|
|
||||||
// encryptionReader wraps r with an AES-256-GCM encrypting reader (DARE v2.0,
// minio/sio) and returns the wrapped reader together with the size of the
// resulting encrypted stream for the given plaintext size.
func encryptionReader(r io.Reader, size uint64, key []byte) (io.Reader, uint64, error) {
	encSize, err := sio.EncryptedSize(size)
	if err != nil {
		return nil, 0, fmt.Errorf("failed to compute enc size: %w", err)
	}

	r, err = sio.EncryptReader(r, sio.Config{MinVersion: sio.Version20, MaxVersion: sio.Version20, Key: key, CipherSuites: []byte{sio.AES_256_GCM}})
	if err != nil {
		return nil, 0, fmt.Errorf("couldn't create encrypter: %w", err)
	}

	return r, encSize, nil
}
|
|
||||||
|
|
||||||
func ParseCompletedPartHeader(hdr string) (*Part, error) {
|
|
||||||
// partInfo[0] -- part number, partInfo[1] -- part size, partInfo[2] -- checksum
|
|
||||||
partInfo := strings.Split(hdr, "-")
|
|
||||||
if len(partInfo) != 3 {
|
|
||||||
return nil, fmt.Errorf("invalid completed part header")
|
|
||||||
}
|
|
||||||
num, err := strconv.Atoi(partInfo[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid completed part number '%s': %w", partInfo[0], err)
|
|
||||||
}
|
|
||||||
size, err := strconv.Atoi(partInfo[1])
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid completed part size '%s': %w", partInfo[1], err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return &Part{
|
|
||||||
ETag: partInfo[2],
|
|
||||||
PartNumber: num,
|
|
||||||
Size: int64(size),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PutObject stores object into NeoFS, took payload from io.Reader.
// The pipeline: read bucket versioning settings, optionally encrypt the
// payload, detect the content type, stream the object into NeoFS while
// hashing it, register a new version in the tree service, apply object-lock
// settings if requested, and update the caches.
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.ExtendedObjectInfo, error) {
	owner := n.Owner(ctx)

	bktSettings, err := n.GetBucketSettings(ctx, p.BktInfo)
	if err != nil {
		return nil, fmt.Errorf("couldn't get versioning settings object: %w", err)
	}

	newVersion := &data.NodeVersion{
		BaseNodeVersion: data.BaseNodeVersion{
			FilePath: p.Object,
			Size:     p.Size,
		},
		IsUnversioned: !bktSettings.VersioningEnabled(),
	}

	r := p.Reader
	if p.Encryption.Enabled() {
		// Remember the plaintext size; p.Size is replaced below by the size
		// of the encrypted stream actually stored in NeoFS.
		// NOTE(review): assumes p.Header is non-nil here — confirm callers
		// always initialize it.
		p.Header[AttributeDecryptedSize] = strconv.FormatInt(p.Size, 10)
		if err = addEncryptionHeaders(p.Header, p.Encryption); err != nil {
			return nil, fmt.Errorf("add encryption header: %w", err)
		}

		var encSize uint64
		if r, encSize, err = encryptionReader(p.Reader, uint64(p.Size), p.Encryption.Key()); err != nil {
			return nil, fmt.Errorf("create encrypter: %w", err)
		}
		p.Size = int64(encSize)
	}

	if r != nil {
		if len(p.Header[api.ContentType]) == 0 {
			// Prefer the extension-based type; fall back to sniffing the
			// first bytes of the payload.
			if contentType := MimeByFilePath(p.Object); len(contentType) == 0 {
				d := newDetector(r)
				if contentType, err := d.Detect(); err == nil {
					p.Header[api.ContentType] = contentType
				}
				r = d.MultiReader() // re-attach the sniffed bytes to the stream
			} else {
				p.Header[api.ContentType] = contentType
			}
		}
	}

	prm := PrmObjectCreate{
		Container:    p.BktInfo.CID,
		Creator:      owner,
		PayloadSize:  uint64(p.Size),
		Filepath:     p.Object,
		Payload:      r,
		CreationTime: TimeNow(ctx),
		CopiesNumber: p.CopiesNumber,
	}

	prm.Attributes = make([][2]string, 0, len(p.Header))

	for k, v := range p.Header {
		prm.Attributes = append(prm.Attributes, [2]string{k, v})
	}

	id, hash, err := n.objectPutAndHash(ctx, prm, p.BktInfo)
	if err != nil {
		return nil, err
	}

	newVersion.OID = id
	newVersion.ETag = hex.EncodeToString(hash)
	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
		return nil, fmt.Errorf("couldn't add new verion to tree service: %w", err)
	}

	if p.Lock != nil && (p.Lock.Retention != nil || p.Lock.LegalHold != nil) {
		putLockInfoPrms := &PutLockInfoParams{
			ObjVersion: &ObjectVersion{
				BktInfo:    p.BktInfo,
				ObjectName: p.Object,
				VersionID:  id.EncodeToString(),
			},
			NewLock:      p.Lock,
			CopiesNumber: p.CopiesNumber,
			NodeVersion:  newVersion, // provide new version to make one less tree service call in PutLockInfo
		}

		if err = n.PutLockInfo(ctx, putLockInfoPrms); err != nil {
			return nil, err
		}
	}

	// Listing caches for this name are now stale.
	n.cache.CleanListCacheEntriesContainingObject(p.Object, p.BktInfo.CID)

	objInfo := &data.ObjectInfo{
		ID:  id,
		CID: p.BktInfo.CID,

		Owner:       owner,
		Bucket:      p.BktInfo.Name,
		Name:        p.Object,
		Size:        p.Size,
		Created:     prm.CreationTime,
		Headers:     p.Header,
		ContentType: p.Header[api.ContentType],
		HashSum:     newVersion.ETag,
	}

	extendedObjInfo := &data.ExtendedObjectInfo{
		ObjectInfo:  objInfo,
		NodeVersion: newVersion,
	}

	n.cache.PutObjectWithName(owner, extendedObjInfo)

	return extendedObjInfo, nil
}
|
|
||||||
|
|
||||||
// headLastVersionIfNotDeleted returns extended info for the latest version of
// the named object. ErrNoSuchKey is returned when the tree service does not
// know the object or its latest version is a delete marker.
func (n *layer) headLastVersionIfNotDeleted(ctx context.Context, bkt *data.BucketInfo, objectName string) (*data.ExtendedObjectInfo, error) {
	owner := n.Owner(ctx)
	if extObjInfo := n.cache.GetLastObject(owner, bkt.Name, objectName); extObjInfo != nil {
		return extObjInfo, nil
	}

	node, err := n.treeService.GetLatestVersion(ctx, bkt, objectName)
	if err != nil {
		if errors.Is(err, ErrNodeNotFound) {
			return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
		}
		return nil, err
	}

	if node.IsDeleteMarker() {
		return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchKey)
	}

	meta, err := n.objectHead(ctx, bkt, node.OID)
	if err != nil {
		return nil, err
	}
	objInfo := objectInfoFromMeta(bkt, meta)

	extObjInfo := &data.ExtendedObjectInfo{
		ObjectInfo:  objInfo,
		NodeVersion: node,
	}

	n.cache.PutObjectWithName(owner, extObjInfo)

	return extObjInfo, nil
}
|
|
||||||
|
|
||||||
// headVersion returns extended info for a specific object version.
// p.VersionID may be the sentinel data.UnversionedObjectVersionID to address
// the unversioned object; otherwise it must equal the OID of one of the
// stored versions. ErrNoSuchVersion is returned when the version is unknown
// or its object no longer exists in the container.
func (n *layer) headVersion(ctx context.Context, bkt *data.BucketInfo, p *HeadObjectParams) (*data.ExtendedObjectInfo, error) {
	var err error
	var foundVersion *data.NodeVersion
	if p.VersionID == data.UnversionedObjectVersionID {
		foundVersion, err = n.treeService.GetUnversioned(ctx, bkt, p.Object)
		if err != nil {
			if errors.Is(err, ErrNodeNotFound) {
				return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
			}
			return nil, err
		}
	} else {
		versions, err := n.treeService.GetVersions(ctx, bkt, p.Object)
		if err != nil {
			return nil, fmt.Errorf("couldn't get versions: %w", err)
		}

		// Linear scan for the version whose OID matches the requested ID.
		for _, version := range versions {
			if version.OID.EncodeToString() == p.VersionID {
				foundVersion = version
				break
			}
		}
		if foundVersion == nil {
			return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
		}
	}

	owner := n.Owner(ctx)
	if extObjInfo := n.cache.GetObject(owner, newAddress(bkt.CID, foundVersion.OID)); extObjInfo != nil {
		return extObjInfo, nil
	}

	meta, err := n.objectHead(ctx, bkt, foundVersion.OID)
	if err != nil {
		if client.IsErrObjectNotFound(err) {
			// The tree service still references the version but the object
			// itself is gone from the container.
			return nil, apiErrors.GetAPIError(apiErrors.ErrNoSuchVersion)
		}
		return nil, err
	}
	objInfo := objectInfoFromMeta(bkt, meta)

	extObjInfo := &data.ExtendedObjectInfo{
		ObjectInfo:  objInfo,
		NodeVersion: foundVersion,
	}

	n.cache.PutObject(owner, extObjInfo)

	return extObjInfo, nil
}
|
|
||||||
|
|
||||||
// objectDelete puts tombstone object into neofs.
// The object is also evicted from the cache before the delete is issued.
func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idObj oid.ID) error {
	prm := PrmObjectDelete{
		Container: bktInfo.CID,
		Object:    idObj,
	}

	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)

	n.cache.DeleteObject(newAddress(bktInfo.CID, idObj))

	return n.neoFS.DeleteObject(ctx, prm)
}
|
|
||||||
|
|
||||||
// objectPutAndHash prepare auth parameters and invoke neofs.CreateObject.
// Returns object ID and payload sha256 hash.
func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (oid.ID, []byte, error) {
	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
	hash := sha256.New()
	// Tee the payload through the hasher while it streams to NeoFS.
	prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) {
		hash.Write(buf)
	})
	id, err := n.neoFS.CreateObject(ctx, prm)
	if err != nil {
		return oid.ID{}, nil, err
	}
	return id, hash.Sum(nil), nil
}
|
|
||||||
|
|
||||||
// ListObjectsV1 returns objects in a bucket for requests of Version 1.
func (n *layer) ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*ListObjectsInfoV1, error) {
	var result ListObjectsInfoV1

	prm := allObjectParams{
		Bucket:    p.BktInfo,
		Delimiter: p.Delimiter,
		Prefix:    p.Prefix,
		MaxKeys:   p.MaxKeys,
		Marker:    p.Marker,
	}

	objects, next, err := n.getLatestObjectsVersions(ctx, prm)
	if err != nil {
		return nil, err
	}

	if next != nil {
		// V1 pagination: the next marker is the name of the last returned entry.
		result.IsTruncated = true
		result.NextMarker = objects[len(objects)-1].Name
	}

	result.Prefixes, result.Objects = triageObjects(objects)

	return &result, nil
}
|
|
||||||
|
|
||||||
// ListObjectsV2 returns objects in a bucket for requests of Version 2.
func (n *layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*ListObjectsInfoV2, error) {
	var result ListObjectsInfoV2

	prm := allObjectParams{
		Bucket:            p.BktInfo,
		Delimiter:         p.Delimiter,
		Prefix:            p.Prefix,
		MaxKeys:           p.MaxKeys,
		Marker:            p.StartAfter,
		ContinuationToken: p.ContinuationToken,
	}

	objects, next, err := n.getLatestObjectsVersions(ctx, prm)
	if err != nil {
		return nil, err
	}

	if next != nil {
		// V2 pagination: the continuation token is the OID of the first entry
		// of the next page.
		result.IsTruncated = true
		result.NextContinuationToken = next.ID.EncodeToString()
	}

	result.Prefixes, result.Objects = triageObjects(objects)

	return &result, nil
}
|
|
||||||
|
|
||||||
// logWrapper adapts *zap.Logger to the Printf-style logger interface expected
// by the ants goroutine pool.
type logWrapper struct {
	log *zap.Logger
}

// Printf formats the message and logs it at Info level.
func (l *logWrapper) Printf(format string, args ...interface{}) {
	l.log.Info(fmt.Sprintf(format, args...))
}
|
|
||||||
|
|
||||||
// getLatestObjectsVersions returns up to p.MaxKeys latest object versions
// that match the listing parameters, sorted by name. When more entries are
// available, next holds the first entry of the following page.
func (n *layer) getLatestObjectsVersions(ctx context.Context, p allObjectParams) (objects []*data.ObjectInfo, next *data.ObjectInfo, err error) {
	if p.MaxKeys == 0 {
		return nil, nil, nil
	}

	owner := n.Owner(ctx)
	cacheKey := cache.CreateObjectsListCacheKey(p.Bucket.CID, p.Prefix, true)
	nodeVersions := n.cache.GetList(owner, cacheKey)

	if nodeVersions == nil {
		nodeVersions, err = n.treeService.GetLatestVersionsByPrefix(ctx, p.Bucket, p.Prefix)
		if err != nil {
			return nil, nil, err
		}
		n.cache.PutList(owner, cacheKey, nodeVersions)
	}

	if len(nodeVersions) == 0 {
		return nil, nil, nil
	}

	sort.Slice(nodeVersions, func(i, j int) bool {
		return nodeVersions[i].FilePath < nodeVersions[j].FilePath
	})

	// Resolve object metadata concurrently: the generator feeds filtered node
	// versions and the worker pool heads the corresponding objects.
	poolCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	objOutCh, err := n.initWorkerPool(poolCtx, 2, p, nodesGenerator(poolCtx, p, nodeVersions))
	if err != nil {
		return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
	}

	objects = make([]*data.ObjectInfo, 0, p.MaxKeys)

	for obj := range objOutCh {
		objects = append(objects, obj)
	}

	// Workers deliver results out of order; restore lexicographic order.
	sort.Slice(objects, func(i, j int) bool {
		return objects[i].Name < objects[j].Name
	})

	if len(objects) > p.MaxKeys {
		// The generator produces MaxKeys+1 entries so the extra one can serve
		// as the marker/continuation token of the next page.
		next = objects[p.MaxKeys]
		objects = objects[:p.MaxKeys]
	}

	return
}
|
|
||||||
|
|
||||||
// nodesGenerator streams node versions that pass the shouldSkip filter into
// the returned channel, stopping after p.MaxKeys+1 entries or on context
// cancellation. The channel is closed when generation stops.
func nodesGenerator(ctx context.Context, p allObjectParams, nodeVersions []*data.NodeVersion) <-chan *data.NodeVersion {
	nodeCh := make(chan *data.NodeVersion)
	existed := make(map[string]struct{}, len(nodeVersions)) // to squash the same directories

	go func() {
		var generated int
	LOOP:
		for _, node := range nodeVersions {
			if shouldSkip(node, p, existed) {
				continue
			}

			select {
			case <-ctx.Done():
				break LOOP
			case nodeCh <- node:
				generated++
				if generated == p.MaxKeys+1 { // we use maxKeys+1 to be able to know nextMarker/nextContinuationToken
					break LOOP
				}
			}
		}
		close(nodeCh)
	}()

	return nodeCh
}
|
|
||||||
|
|
||||||
// initWorkerPool starts a goroutine pool of the given size that converts node
// versions from input into object infos, retrying the lookup once and falling
// back to tree-node data on failure. The returned channel is closed after all
// submitted tasks finish and the pool is released.
func (n *layer) initWorkerPool(ctx context.Context, size int, p allObjectParams, input <-chan *data.NodeVersion) (<-chan *data.ObjectInfo, error) {
	pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{n.log}))
	if err != nil {
		return nil, fmt.Errorf("coudln't init go pool for listing: %w", err)
	}
	objCh := make(chan *data.ObjectInfo)

	go func() {
		var wg sync.WaitGroup

	LOOP:
		for node := range input {
			select {
			case <-ctx.Done():
				break LOOP
			default:
			}

			// We have to make a copy of pointer to data.NodeVersion
			// to get correct value in submitted task function.
			func(node *data.NodeVersion) {
				wg.Add(1)
				err = pool.Submit(func() {
					defer wg.Done()
					oi := n.objectInfoFromObjectsCacheOrNeoFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter)
					if oi == nil {
						// try to get object again
						if oi = n.objectInfoFromObjectsCacheOrNeoFS(ctx, p.Bucket, node, p.Prefix, p.Delimiter); oi == nil {
							// form object info with data that the tree node contains
							oi = getPartialObjectInfo(p.Bucket, node)
						}
					}
					select {
					case <-ctx.Done():
					case objCh <- oi:
					}
				})
				if err != nil {
					// The task never started: undo the Add so Wait does not
					// block forever.
					wg.Done()
					n.log.Warn("failed to submit task to pool", zap.Error(err))
				}
			}(node)
		}
		wg.Wait()
		close(objCh)
		pool.Release()
	}()

	return objCh, nil
}
|
|
||||||
|
|
||||||
// getPartialObjectInfo form data.ObjectInfo using data available in data.NodeVersion.
|
|
||||||
func getPartialObjectInfo(bktInfo *data.BucketInfo, node *data.NodeVersion) *data.ObjectInfo {
|
|
||||||
return &data.ObjectInfo{
|
|
||||||
ID: node.OID,
|
|
||||||
CID: bktInfo.CID,
|
|
||||||
Bucket: bktInfo.Name,
|
|
||||||
Name: node.FilePath,
|
|
||||||
Size: node.Size,
|
|
||||||
HashSum: node.ETag,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// bucketNodeVersions returns all node versions of the bucket under the given
// prefix, using the per-owner list cache when possible.
func (n *layer) bucketNodeVersions(ctx context.Context, bkt *data.BucketInfo, prefix string) ([]*data.NodeVersion, error) {
	var err error

	owner := n.Owner(ctx)
	cacheKey := cache.CreateObjectsListCacheKey(bkt.CID, prefix, false)
	nodeVersions := n.cache.GetList(owner, cacheKey)

	if nodeVersions == nil {
		nodeVersions, err = n.treeService.GetAllVersionsByPrefix(ctx, bkt, prefix)
		if err != nil {
			return nil, fmt.Errorf("get all versions from tree service: %w", err)
		}

		n.cache.PutList(owner, cacheKey, nodeVersions)
	}

	return nodeVersions, nil
}
|
|
||||||
|
|
||||||
// getAllObjectsVersions returns every version of every object under the given
// prefix, grouped by object name. Delete-marker versions are synthesized from
// tree data; for directory (common prefix) entries only the first version is
// kept.
func (n *layer) getAllObjectsVersions(ctx context.Context, bkt *data.BucketInfo, prefix, delimiter string) (map[string][]*data.ExtendedObjectInfo, error) {
	nodeVersions, err := n.bucketNodeVersions(ctx, bkt, prefix)
	if err != nil {
		return nil, err
	}

	versions := make(map[string][]*data.ExtendedObjectInfo, len(nodeVersions))

	for _, nodeVersion := range nodeVersions {
		oi := &data.ObjectInfo{}

		if nodeVersion.IsDeleteMarker() { // delete marker does not match any object in NeoFS
			oi.ID = nodeVersion.OID
			oi.Name = nodeVersion.FilePath
			oi.Owner = nodeVersion.DeleteMarker.Owner
			oi.Created = nodeVersion.DeleteMarker.Created
			oi.IsDeleteMarker = true
		} else {
			// Skip versions whose object cannot be resolved (failure logged
			// inside objectInfoFromObjectsCacheOrNeoFS).
			if oi = n.objectInfoFromObjectsCacheOrNeoFS(ctx, bkt, nodeVersion, prefix, delimiter); oi == nil {
				continue
			}
		}

		eoi := &data.ExtendedObjectInfo{
			ObjectInfo:  oi,
			NodeVersion: nodeVersion,
		}

		objVersions, ok := versions[oi.Name]
		if !ok {
			objVersions = []*data.ExtendedObjectInfo{eoi}
		} else if !oi.IsDir {
			objVersions = append(objVersions, eoi)
		}
		versions[oi.Name] = objVersions
	}

	return versions, nil
}
|
|
||||||
|
|
||||||
func IsSystemHeader(key string) bool {
|
|
||||||
_, ok := api.SystemMetadata[key]
|
|
||||||
return ok || strings.HasPrefix(key, api.NeoFSSystemMetadataPrefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldSkip reports whether node must be omitted from a listing page.
// Skipped are: delete markers, already-seen paths (squashing directory
// duplicates), entries at or before the marker and, until the continuation
// token has been reached, everything preceding it. existed is mutated to
// remember seen paths and the fact that the token was found.
func shouldSkip(node *data.NodeVersion, p allObjectParams, existed map[string]struct{}) bool {
	if node.IsDeleteMarker() {
		return true
	}

	// Directory entries are identified by their common-prefix name.
	filePath := node.FilePath
	if dirName := tryDirectoryName(node, p.Prefix, p.Delimiter); len(dirName) != 0 {
		filePath = dirName
	}
	if _, ok := existed[filePath]; ok {
		return true
	}

	if filePath <= p.Marker {
		return true
	}

	if p.ContinuationToken != "" {
		// Skip entries until the node whose OID equals the token shows up;
		// the sentinel key records that it has been seen.
		if _, ok := existed[continuationToken]; !ok {
			if p.ContinuationToken != node.OID.EncodeToString() {
				return true
			}
			existed[continuationToken] = struct{}{}
		}
	}

	existed[filePath] = struct{}{}
	return false
}
|
|
||||||
|
|
||||||
func triageObjects(allObjects []*data.ObjectInfo) (prefixes []string, objects []*data.ObjectInfo) {
|
|
||||||
for _, ov := range allObjects {
|
|
||||||
if ov.IsDir {
|
|
||||||
prefixes = append(prefixes, ov.Name)
|
|
||||||
} else {
|
|
||||||
objects = append(objects, ov)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func triageExtendedObjects(allObjects []*data.ExtendedObjectInfo) (prefixes []string, objects []*data.ExtendedObjectInfo) {
|
|
||||||
for _, ov := range allObjects {
|
|
||||||
if ov.ObjectInfo.IsDir {
|
|
||||||
prefixes = append(prefixes, ov.ObjectInfo.Name)
|
|
||||||
} else {
|
|
||||||
objects = append(objects, ov)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// objectInfoFromObjectsCacheOrNeoFS resolves object info for a node version:
// a synthetic directory entry if prefix/delimiter collapse it, otherwise the
// cached info or, as a last resort, a HEAD request to NeoFS. Returns nil when
// the HEAD fails (the failure is logged, not returned).
func (n *layer) objectInfoFromObjectsCacheOrNeoFS(ctx context.Context, bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) (oi *data.ObjectInfo) {
	if oiDir := tryDirectory(bktInfo, node, prefix, delimiter); oiDir != nil {
		return oiDir
	}

	owner := n.Owner(ctx)
	if extInfo := n.cache.GetObject(owner, newAddress(bktInfo.CID, node.OID)); extInfo != nil {
		return extInfo.ObjectInfo
	}

	meta, err := n.objectHead(ctx, bktInfo, node.OID)
	if err != nil {
		n.log.Warn("could not fetch object meta", zap.Error(err))
		return nil
	}

	oi = objectInfoFromMeta(bktInfo, meta)
	n.cache.PutObject(owner, &data.ExtendedObjectInfo{ObjectInfo: oi, NodeVersion: node})

	return oi
}
|
|
||||||
|
|
||||||
func tryDirectory(bktInfo *data.BucketInfo, node *data.NodeVersion, prefix, delimiter string) *data.ObjectInfo {
|
|
||||||
dirName := tryDirectoryName(node, prefix, delimiter)
|
|
||||||
if len(dirName) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return &data.ObjectInfo{
|
|
||||||
ID: node.OID, // to use it as continuation token
|
|
||||||
CID: bktInfo.CID,
|
|
||||||
IsDir: true,
|
|
||||||
IsDeleteMarker: node.IsDeleteMarker(),
|
|
||||||
Bucket: bktInfo.Name,
|
|
||||||
Name: dirName,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// tryDirectoryName forms directory name by prefix and delimiter.
|
|
||||||
// If node isn't a directory empty string is returned.
|
|
||||||
// This function doesn't check if node has a prefix. It must do a caller.
|
|
||||||
func tryDirectoryName(node *data.NodeVersion, prefix, delimiter string) string {
|
|
||||||
if len(delimiter) == 0 {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
tail := strings.TrimPrefix(node.FilePath, prefix)
|
|
||||||
index := strings.Index(tail, delimiter)
|
|
||||||
if index >= 0 {
|
|
||||||
return prefix + tail[:index+1]
|
|
||||||
}
|
|
||||||
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func wrapReader(input io.Reader, bufSize int, f func(buf []byte)) io.Reader {
|
|
||||||
if input == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
r, w := io.Pipe()
|
|
||||||
go func() {
|
|
||||||
var buf = make([]byte, bufSize)
|
|
||||||
for {
|
|
||||||
n, err := input.Read(buf)
|
|
||||||
if n > 0 {
|
|
||||||
f(buf[:n])
|
|
||||||
_, _ = w.Write(buf[:n]) // ignore error, input is not ReadCloser
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
_ = w.CloseWithError(err)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return r
|
|
||||||
}
|
|
|
@ -1,29 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/rand"
|
|
||||||
"crypto/sha256"
|
|
||||||
"io"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestWrapReader(t *testing.T) {
|
|
||||||
src := make([]byte, 1024*1024+1)
|
|
||||||
_, err := rand.Read(src)
|
|
||||||
require.NoError(t, err)
|
|
||||||
h := sha256.Sum256(src)
|
|
||||||
|
|
||||||
streamHash := sha256.New()
|
|
||||||
reader := bytes.NewReader(src)
|
|
||||||
wrappedReader := wrapReader(reader, 64*1024, func(buf []byte) {
|
|
||||||
streamHash.Write(buf)
|
|
||||||
})
|
|
||||||
|
|
||||||
dst, err := io.ReadAll(wrappedReader)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, src, dst)
|
|
||||||
require.Equal(t, h[:], streamHash.Sum(nil))
|
|
||||||
}
|
|
|
@ -1,201 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
errorsStd "errors"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/errors"
|
|
||||||
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/user"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetObjectTaggingParams holds the arguments of the GetObjectTagging call.
type GetObjectTaggingParams struct {
	ObjectVersion *ObjectVersion

	// NodeVersion can be nil. If not nil we save one request to tree service.
	NodeVersion *data.NodeVersion // optional
}
|
|
||||||
|
|
||||||
// PutObjectTaggingParams holds the arguments of the PutObjectTagging call.
type PutObjectTaggingParams struct {
	ObjectVersion *ObjectVersion
	// TagSet is the full tag set to store for the object version.
	TagSet map[string]string

	// NodeVersion can be nil. If not nil we save one request to tree service.
	NodeVersion *data.NodeVersion // optional
}
|
|
||||||
|
|
||||||
// GetObjectTagging returns the resolved version ID and the tag set of the
// object described by p.ObjectVersion, consulting the tagging cache before
// the tree service and populating the cache on a miss.
func (n *layer) GetObjectTagging(ctx context.Context, p *GetObjectTaggingParams) (string, map[string]string, error) {
	var err error
	owner := n.Owner(ctx)

	// Fast path: a concrete (non-"null") version ID can serve as a cache key
	// without resolving the node version first.
	if len(p.ObjectVersion.VersionID) != 0 && p.ObjectVersion.VersionID != data.UnversionedObjectVersionID {
		if tags := n.cache.GetTagging(owner, objectTaggingCacheKey(p.ObjectVersion)); tags != nil {
			return p.ObjectVersion.VersionID, tags, nil
		}
	}

	// Resolve the node version unless the caller already supplied it
	// (saves one tree-service request).
	nodeVersion := p.NodeVersion
	if nodeVersion == nil {
		nodeVersion, err = n.getNodeVersionFromCacheOrNeofs(ctx, p.ObjectVersion)
		if err != nil {
			return "", nil, err
		}
	}
	// Normalize VersionID to the concrete object ID; callers may have passed
	// "" (latest) or the unversioned marker.
	p.ObjectVersion.VersionID = nodeVersion.OID.EncodeToString()

	// Retry the cache with the normalized version ID.
	if tags := n.cache.GetTagging(owner, objectTaggingCacheKey(p.ObjectVersion)); tags != nil {
		return p.ObjectVersion.VersionID, tags, nil
	}

	tags, err := n.treeService.GetObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion)
	if err != nil {
		if errorsStd.Is(err, ErrNodeNotFound) {
			return "", nil, errors.GetAPIError(errors.ErrNoSuchKey)
		}
		return "", nil, err
	}

	n.cache.PutTagging(owner, objectTaggingCacheKey(p.ObjectVersion), tags)

	return p.ObjectVersion.VersionID, tags, nil
}
|
|
||||||
|
|
||||||
// PutObjectTagging stores the tag set for the object version described by
// p.ObjectVersion in the tree service, refreshing the tagging cache on
// success. It returns the resolved node version (resolving it first unless
// p.NodeVersion was supplied by the caller).
func (n *layer) PutObjectTagging(ctx context.Context, p *PutObjectTaggingParams) (nodeVersion *data.NodeVersion, err error) {
	nodeVersion = p.NodeVersion
	if nodeVersion == nil {
		nodeVersion, err = n.getNodeVersionFromCacheOrNeofs(ctx, p.ObjectVersion)
		if err != nil {
			return nil, err
		}
	}
	// Normalize the version ID to the concrete object ID for the cache key.
	p.ObjectVersion.VersionID = nodeVersion.OID.EncodeToString()

	err = n.treeService.PutObjectTagging(ctx, p.ObjectVersion.BktInfo, nodeVersion, p.TagSet)
	if err != nil {
		if errorsStd.Is(err, ErrNodeNotFound) {
			// A vanished node maps to the S3 "no such key" error.
			return nil, errors.GetAPIError(errors.ErrNoSuchKey)
		}
		return nil, err
	}

	n.cache.PutTagging(n.Owner(ctx), objectTaggingCacheKey(p.ObjectVersion), p.TagSet)

	return nodeVersion, nil
}
|
|
||||||
|
|
||||||
// DeleteObjectTagging removes the tag set of the object version described by
// p from the tree service and evicts the corresponding cache entry. It
// returns the resolved node version of the affected object.
func (n *layer) DeleteObjectTagging(ctx context.Context, p *ObjectVersion) (*data.NodeVersion, error) {
	version, err := n.getNodeVersion(ctx, p)
	if err != nil {
		return nil, err
	}

	err = n.treeService.DeleteObjectTagging(ctx, p.BktInfo, version)
	if err != nil {
		if errorsStd.Is(err, ErrNodeNotFound) {
			return nil, errors.GetAPIError(errors.ErrNoSuchKey)
		}
		return nil, err
	}

	// Use the concrete object ID so the cache key matches what the getters
	// build after version normalization.
	p.VersionID = version.OID.EncodeToString()

	n.cache.DeleteTagging(objectTaggingCacheKey(p))

	return version, nil
}
|
|
||||||
|
|
||||||
// GetBucketTagging returns the bucket tag set, preferring the cache over the
// tree service. A missing tagging node (ErrNodeNotFound) is not treated as
// an error; the tree service's result is cached and returned as-is in that
// case.
func (n *layer) GetBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) (map[string]string, error) {
	owner := n.Owner(ctx)

	if tags := n.cache.GetTagging(owner, bucketTaggingCacheKey(bktInfo.CID)); tags != nil {
		return tags, nil
	}

	tags, err := n.treeService.GetBucketTagging(ctx, bktInfo)
	if err != nil && !errorsStd.Is(err, ErrNodeNotFound) {
		return nil, err
	}

	n.cache.PutTagging(owner, bucketTaggingCacheKey(bktInfo.CID), tags)

	return tags, nil
}
|
|
||||||
|
|
||||||
func (n *layer) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, tagSet map[string]string) error {
|
|
||||||
if err := n.treeService.PutBucketTagging(ctx, bktInfo, tagSet); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
n.cache.PutTagging(n.Owner(ctx), bucketTaggingCacheKey(bktInfo.CID), tagSet)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (n *layer) DeleteBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) error {
|
|
||||||
n.cache.DeleteTagging(bucketTaggingCacheKey(bktInfo.CID))
|
|
||||||
|
|
||||||
return n.treeService.DeleteBucketTagging(ctx, bktInfo)
|
|
||||||
}
|
|
||||||
|
|
||||||
// objectTaggingCacheKey builds the tagging cache key for a specific object
// version: ".tagset.<cid>.<object-name>.<version-id>".
func objectTaggingCacheKey(p *ObjectVersion) string {
	return ".tagset." + p.BktInfo.CID.EncodeToString() + "." + p.ObjectName + "." + p.VersionID
}
|
|
||||||
|
|
||||||
// bucketTaggingCacheKey builds the tagging cache key for a whole bucket:
// ".tagset.<cid>".
func bucketTaggingCacheKey(cnrID cid.ID) string {
	return ".tagset." + cnrID.EncodeToString()
}
|
|
||||||
|
|
||||||
// getNodeVersion resolves an ObjectVersion to a tree-service node version:
// the unversioned node for the "null" version ID, the latest version for an
// empty ID, or the exact matching version otherwise. Delete markers are
// reported as ErrNoSuchKey unless objVersion.NoErrorOnDeleteMarker is set;
// a missing node is also reported as ErrNoSuchKey.
func (n *layer) getNodeVersion(ctx context.Context, objVersion *ObjectVersion) (*data.NodeVersion, error) {
	var err error
	var version *data.NodeVersion

	if objVersion.VersionID == data.UnversionedObjectVersionID {
		version, err = n.treeService.GetUnversioned(ctx, objVersion.BktInfo, objVersion.ObjectName)
	} else if len(objVersion.VersionID) == 0 {
		version, err = n.treeService.GetLatestVersion(ctx, objVersion.BktInfo, objVersion.ObjectName)
	} else {
		// Explicit version ID: scan all versions for a matching object ID.
		versions, err2 := n.treeService.GetVersions(ctx, objVersion.BktInfo, objVersion.ObjectName)
		if err2 != nil {
			return nil, err2
		}
		for _, v := range versions {
			if v.OID.EncodeToString() == objVersion.VersionID {
				version = v
				break
			}
		}
		if version == nil {
			err = errors.GetAPIError(errors.ErrNoSuchVersion)
		}
	}

	// Operator precedence: the entire "err == nil && delete-marker" clause is
	// OR-ed with the not-found check (&& binds tighter than ||).
	if err == nil && version.IsDeleteMarker() && !objVersion.NoErrorOnDeleteMarker || errorsStd.Is(err, ErrNodeNotFound) {
		return nil, errors.GetAPIError(errors.ErrNoSuchKey)
	}

	return version, err
}
|
|
||||||
|
|
||||||
func (n *layer) getNodeVersionFromCache(owner user.ID, o *ObjectVersion) *data.NodeVersion {
|
|
||||||
if len(o.VersionID) == 0 || o.VersionID == data.UnversionedObjectVersionID {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var objID oid.ID
|
|
||||||
if objID.DecodeString(o.VersionID) != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var addr oid.Address
|
|
||||||
addr.SetContainer(o.BktInfo.CID)
|
|
||||||
addr.SetObject(objID)
|
|
||||||
|
|
||||||
extObjectInfo := n.cache.GetObject(owner, addr)
|
|
||||||
if extObjectInfo == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return extObjectInfo.NodeVersion
|
|
||||||
}
|
|
|
@ -1,153 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer/encryption"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
|
||||||
)
|
|
||||||
|
|
||||||
type (
	// ListObjectsInfo contains common fields of data for ListObjectsV1 and ListObjectsV2.
	ListObjectsInfo struct {
		Prefixes    []string           // common prefixes collapsed by the delimiter
		Objects     []*data.ObjectInfo // listed objects
		IsTruncated bool               // true when the listing was cut at MaxKeys
	}

	// ListObjectsInfoV1 holds data which ListObjectsV1 returns.
	ListObjectsInfoV1 struct {
		ListObjectsInfo
		NextMarker string
	}

	// ListObjectsInfoV2 holds data which ListObjectsV2 returns.
	ListObjectsInfoV2 struct {
		ListObjectsInfo
		NextContinuationToken string
	}

	// ListObjectVersionsInfo stores info and list of objects versions.
	ListObjectVersionsInfo struct {
		CommonPrefixes      []string
		IsTruncated         bool
		KeyMarker           string
		NextKeyMarker       string
		NextVersionIDMarker string
		Version             []*data.ExtendedObjectInfo // regular (non-delete-marker) versions
		DeleteMarker        []*data.ExtendedObjectInfo // delete-marker versions
		VersionIDMarker     string
	}
)
|
|
||||||
|
|
||||||
// PathSeparator is a path components separator string.
// NOTE(review): this is os.PathSeparator, so it is platform-dependent
// ("\\" on Windows) rather than the fixed S3 "/" delimiter — confirm intended.
const PathSeparator = string(os.PathSeparator)
|
|
||||||
|
|
||||||
func userHeaders(attrs []object.Attribute) map[string]string {
|
|
||||||
result := make(map[string]string, len(attrs))
|
|
||||||
|
|
||||||
for _, attr := range attrs {
|
|
||||||
result[attr.Key()] = attr.Value()
|
|
||||||
}
|
|
||||||
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// objectInfoFromMeta builds an ObjectInfo from an object header: well-known
// attributes (FilePath, Content-Type, Timestamp) are promoted to dedicated
// fields and removed from the remaining user-header map.
func objectInfoFromMeta(bkt *data.BucketInfo, meta *object.Object) *data.ObjectInfo {
	var (
		mimeType string
		creation time.Time
	)

	headers := userHeaders(meta.Attributes())
	delete(headers, object.AttributeFilePath)
	if contentType, ok := headers[object.AttributeContentType]; ok {
		mimeType = contentType
		delete(headers, object.AttributeContentType)
	}
	if val, ok := headers[object.AttributeTimestamp]; !ok {
		// ignore empty value
	} else if dt, err := strconv.ParseInt(val, 10, 64); err == nil {
		// An unparsable timestamp is silently left in the header map.
		creation = time.Unix(dt, 0)
		delete(headers, object.AttributeTimestamp)
	}

	objID, _ := meta.ID()
	payloadChecksum, _ := meta.PayloadChecksum()
	return &data.ObjectInfo{
		ID:    objID,
		CID:   bkt.CID,
		IsDir: false,

		Bucket:      bkt.Name,
		Name:        filepathFromObject(meta),
		Created:     creation,
		ContentType: mimeType,
		Headers:     headers,
		Owner:       *meta.OwnerID(), // NOTE(review): assumes OwnerID is non-nil — nil would panic here
		Size:        int64(meta.PayloadSize()),
		HashSum:     hex.EncodeToString(payloadChecksum.Value()),
	}
}
|
|
||||||
|
|
||||||
func FormEncryptionInfo(headers map[string]string) encryption.ObjectEncryption {
|
|
||||||
algorithm := headers[AttributeEncryptionAlgorithm]
|
|
||||||
return encryption.ObjectEncryption{
|
|
||||||
Enabled: len(algorithm) > 0,
|
|
||||||
Algorithm: algorithm,
|
|
||||||
HMACKey: headers[AttributeHMACKey],
|
|
||||||
HMACSalt: headers[AttributeHMACSalt],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func addEncryptionHeaders(meta map[string]string, enc encryption.Params) error {
|
|
||||||
meta[AttributeEncryptionAlgorithm] = AESEncryptionAlgorithm
|
|
||||||
hmacKey, hmacSalt, err := enc.HMAC()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("get hmac: %w", err)
|
|
||||||
}
|
|
||||||
meta[AttributeHMACKey] = hex.EncodeToString(hmacKey)
|
|
||||||
meta[AttributeHMACSalt] = hex.EncodeToString(hmacSalt)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func filepathFromObject(o *object.Object) string {
|
|
||||||
for _, attr := range o.Attributes() {
|
|
||||||
if attr.Key() == object.AttributeFilePath {
|
|
||||||
return attr.Value()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
objID, _ := o.ID()
|
|
||||||
return objID.EncodeToString()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NameFromString splits name into a base file name and a directory path.
|
|
||||||
func NameFromString(name string) (string, string) {
|
|
||||||
ind := strings.LastIndex(name, PathSeparator)
|
|
||||||
return name[ind+1:], name[:ind+1]
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBoxData extracts accessbox.Box from context.
|
|
||||||
func GetBoxData(ctx context.Context) (*accessbox.Box, error) {
|
|
||||||
var boxData *accessbox.Box
|
|
||||||
data, ok := ctx.Value(api.BoxData).(*accessbox.Box)
|
|
||||||
if !ok || data == nil {
|
|
||||||
return nil, fmt.Errorf("couldn't get box data from context")
|
|
||||||
}
|
|
||||||
|
|
||||||
boxData = data
|
|
||||||
if boxData.Gate == nil {
|
|
||||||
boxData.Gate = &accessbox.GateData{}
|
|
||||||
}
|
|
||||||
return boxData, nil
|
|
||||||
}
|
|
|
@ -1,79 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"sort"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ListObjectVersions implements the S3 ListObjectVersions operation: it
// collects every version (and delete marker) under p.Prefix, orders object
// names lexicographically with versions newest-first within each name,
// applies the key/version-id markers, and truncates the result to p.MaxKeys.
func (n *layer) ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error) {
	var (
		allObjects = make([]*data.ExtendedObjectInfo, 0, p.MaxKeys)
		res        = &ListObjectVersionsInfo{}
	)

	versions, err := n.getAllObjectsVersions(ctx, p.BktInfo, p.Prefix, p.Delimiter)
	if err != nil {
		return nil, err
	}

	// Deterministic output: object names in ascending lexicographic order.
	sortedNames := make([]string, 0, len(versions))
	for k := range versions {
		sortedNames = append(sortedNames, k)
	}
	sort.Strings(sortedNames)

	for _, name := range sortedNames {
		sortedVersions := versions[name]
		sort.Slice(sortedVersions, func(i, j int) bool {
			return sortedVersions[j].NodeVersion.Timestamp < sortedVersions[i].NodeVersion.Timestamp // sort in reverse order
		})

		for i, version := range sortedVersions {
			version.IsLatest = i == 0 // newest version of each name comes first
			allObjects = append(allObjects, version)
		}
	}

	// Skip everything before the requested markers.
	// NOTE(review): name and version-id are compared independently with >= —
	// confirm this matches the intended marker semantics when the version-id
	// ordering does not follow the name ordering.
	for i, obj := range allObjects {
		if obj.ObjectInfo.Name >= p.KeyMarker && obj.ObjectInfo.VersionID() >= p.VersionIDMarker {
			allObjects = allObjects[i:]
			break
		}
	}

	res.CommonPrefixes, allObjects = triageExtendedObjects(allObjects)

	if len(allObjects) > p.MaxKeys {
		res.IsTruncated = true
		// Next* markers point at the first entry of the following page.
		res.NextKeyMarker = allObjects[p.MaxKeys].ObjectInfo.Name
		res.NextVersionIDMarker = allObjects[p.MaxKeys].ObjectInfo.VersionID()

		allObjects = allObjects[:p.MaxKeys]
		// Current-page markers reflect the last entry actually returned.
		res.KeyMarker = allObjects[p.MaxKeys-1].ObjectInfo.Name
		res.VersionIDMarker = allObjects[p.MaxKeys-1].ObjectInfo.VersionID()
	}

	res.Version, res.DeleteMarker = triageVersions(allObjects)
	return res, nil
}
|
|
||||||
|
|
||||||
func triageVersions(objVersions []*data.ExtendedObjectInfo) ([]*data.ExtendedObjectInfo, []*data.ExtendedObjectInfo) {
|
|
||||||
if len(objVersions) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var resVersion []*data.ExtendedObjectInfo
|
|
||||||
var resDelMarkVersions []*data.ExtendedObjectInfo
|
|
||||||
|
|
||||||
for _, version := range objVersions {
|
|
||||||
if version.NodeVersion.IsDeleteMarker() {
|
|
||||||
resDelMarkVersions = append(resDelMarkVersions, version)
|
|
||||||
} else {
|
|
||||||
resVersion = append(resVersion, version)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return resVersion, resDelMarkVersions
|
|
||||||
}
|
|
|
@ -1,307 +0,0 @@
|
||||||
package layer
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
|
|
||||||
bearertest "github.com/nspcc-dev/neofs-sdk-go/bearer/test"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/object"
|
|
||||||
oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
|
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/user"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
"go.uber.org/zap"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (tc *testContext) putObject(content []byte) *data.ObjectInfo {
|
|
||||||
extObjInfo, err := tc.layer.PutObject(tc.ctx, &PutObjectParams{
|
|
||||||
BktInfo: tc.bktInfo,
|
|
||||||
Object: tc.obj,
|
|
||||||
Size: int64(len(content)),
|
|
||||||
Reader: bytes.NewReader(content),
|
|
||||||
Header: make(map[string]string),
|
|
||||||
})
|
|
||||||
require.NoError(tc.t, err)
|
|
||||||
|
|
||||||
return extObjInfo.ObjectInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
// getObject resolves the object info for objectName/versionID and downloads
// its payload. When needError is true it asserts that the lookup fails and
// returns (nil, nil); otherwise both steps must succeed.
func (tc *testContext) getObject(objectName, versionID string, needError bool) (*data.ObjectInfo, []byte) {
	objInfo, err := tc.layer.GetObjectInfo(tc.ctx, &HeadObjectParams{
		BktInfo:   tc.bktInfo,
		Object:    objectName,
		VersionID: versionID,
	})
	if needError {
		require.Error(tc.t, err)
		return nil, nil
	}
	require.NoError(tc.t, err)

	content := bytes.NewBuffer(nil)
	err = tc.layer.GetObject(tc.ctx, &GetObjectParams{
		ObjectInfo: objInfo,
		Writer:     content,
		BucketInfo: tc.bktInfo,
	})
	require.NoError(tc.t, err)

	return objInfo, content.Bytes()
}
|
|
||||||
|
|
||||||
func (tc *testContext) deleteObject(objectName, versionID string, settings *data.BucketSettings) {
|
|
||||||
p := &DeleteObjectParams{
|
|
||||||
BktInfo: tc.bktInfo,
|
|
||||||
Settings: settings,
|
|
||||||
Objects: []*VersionedObject{
|
|
||||||
{Name: objectName, VersionID: versionID},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
deletedObjects := tc.layer.DeleteObjects(tc.ctx, p)
|
|
||||||
for _, obj := range deletedObjects {
|
|
||||||
require.NoError(tc.t, obj.Error)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tc *testContext) listObjectsV1() []*data.ObjectInfo {
|
|
||||||
res, err := tc.layer.ListObjectsV1(tc.ctx, &ListObjectsParamsV1{
|
|
||||||
ListObjectsParamsCommon: ListObjectsParamsCommon{
|
|
||||||
BktInfo: tc.bktInfo,
|
|
||||||
MaxKeys: 1000,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
require.NoError(tc.t, err)
|
|
||||||
return res.Objects
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tc *testContext) listObjectsV2() []*data.ObjectInfo {
|
|
||||||
res, err := tc.layer.ListObjectsV2(tc.ctx, &ListObjectsParamsV2{
|
|
||||||
ListObjectsParamsCommon: ListObjectsParamsCommon{
|
|
||||||
BktInfo: tc.bktInfo,
|
|
||||||
MaxKeys: 1000,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
require.NoError(tc.t, err)
|
|
||||||
return res.Objects
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tc *testContext) listVersions() *ListObjectVersionsInfo {
|
|
||||||
res, err := tc.layer.ListObjectVersions(tc.ctx, &ListObjectVersionsParams{
|
|
||||||
BktInfo: tc.bktInfo,
|
|
||||||
MaxKeys: 1000,
|
|
||||||
})
|
|
||||||
require.NoError(tc.t, err)
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tc *testContext) checkListObjects(ids ...oid.ID) {
|
|
||||||
objs := tc.listObjectsV1()
|
|
||||||
require.Equal(tc.t, len(ids), len(objs))
|
|
||||||
for _, id := range ids {
|
|
||||||
require.Contains(tc.t, ids, id)
|
|
||||||
}
|
|
||||||
|
|
||||||
objs = tc.listObjectsV2()
|
|
||||||
require.Equal(tc.t, len(ids), len(objs))
|
|
||||||
for _, id := range ids {
|
|
||||||
require.Contains(tc.t, ids, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (tc *testContext) getObjectByID(objID oid.ID) *object.Object {
|
|
||||||
for _, obj := range tc.testNeoFS.Objects() {
|
|
||||||
id, _ := obj.ID()
|
|
||||||
if id.Equals(objID) {
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// testContext bundles everything a layer test needs: the layer under test,
// a pre-created bucket, a default object name and the in-memory NeoFS stub.
type testContext struct {
	t         *testing.T
	ctx       context.Context // carries the accessbox.Box of the test user
	layer     Client
	bktInfo   *data.BucketInfo
	obj       string // default object name used by the helpers
	testNeoFS *TestNeoFS
}
|
|
||||||
|
|
||||||
// prepareContext builds a ready-to-use testContext: a signed bearer token in
// the request context, an in-memory NeoFS with one container ("testbucket1"),
// and a layer wired to an in-memory tree service. An optional cachesConfig
// overrides the default cache configuration (only the first one is used).
func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
	logger := zap.NewExample()

	key, err := keys.NewPrivateKey()
	require.NoError(t, err)

	bearerToken := bearertest.Token()
	require.NoError(t, bearerToken.Sign(key.PrivateKey))

	// Put the access box into the context the way the auth middleware would.
	ctx := context.WithValue(context.Background(), api.BoxData, &accessbox.Box{
		Gate: &accessbox.GateData{
			BearerToken: &bearerToken,
			GateKey:     key.PublicKey(),
		},
	})
	tp := NewTestNeoFS()

	bktName := "testbucket1"
	bktID, err := tp.CreateContainer(ctx, PrmContainerCreate{
		Name: bktName,
	})
	require.NoError(t, err)

	config := DefaultCachesConfigs(logger)
	if len(cachesConfig) != 0 {
		config = cachesConfig[0]
	}

	// The bucket owner is derived from the same key that signed the token.
	var owner user.ID
	user.IDFromKey(&owner, key.PrivateKey.PublicKey)

	layerCfg := &Config{
		Caches:      config,
		AnonKey:     AnonymousKey{Key: key},
		TreeService: NewTreeService(),
	}

	return &testContext{
		ctx:   ctx,
		layer: NewLayer(logger, tp, layerCfg),
		bktInfo: &data.BucketInfo{
			Name:  bktName,
			Owner: owner,
			CID:   bktID,
		},
		obj:       "obj1",
		t:         t,
		testNeoFS: tp,
	}
}
|
|
||||||
|
|
||||||
// TestSimpleVersioning checks that with versioning enabled both versions of
// an object remain retrievable, the empty version ID resolves to the latest
// one, and listing shows only the latest version.
func TestSimpleVersioning(t *testing.T) {
	tc := prepareContext(t)
	err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
		BktInfo:  tc.bktInfo,
		Settings: &data.BucketSettings{Versioning: data.VersioningEnabled},
	})
	require.NoError(t, err)

	obj1Content1 := []byte("content obj1 v1")
	obj1v1 := tc.putObject(obj1Content1)

	obj1Content2 := []byte("content obj1 v2")
	obj1v2 := tc.putObject(obj1Content2)

	_, buffer2 := tc.getObject(tc.obj, "", false)
	require.Equal(t, obj1Content2, buffer2)

	_, buffer1 := tc.getObject(tc.obj, obj1v1.ID.EncodeToString(), false)
	require.Equal(t, obj1Content1, buffer1)

	tc.checkListObjects(obj1v2.ID)
}
|
|
||||||
|
|
||||||
// TestSimpleNoVersioning checks that without versioning the second put
// supersedes the first: the old version ID is no longer retrievable and
// listing shows only the latest object.
func TestSimpleNoVersioning(t *testing.T) {
	tc := prepareContext(t)

	obj1Content1 := []byte("content obj1 v1")
	obj1v1 := tc.putObject(obj1Content1)

	obj1Content2 := []byte("content obj1 v2")
	obj1v2 := tc.putObject(obj1Content2)

	_, buffer2 := tc.getObject(tc.obj, "", false)
	require.Equal(t, obj1Content2, buffer2)

	// The superseded version must not be retrievable by its ID.
	tc.getObject(tc.obj, obj1v1.ID.EncodeToString(), true)
	tc.checkListObjects(obj1v2.ID)
}
|
|
||||||
|
|
||||||
// TestVersioningDeleteObject checks that deleting a versioned object with no
// explicit version ID (creating a delete marker) hides the object from both
// GET and listing.
func TestVersioningDeleteObject(t *testing.T) {
	tc := prepareContext(t)
	settings := &data.BucketSettings{Versioning: data.VersioningEnabled}
	err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
		BktInfo:  tc.bktInfo,
		Settings: settings,
	})
	require.NoError(t, err)

	tc.putObject([]byte("content obj1 v1"))
	tc.putObject([]byte("content obj1 v2"))

	tc.deleteObject(tc.obj, "", settings)
	tc.getObject(tc.obj, "", true)

	tc.checkListObjects()
}
|
|
||||||
|
|
||||||
// TestGetUnversioned checks that an object put before versioning was
// configured can be fetched via the "null" (unversioned) version ID once
// the bucket is explicitly set to unversioned.
func TestGetUnversioned(t *testing.T) {
	tc := prepareContext(t)

	objContent := []byte("content obj1 v1")
	objInfo := tc.putObject(objContent)

	settings := &data.BucketSettings{Versioning: data.VersioningUnversioned}
	err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
		BktInfo:  tc.bktInfo,
		Settings: settings,
	})
	require.NoError(t, err)

	resInfo, buffer := tc.getObject(tc.obj, data.UnversionedObjectVersionID, false)
	require.Equal(t, objContent, buffer)
	require.Equal(t, objInfo.VersionID(), resInfo.VersionID())
}
|
|
||||||
|
|
||||||
// TestVersioningDeleteSpecificObjectVersion checks version-targeted deletes:
// removing a middle version leaves the latest intact, an untargeted delete
// adds a delete marker that hides the object, and removing that latest
// delete marker restores visibility of the newest remaining version.
func TestVersioningDeleteSpecificObjectVersion(t *testing.T) {
	tc := prepareContext(t)
	settings := &data.BucketSettings{Versioning: data.VersioningEnabled}
	err := tc.layer.PutBucketSettings(tc.ctx, &PutSettingsParams{
		BktInfo:  tc.bktInfo,
		Settings: settings,
	})
	require.NoError(t, err)

	tc.putObject([]byte("content obj1 v1"))
	objV2Info := tc.putObject([]byte("content obj1 v2"))
	objV3Content := []byte("content obj1 v3")
	objV3Info := tc.putObject(objV3Content)

	// Delete v2 specifically; it must become unreachable.
	tc.deleteObject(tc.obj, objV2Info.VersionID(), settings)
	tc.getObject(tc.obj, objV2Info.VersionID(), true)

	_, buffer3 := tc.getObject(tc.obj, "", false)
	require.Equal(t, objV3Content, buffer3)

	// Untargeted delete creates a delete marker hiding the object.
	tc.deleteObject(tc.obj, "", settings)
	tc.getObject(tc.obj, "", true)

	// Removing the latest delete marker restores the newest version.
	versions := tc.listVersions()
	for _, ver := range versions.DeleteMarker {
		if ver.IsLatest {
			tc.deleteObject(tc.obj, ver.ObjectInfo.VersionID(), settings)
		}
	}

	resInfo, buffer := tc.getObject(tc.obj, "", false)
	require.Equal(t, objV3Content, buffer)
	require.Equal(t, objV3Info.VersionID(), resInfo.VersionID())
}
|
|
||||||
|
|
||||||
// TestNoVersioningDeleteObject checks that in an unversioned bucket an
// untargeted delete removes the object entirely: GET fails and the listing
// is empty.
func TestNoVersioningDeleteObject(t *testing.T) {
	tc := prepareContext(t)

	tc.putObject([]byte("content obj1 v1"))
	tc.putObject([]byte("content obj1 v2"))

	settings, err := tc.layer.GetBucketSettings(tc.ctx, tc.bktInfo)
	require.NoError(t, err)

	tc.deleteObject(tc.obj, "", settings)
	tc.getObject(tc.obj, "", true)
	tc.checkListObjects()
}
|
|
|
@ -77,7 +77,7 @@ type (
|
||||||
AppendCORSHeaders(w http.ResponseWriter, r *http.Request)
|
AppendCORSHeaders(w http.ResponseWriter, r *http.Request)
|
||||||
CreateMultipartUploadHandler(http.ResponseWriter, *http.Request)
|
CreateMultipartUploadHandler(http.ResponseWriter, *http.Request)
|
||||||
UploadPartHandler(http.ResponseWriter, *http.Request)
|
UploadPartHandler(http.ResponseWriter, *http.Request)
|
||||||
UploadPartCopy(w http.ResponseWriter, r *http.Request)
|
UploadPartCopyHandler(w http.ResponseWriter, r *http.Request)
|
||||||
CompleteMultipartUploadHandler(http.ResponseWriter, *http.Request)
|
CompleteMultipartUploadHandler(http.ResponseWriter, *http.Request)
|
||||||
AbortMultipartUploadHandler(http.ResponseWriter, *http.Request)
|
AbortMultipartUploadHandler(http.ResponseWriter, *http.Request)
|
||||||
ListPartsHandler(w http.ResponseWriter, r *http.Request)
|
ListPartsHandler(w http.ResponseWriter, r *http.Request)
|
||||||
|
@ -217,7 +217,7 @@ func Attach(r *mux.Router, domains []string, m MaxClients, h Handler, center aut
|
||||||
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
|
bucket.Methods(http.MethodHead).Path("/{object:.+}").HandlerFunc(
|
||||||
m.Handle(metrics.APIStats("headobject", h.HeadObjectHandler))).Name("HeadObject")
|
m.Handle(metrics.APIStats("headobject", h.HeadObjectHandler))).Name("HeadObject")
|
||||||
// CopyObjectPart
|
// CopyObjectPart
|
||||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").Headers(hdrAmzCopySource, "").HandlerFunc(m.Handle(metrics.APIStats("uploadpartcopy", h.UploadPartCopy))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").
|
bucket.Methods(http.MethodPut).Path("/{object:.+}").Headers(hdrAmzCopySource, "").HandlerFunc(m.Handle(metrics.APIStats("uploadpartcopy", h.UploadPartCopyHandler))).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}").
|
||||||
Name("UploadPartCopy")
|
Name("UploadPartCopy")
|
||||||
// PutObjectPart
|
// PutObjectPart
|
||||||
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(
|
||||||
|
|
139
cmd/s3-gw/app.go
139
cmd/s3-gw/app.go
|
@ -19,10 +19,9 @@ import (
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/auth"
|
"github.com/nspcc-dev/neofs-s3-gw/api/auth"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/cache"
|
"github.com/nspcc-dev/neofs-s3-gw/api/cache"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/handler"
|
"github.com/nspcc-dev/neofs-s3-gw/api/handler"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/notifications"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/resolver"
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/internal/neofs"
|
"github.com/nspcc-dev/neofs-s3-gw/internal/neofs"
|
||||||
|
"github.com/nspcc-dev/neofs-s3-gw/internal/notifications"
|
||||||
|
"github.com/nspcc-dev/neofs-s3-gw/internal/resolver"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/internal/version"
|
"github.com/nspcc-dev/neofs-s3-gw/internal/version"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/internal/wallet"
|
"github.com/nspcc-dev/neofs-s3-gw/internal/wallet"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
"github.com/nspcc-dev/neofs-sdk-go/netmap"
|
||||||
|
@ -40,7 +39,6 @@ type (
|
||||||
pool *pool.Pool
|
pool *pool.Pool
|
||||||
key *keys.PrivateKey
|
key *keys.PrivateKey
|
||||||
nc *notifications.Controller
|
nc *notifications.Controller
|
||||||
obj layer.Client
|
|
||||||
api api.Handler
|
api api.Handler
|
||||||
|
|
||||||
servers []Server
|
servers []Server
|
||||||
|
@ -115,47 +113,6 @@ func (a *App) init(ctx context.Context) {
|
||||||
a.initServers(ctx)
|
a.initServers(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// initLayer wires up the object layer: bucket name resolver, tree-service
// client, a random key for anonymous requests, and (when NATS is enabled)
// the notifications controller. Any failure is fatal for the application.
func (a *App) initLayer(ctx context.Context) {
	a.initResolver()

	treeServiceEndpoint := a.cfg.GetString(cfgTreeServiceEndpoint)
	treeService, err := neofs.NewTreeClient(ctx, treeServiceEndpoint, a.key)
	if err != nil {
		a.log.Fatal("failed to create tree service", zap.Error(err))
	}
	a.log.Info("init tree service", zap.String("endpoint", treeServiceEndpoint))

	// prepare random key for anonymous requests
	randomKey, err := keys.NewPrivateKey()
	if err != nil {
		a.log.Fatal("couldn't generate random key", zap.Error(err))
	}

	layerCfg := &layer.Config{
		Caches: getCacheOptions(a.cfg, a.log),
		AnonKey: layer.AnonymousKey{
			Key: randomKey,
		},
		Resolver:    a.bucketResolver,
		TreeService: treeService,
	}

	// prepare object layer
	a.obj = layer.NewLayer(a.log, neofs.NewNeoFS(a.pool), layerCfg)

	// Notifications are optional; the layer is only Initialize()-d with a
	// controller when NATS is enabled in the config.
	if a.cfg.GetBool(cfgEnableNATS) {
		nopts := getNotificationsOptions(a.cfg, a.log)
		a.nc, err = notifications.NewController(nopts, a.log)
		if err != nil {
			a.log.Fatal("failed to enable notifications", zap.Error(err))
		}

		if err = a.obj.Initialize(ctx, a.nc); err != nil {
			a.log.Fatal("couldn't initialize layer", zap.Error(err))
		}
	}
}
|
|
||||||
|
|
||||||
func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
|
func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
|
||||||
policies, err := newPlacementPolicy(getDefaultPolicyValue(v), v.GetString(cfgPolicyRegionMapFile))
|
policies, err := newPlacementPolicy(getDefaultPolicyValue(v), v.GetString(cfgPolicyRegionMapFile))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -178,8 +135,62 @@ func getDefaultPolicyValue(v *viper.Viper) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *App) initAPI(ctx context.Context) {
|
func (a *App) initAPI(ctx context.Context) {
|
||||||
a.initLayer(ctx)
|
a.initResolver()
|
||||||
a.initHandler()
|
|
||||||
|
treeServiceEndpoint := a.cfg.GetString(cfgTreeServiceEndpoint)
|
||||||
|
treeService, err := neofs.NewTreeClient(ctx, treeServiceEndpoint, a.key)
|
||||||
|
if err != nil {
|
||||||
|
a.log.Fatal("failed to create tree service", zap.Error(err))
|
||||||
|
}
|
||||||
|
a.log.Info("init tree service", zap.String("endpoint", treeServiceEndpoint))
|
||||||
|
|
||||||
|
// prepare random key for anonymous requests
|
||||||
|
randomKey, err := keys.NewPrivateKey()
|
||||||
|
if err != nil {
|
||||||
|
a.log.Fatal("couldn't generate random key", zap.Error(err))
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.cfg.GetBool(cfgEnableNATS) {
|
||||||
|
nopts := getNotificationsOptions(a.cfg, a.log)
|
||||||
|
a.nc, err = notifications.NewController(nopts, a.log)
|
||||||
|
if err != nil {
|
||||||
|
a.log.Fatal("failed to enable notifications", zap.Error(err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg := &handler.Config{
|
||||||
|
Policy: a.settings.policies,
|
||||||
|
DefaultMaxAge: handler.DefaultMaxAge,
|
||||||
|
NotificatorEnabled: a.cfg.GetBool(cfgEnableNATS),
|
||||||
|
CopiesNumber: handler.DefaultCopiesNumber,
|
||||||
|
AnonKey: handler.AnonymousKey{
|
||||||
|
Key: randomKey,
|
||||||
|
},
|
||||||
|
Cache: getCacheOptions(a.cfg, a.log),
|
||||||
|
Resolver: a.bucketResolver,
|
||||||
|
TreeService: treeService,
|
||||||
|
NeoFS: neofs.NewNeoFS(a.pool),
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.cfg.IsSet(cfgDefaultMaxAge) {
|
||||||
|
defaultMaxAge := a.cfg.GetInt(cfgDefaultMaxAge)
|
||||||
|
|
||||||
|
if defaultMaxAge <= 0 && defaultMaxAge != -1 {
|
||||||
|
a.log.Fatal("invalid defaultMaxAge",
|
||||||
|
zap.String("parameter", cfgDefaultMaxAge),
|
||||||
|
zap.String("value in config", strconv.Itoa(defaultMaxAge)))
|
||||||
|
}
|
||||||
|
cfg.DefaultMaxAge = defaultMaxAge
|
||||||
|
}
|
||||||
|
|
||||||
|
if val := a.cfg.GetUint32(cfgSetCopiesNumber); val > 0 {
|
||||||
|
cfg.CopiesNumber = val
|
||||||
|
}
|
||||||
|
|
||||||
|
a.api, err = handler.New(ctx, a.log, a.nc, cfg)
|
||||||
|
if err != nil {
|
||||||
|
a.log.Fatal("could not initialize API handler", zap.Error(err))
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *App) initMetrics() {
|
func (a *App) initMetrics() {
|
||||||
|
@ -584,8 +595,8 @@ func getNotificationsOptions(v *viper.Viper, l *zap.Logger) *notifications.Optio
|
||||||
return &cfg
|
return &cfg
|
||||||
}
|
}
|
||||||
|
|
||||||
func getCacheOptions(v *viper.Viper, l *zap.Logger) *layer.CachesConfig {
|
func getCacheOptions(v *viper.Viper, l *zap.Logger) *handler.CachesConfig {
|
||||||
cacheCfg := layer.DefaultCachesConfigs(l)
|
cacheCfg := handler.DefaultCachesConfigs(l)
|
||||||
|
|
||||||
cacheCfg.Objects.Lifetime = getLifetime(v, l, cfgObjectsCacheLifetime, cacheCfg.Objects.Lifetime)
|
cacheCfg.Objects.Lifetime = getLifetime(v, l, cfgObjectsCacheLifetime, cacheCfg.Objects.Lifetime)
|
||||||
cacheCfg.Objects.Size = getSize(v, l, cfgObjectsCacheSize, cacheCfg.Objects.Size)
|
cacheCfg.Objects.Size = getSize(v, l, cfgObjectsCacheSize, cacheCfg.Objects.Size)
|
||||||
|
@ -647,36 +658,6 @@ func getAccessBoxCacheConfig(v *viper.Viper, l *zap.Logger) *cache.Config {
|
||||||
return cacheCfg
|
return cacheCfg
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *App) initHandler() {
|
|
||||||
cfg := &handler.Config{
|
|
||||||
Policy: a.settings.policies,
|
|
||||||
DefaultMaxAge: handler.DefaultMaxAge,
|
|
||||||
NotificatorEnabled: a.cfg.GetBool(cfgEnableNATS),
|
|
||||||
CopiesNumber: handler.DefaultCopiesNumber,
|
|
||||||
}
|
|
||||||
|
|
||||||
if a.cfg.IsSet(cfgDefaultMaxAge) {
|
|
||||||
defaultMaxAge := a.cfg.GetInt(cfgDefaultMaxAge)
|
|
||||||
|
|
||||||
if defaultMaxAge <= 0 && defaultMaxAge != -1 {
|
|
||||||
a.log.Fatal("invalid defaultMaxAge",
|
|
||||||
zap.String("parameter", cfgDefaultMaxAge),
|
|
||||||
zap.String("value in config", strconv.Itoa(defaultMaxAge)))
|
|
||||||
}
|
|
||||||
cfg.DefaultMaxAge = defaultMaxAge
|
|
||||||
}
|
|
||||||
|
|
||||||
if val := a.cfg.GetUint32(cfgSetCopiesNumber); val > 0 {
|
|
||||||
cfg.CopiesNumber = val
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
|
||||||
a.api, err = handler.New(a.log, a.obj, a.nc, cfg)
|
|
||||||
if err != nil {
|
|
||||||
a.log.Fatal("could not initialize API handler", zap.Error(err))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func readRegionMap(filePath string) (map[string]string, error) {
|
func readRegionMap(filePath string) (map[string]string, error) {
|
||||||
regionMap := make(map[string]string)
|
regionMap := make(map[string]string)
|
||||||
|
|
||||||
|
|
|
@ -9,7 +9,7 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/resolver"
|
"github.com/nspcc-dev/neofs-s3-gw/internal/resolver"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/internal/version"
|
"github.com/nspcc-dev/neofs-s3-gw/internal/version"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/pool"
|
"github.com/nspcc-dev/neofs-sdk-go/pool"
|
||||||
"github.com/spf13/pflag"
|
"github.com/spf13/pflag"
|
||||||
|
|
|
@ -11,7 +11,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
objectv2 "github.com/nspcc-dev/neofs-api-go/v2/object"
|
objectv2 "github.com/nspcc-dev/neofs-api-go/v2/object"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
"github.com/nspcc-dev/neofs-s3-gw/api/handler"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/authmate"
|
"github.com/nspcc-dev/neofs-s3-gw/authmate"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/creds/tokens"
|
"github.com/nspcc-dev/neofs-s3-gw/creds/tokens"
|
||||||
apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
|
apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
|
||||||
|
@ -105,7 +105,7 @@ var basicACLZero acl.Basic
|
||||||
// CreateContainer implements neofs.NeoFS interface method.
|
// CreateContainer implements neofs.NeoFS interface method.
|
||||||
//
|
//
|
||||||
// If prm.BasicACL is zero, 'eacl-public-read-write' is used.
|
// If prm.BasicACL is zero, 'eacl-public-read-write' is used.
|
||||||
func (x *NeoFS) CreateContainer(ctx context.Context, prm layer.PrmContainerCreate) (cid.ID, error) {
|
func (x *NeoFS) CreateContainer(ctx context.Context, prm handler.PrmContainerCreate) (cid.ID, error) {
|
||||||
if prm.BasicACL == basicACLZero {
|
if prm.BasicACL == basicACLZero {
|
||||||
prm.BasicACL = acl.PublicRWExtended
|
prm.BasicACL = acl.PublicRWExtended
|
||||||
}
|
}
|
||||||
|
@ -219,7 +219,7 @@ func (x *NeoFS) DeleteContainer(ctx context.Context, id cid.ID, token *session.C
|
||||||
}
|
}
|
||||||
|
|
||||||
// CreateObject implements neofs.NeoFS interface method.
|
// CreateObject implements neofs.NeoFS interface method.
|
||||||
func (x *NeoFS) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (oid.ID, error) {
|
func (x *NeoFS) CreateObject(ctx context.Context, prm handler.PrmObjectCreate) (oid.ID, error) {
|
||||||
attrNum := len(prm.Attributes) + 1 // + creation time
|
attrNum := len(prm.Attributes) + 1 // + creation time
|
||||||
|
|
||||||
if prm.Filepath != "" {
|
if prm.Filepath != "" {
|
||||||
|
@ -281,7 +281,7 @@ func (x *NeoFS) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (oi
|
||||||
if err != nil {
|
if err != nil {
|
||||||
reason, ok := isErrAccessDenied(err)
|
reason, ok := isErrAccessDenied(err)
|
||||||
if ok {
|
if ok {
|
||||||
return oid.ID{}, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
|
return oid.ID{}, fmt.Errorf("%w: %s", handler.ErrAccessDenied, reason)
|
||||||
}
|
}
|
||||||
return oid.ID{}, fmt.Errorf("save object via connection pool: %w", err)
|
return oid.ID{}, fmt.Errorf("save object via connection pool: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -299,7 +299,7 @@ func (x payloadReader) Read(p []byte) (int, error) {
|
||||||
n, err := x.ReadCloser.Read(p)
|
n, err := x.ReadCloser.Read(p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if reason, ok := isErrAccessDenied(err); ok {
|
if reason, ok := isErrAccessDenied(err); ok {
|
||||||
return n, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
|
return n, fmt.Errorf("%w: %s", handler.ErrAccessDenied, reason)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -307,7 +307,7 @@ func (x payloadReader) Read(p []byte) (int, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadObject implements neofs.NeoFS interface method.
|
// ReadObject implements neofs.NeoFS interface method.
|
||||||
func (x *NeoFS) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer.ObjectPart, error) {
|
func (x *NeoFS) ReadObject(ctx context.Context, prm handler.PrmObjectRead) (*handler.ObjectPart, error) {
|
||||||
var addr oid.Address
|
var addr oid.Address
|
||||||
addr.SetContainer(prm.Container)
|
addr.SetContainer(prm.Container)
|
||||||
addr.SetObject(prm.Object)
|
addr.SetObject(prm.Object)
|
||||||
|
@ -326,7 +326,7 @@ func (x *NeoFS) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer
|
||||||
res, err := x.pool.GetObject(ctx, prmGet)
|
res, err := x.pool.GetObject(ctx, prmGet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if reason, ok := isErrAccessDenied(err); ok {
|
if reason, ok := isErrAccessDenied(err); ok {
|
||||||
return nil, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
|
return nil, fmt.Errorf("%w: %s", handler.ErrAccessDenied, reason)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, fmt.Errorf("init full object reading via connection pool: %w", err)
|
return nil, fmt.Errorf("init full object reading via connection pool: %w", err)
|
||||||
|
@ -341,7 +341,7 @@ func (x *NeoFS) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer
|
||||||
|
|
||||||
res.Header.SetPayload(payload)
|
res.Header.SetPayload(payload)
|
||||||
|
|
||||||
return &layer.ObjectPart{
|
return &handler.ObjectPart{
|
||||||
Head: &res.Header,
|
Head: &res.Header,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
@ -358,26 +358,26 @@ func (x *NeoFS) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer
|
||||||
hdr, err := x.pool.HeadObject(ctx, prmHead)
|
hdr, err := x.pool.HeadObject(ctx, prmHead)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if reason, ok := isErrAccessDenied(err); ok {
|
if reason, ok := isErrAccessDenied(err); ok {
|
||||||
return nil, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
|
return nil, fmt.Errorf("%w: %s", handler.ErrAccessDenied, reason)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, fmt.Errorf("read object header via connection pool: %w", err)
|
return nil, fmt.Errorf("read object header via connection pool: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &layer.ObjectPart{
|
return &handler.ObjectPart{
|
||||||
Head: &hdr,
|
Head: &hdr,
|
||||||
}, nil
|
}, nil
|
||||||
} else if prm.PayloadRange[0]+prm.PayloadRange[1] == 0 {
|
} else if prm.PayloadRange[0]+prm.PayloadRange[1] == 0 {
|
||||||
res, err := x.pool.GetObject(ctx, prmGet)
|
res, err := x.pool.GetObject(ctx, prmGet)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if reason, ok := isErrAccessDenied(err); ok {
|
if reason, ok := isErrAccessDenied(err); ok {
|
||||||
return nil, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
|
return nil, fmt.Errorf("%w: %s", handler.ErrAccessDenied, reason)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, fmt.Errorf("init full payload range reading via connection pool: %w", err)
|
return nil, fmt.Errorf("init full payload range reading via connection pool: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &layer.ObjectPart{
|
return &handler.ObjectPart{
|
||||||
Payload: res.Payload,
|
Payload: res.Payload,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
@ -396,19 +396,19 @@ func (x *NeoFS) ReadObject(ctx context.Context, prm layer.PrmObjectRead) (*layer
|
||||||
res, err := x.pool.ObjectRange(ctx, prmRange)
|
res, err := x.pool.ObjectRange(ctx, prmRange)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if reason, ok := isErrAccessDenied(err); ok {
|
if reason, ok := isErrAccessDenied(err); ok {
|
||||||
return nil, fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
|
return nil, fmt.Errorf("%w: %s", handler.ErrAccessDenied, reason)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, fmt.Errorf("init payload range reading via connection pool: %w", err)
|
return nil, fmt.Errorf("init payload range reading via connection pool: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return &layer.ObjectPart{
|
return &handler.ObjectPart{
|
||||||
Payload: payloadReader{&res},
|
Payload: payloadReader{&res},
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// DeleteObject implements neofs.NeoFS interface method.
|
// DeleteObject implements neofs.NeoFS interface method.
|
||||||
func (x *NeoFS) DeleteObject(ctx context.Context, prm layer.PrmObjectDelete) error {
|
func (x *NeoFS) DeleteObject(ctx context.Context, prm handler.PrmObjectDelete) error {
|
||||||
var addr oid.Address
|
var addr oid.Address
|
||||||
addr.SetContainer(prm.Container)
|
addr.SetContainer(prm.Container)
|
||||||
addr.SetObject(prm.Object)
|
addr.SetObject(prm.Object)
|
||||||
|
@ -425,7 +425,7 @@ func (x *NeoFS) DeleteObject(ctx context.Context, prm layer.PrmObjectDelete) err
|
||||||
err := x.pool.DeleteObject(ctx, prmDelete)
|
err := x.pool.DeleteObject(ctx, prmDelete)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if reason, ok := isErrAccessDenied(err); ok {
|
if reason, ok := isErrAccessDenied(err); ok {
|
||||||
return fmt.Errorf("%w: %s", layer.ErrAccessDenied, reason)
|
return fmt.Errorf("%w: %s", handler.ErrAccessDenied, reason)
|
||||||
}
|
}
|
||||||
|
|
||||||
return fmt.Errorf("mark object removal via connection pool: %w", err)
|
return fmt.Errorf("mark object removal via connection pool: %w", err)
|
||||||
|
@ -508,7 +508,7 @@ func (x *AuthmateNeoFS) CreateContainer(ctx context.Context, prm authmate.PrmCon
|
||||||
// allow reading objects to OTHERS in order to provide read access to S3 gateways
|
// allow reading objects to OTHERS in order to provide read access to S3 gateways
|
||||||
basicACL.AllowOp(acl.OpObjectGet, acl.RoleOthers)
|
basicACL.AllowOp(acl.OpObjectGet, acl.RoleOthers)
|
||||||
|
|
||||||
return x.neoFS.CreateContainer(ctx, layer.PrmContainerCreate{
|
return x.neoFS.CreateContainer(ctx, handler.PrmContainerCreate{
|
||||||
Creator: prm.Owner,
|
Creator: prm.Owner,
|
||||||
Policy: prm.Policy,
|
Policy: prm.Policy,
|
||||||
Name: prm.FriendlyName,
|
Name: prm.FriendlyName,
|
||||||
|
@ -518,7 +518,7 @@ func (x *AuthmateNeoFS) CreateContainer(ctx context.Context, prm authmate.PrmCon
|
||||||
|
|
||||||
// ReadObjectPayload implements authmate.NeoFS interface method.
|
// ReadObjectPayload implements authmate.NeoFS interface method.
|
||||||
func (x *AuthmateNeoFS) ReadObjectPayload(ctx context.Context, addr oid.Address) ([]byte, error) {
|
func (x *AuthmateNeoFS) ReadObjectPayload(ctx context.Context, addr oid.Address) ([]byte, error) {
|
||||||
res, err := x.neoFS.ReadObject(ctx, layer.PrmObjectRead{
|
res, err := x.neoFS.ReadObject(ctx, handler.PrmObjectRead{
|
||||||
Container: addr.Container(),
|
Container: addr.Container(),
|
||||||
Object: addr.Object(),
|
Object: addr.Object(),
|
||||||
WithPayload: true,
|
WithPayload: true,
|
||||||
|
@ -534,7 +534,7 @@ func (x *AuthmateNeoFS) ReadObjectPayload(ctx context.Context, addr oid.Address)
|
||||||
|
|
||||||
// CreateObject implements authmate.NeoFS interface method.
|
// CreateObject implements authmate.NeoFS interface method.
|
||||||
func (x *AuthmateNeoFS) CreateObject(ctx context.Context, prm tokens.PrmObjectCreate) (oid.ID, error) {
|
func (x *AuthmateNeoFS) CreateObject(ctx context.Context, prm tokens.PrmObjectCreate) (oid.ID, error) {
|
||||||
return x.neoFS.CreateObject(ctx, layer.PrmObjectCreate{
|
return x.neoFS.CreateObject(ctx, handler.PrmObjectCreate{
|
||||||
Creator: prm.Creator,
|
Creator: prm.Creator,
|
||||||
Container: prm.Container,
|
Container: prm.Container,
|
||||||
Filepath: prm.Filepath,
|
Filepath: prm.Filepath,
|
||||||
|
|
|
@ -4,7 +4,7 @@ import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
"github.com/nspcc-dev/neofs-s3-gw/api/handler"
|
||||||
apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
|
apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
@ -17,9 +17,9 @@ func TestErrorChecking(t *testing.T) {
|
||||||
var wrappedError error
|
var wrappedError error
|
||||||
|
|
||||||
if fetchedReason, ok := isErrAccessDenied(err); ok {
|
if fetchedReason, ok := isErrAccessDenied(err); ok {
|
||||||
wrappedError = fmt.Errorf("%w: %s", layer.ErrAccessDenied, fetchedReason)
|
wrappedError = fmt.Errorf("%w: %s", handler.ErrAccessDenied, fetchedReason)
|
||||||
}
|
}
|
||||||
|
|
||||||
require.ErrorIs(t, wrappedError, layer.ErrAccessDenied)
|
require.ErrorIs(t, wrappedError, handler.ErrAccessDenied)
|
||||||
require.Contains(t, wrappedError.Error(), reason)
|
require.Contains(t, wrappedError.Error(), reason)
|
||||||
}
|
}
|
||||||
|
|
|
@ -12,7 +12,7 @@ import (
|
||||||
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api"
|
"github.com/nspcc-dev/neofs-s3-gw/api"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
"github.com/nspcc-dev/neofs-s3-gw/api/handler"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
|
"github.com/nspcc-dev/neofs-s3-gw/creds/accessbox"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/internal/neofs/services/tree"
|
"github.com/nspcc-dev/neofs-s3-gw/internal/neofs/services/tree"
|
||||||
"github.com/nspcc-dev/neofs-sdk-go/bearer"
|
"github.com/nspcc-dev/neofs-sdk-go/bearer"
|
||||||
|
@ -293,7 +293,7 @@ func (c *TreeClient) GetSettingsNode(ctx context.Context, bktInfo *data.BucketIn
|
||||||
|
|
||||||
func (c *TreeClient) PutSettingsNode(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) error {
|
func (c *TreeClient) PutSettingsNode(ctx context.Context, bktInfo *data.BucketInfo, settings *data.BucketSettings) error {
|
||||||
node, err := c.getSystemNode(ctx, bktInfo, []string{settingsFileName}, []string{})
|
node, err := c.getSystemNode(ctx, bktInfo, []string{settingsFileName}, []string{})
|
||||||
isErrNotFound := errors.Is(err, layer.ErrNodeNotFound)
|
isErrNotFound := errors.Is(err, handler.ErrNodeNotFound)
|
||||||
if err != nil && !isErrNotFound {
|
if err != nil && !isErrNotFound {
|
||||||
return fmt.Errorf("couldn't get node: %w", err)
|
return fmt.Errorf("couldn't get node: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -319,7 +319,7 @@ func (c *TreeClient) GetNotificationConfigurationNode(ctx context.Context, bktIn
|
||||||
|
|
||||||
func (c *TreeClient) PutNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
|
func (c *TreeClient) PutNotificationConfigurationNode(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
|
||||||
node, err := c.getSystemNode(ctx, bktInfo, []string{notifConfFileName}, []string{oidKV})
|
node, err := c.getSystemNode(ctx, bktInfo, []string{notifConfFileName}, []string{oidKV})
|
||||||
isErrNotFound := errors.Is(err, layer.ErrNodeNotFound)
|
isErrNotFound := errors.Is(err, handler.ErrNodeNotFound)
|
||||||
if err != nil && !isErrNotFound {
|
if err != nil && !isErrNotFound {
|
||||||
return oid.ID{}, fmt.Errorf("couldn't get node: %w", err)
|
return oid.ID{}, fmt.Errorf("couldn't get node: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -332,7 +332,7 @@ func (c *TreeClient) PutNotificationConfigurationNode(ctx context.Context, bktIn
|
||||||
if _, err = c.addNode(ctx, bktInfo, systemTree, 0, meta); err != nil {
|
if _, err = c.addNode(ctx, bktInfo, systemTree, 0, meta); err != nil {
|
||||||
return oid.ID{}, err
|
return oid.ID{}, err
|
||||||
}
|
}
|
||||||
return oid.ID{}, layer.ErrNoNodeToRemove
|
return oid.ID{}, handler.ErrNoNodeToRemove
|
||||||
}
|
}
|
||||||
|
|
||||||
return node.ObjID, c.moveNode(ctx, bktInfo, systemTree, node.ID, 0, meta)
|
return node.ObjID, c.moveNode(ctx, bktInfo, systemTree, node.ID, 0, meta)
|
||||||
|
@ -349,7 +349,7 @@ func (c *TreeClient) GetBucketCORS(ctx context.Context, bktInfo *data.BucketInfo
|
||||||
|
|
||||||
func (c *TreeClient) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
|
func (c *TreeClient) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo, objID oid.ID) (oid.ID, error) {
|
||||||
node, err := c.getSystemNode(ctx, bktInfo, []string{corsFilename}, []string{oidKV})
|
node, err := c.getSystemNode(ctx, bktInfo, []string{corsFilename}, []string{oidKV})
|
||||||
isErrNotFound := errors.Is(err, layer.ErrNodeNotFound)
|
isErrNotFound := errors.Is(err, handler.ErrNodeNotFound)
|
||||||
if err != nil && !isErrNotFound {
|
if err != nil && !isErrNotFound {
|
||||||
return oid.ID{}, fmt.Errorf("couldn't get node: %w", err)
|
return oid.ID{}, fmt.Errorf("couldn't get node: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -362,7 +362,7 @@ func (c *TreeClient) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo
|
||||||
if _, err = c.addNode(ctx, bktInfo, systemTree, 0, meta); err != nil {
|
if _, err = c.addNode(ctx, bktInfo, systemTree, 0, meta); err != nil {
|
||||||
return oid.ID{}, err
|
return oid.ID{}, err
|
||||||
}
|
}
|
||||||
return oid.ID{}, layer.ErrNoNodeToRemove
|
return oid.ID{}, handler.ErrNoNodeToRemove
|
||||||
}
|
}
|
||||||
|
|
||||||
return node.ObjID, c.moveNode(ctx, bktInfo, systemTree, node.ID, 0, meta)
|
return node.ObjID, c.moveNode(ctx, bktInfo, systemTree, node.ID, 0, meta)
|
||||||
|
@ -370,7 +370,7 @@ func (c *TreeClient) PutBucketCORS(ctx context.Context, bktInfo *data.BucketInfo
|
||||||
|
|
||||||
func (c *TreeClient) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
|
func (c *TreeClient) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketInfo) (oid.ID, error) {
|
||||||
node, err := c.getSystemNode(ctx, bktInfo, []string{corsFilename}, []string{oidKV})
|
node, err := c.getSystemNode(ctx, bktInfo, []string{corsFilename}, []string{oidKV})
|
||||||
if err != nil && !errors.Is(err, layer.ErrNodeNotFound) {
|
if err != nil && !errors.Is(err, handler.ErrNodeNotFound) {
|
||||||
return oid.ID{}, err
|
return oid.ID{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -378,7 +378,7 @@ func (c *TreeClient) DeleteBucketCORS(ctx context.Context, bktInfo *data.BucketI
|
||||||
return node.ObjID, c.removeNode(ctx, bktInfo, systemTree, node.ID)
|
return node.ObjID, c.removeNode(ctx, bktInfo, systemTree, node.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
return oid.ID{}, layer.ErrNoNodeToRemove
|
return oid.ID{}, handler.ErrNoNodeToRemove
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *TreeClient) GetObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, error) {
|
func (c *TreeClient) GetObjectTagging(ctx context.Context, bktInfo *data.BucketInfo, objVersion *data.NodeVersion) (map[string]string, error) {
|
||||||
|
@ -460,7 +460,7 @@ func (c *TreeClient) GetBucketTagging(ctx context.Context, bktInfo *data.BucketI
|
||||||
|
|
||||||
func (c *TreeClient) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, tagSet map[string]string) error {
|
func (c *TreeClient) PutBucketTagging(ctx context.Context, bktInfo *data.BucketInfo, tagSet map[string]string) error {
|
||||||
node, err := c.getSystemNode(ctx, bktInfo, []string{bucketTaggingFilename}, []string{})
|
node, err := c.getSystemNode(ctx, bktInfo, []string{bucketTaggingFilename}, []string{})
|
||||||
isErrNotFound := errors.Is(err, layer.ErrNodeNotFound)
|
isErrNotFound := errors.Is(err, handler.ErrNodeNotFound)
|
||||||
if err != nil && !isErrNotFound {
|
if err != nil && !isErrNotFound {
|
||||||
return fmt.Errorf("couldn't get node: %w", err)
|
return fmt.Errorf("couldn't get node: %w", err)
|
||||||
}
|
}
|
||||||
|
@ -483,7 +483,7 @@ func (c *TreeClient) PutBucketTagging(ctx context.Context, bktInfo *data.BucketI
|
||||||
|
|
||||||
func (c *TreeClient) DeleteBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) error {
|
func (c *TreeClient) DeleteBucketTagging(ctx context.Context, bktInfo *data.BucketInfo) error {
|
||||||
node, err := c.getSystemNode(ctx, bktInfo, []string{bucketTaggingFilename}, nil)
|
node, err := c.getSystemNode(ctx, bktInfo, []string{bucketTaggingFilename}, nil)
|
||||||
if err != nil && !errors.Is(err, layer.ErrNodeNotFound) {
|
if err != nil && !errors.Is(err, handler.ErrNodeNotFound) {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -553,7 +553,7 @@ func (c *TreeClient) GetLatestVersion(ctx context.Context, bktInfo *data.BucketI
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(nodes) == 0 {
|
if len(nodes) == 0 {
|
||||||
return nil, layer.ErrNodeNotFound
|
return nil, handler.ErrNodeNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return newNodeVersion(objectName, nodes[0])
|
return newNodeVersion(objectName, nodes[0])
|
||||||
|
@ -605,7 +605,7 @@ func (c *TreeClient) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketIn
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(intermediateNodes) == 0 {
|
if len(intermediateNodes) == 0 {
|
||||||
return 0, layer.ErrNodeNotFound
|
return 0, handler.ErrNodeNotFound
|
||||||
}
|
}
|
||||||
if len(intermediateNodes) > 1 {
|
if len(intermediateNodes) > 1 {
|
||||||
return 0, fmt.Errorf("found more than one intermediate nodes")
|
return 0, fmt.Errorf("found more than one intermediate nodes")
|
||||||
|
@ -617,7 +617,7 @@ func (c *TreeClient) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketIn
|
||||||
func (c *TreeClient) getSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string, latestOnly bool) ([]*tree.GetSubTreeResponse_Body, string, error) {
|
func (c *TreeClient) getSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string, latestOnly bool) ([]*tree.GetSubTreeResponse_Body, string, error) {
|
||||||
rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, treeID, prefix)
|
rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, treeID, prefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, layer.ErrNodeNotFound) {
|
if errors.Is(err, handler.ErrNodeNotFound) {
|
||||||
return nil, "", nil
|
return nil, "", nil
|
||||||
}
|
}
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
|
@ -625,7 +625,7 @@ func (c *TreeClient) getSubTreeByPrefix(ctx context.Context, bktInfo *data.Bucke
|
||||||
|
|
||||||
subTree, err := c.getSubTree(ctx, bktInfo, treeID, rootID, 2)
|
subTree, err := c.getSubTree(ctx, bktInfo, treeID, rootID, 2)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, layer.ErrNodeNotFound) {
|
if errors.Is(err, handler.ErrNodeNotFound) {
|
||||||
return nil, "", nil
|
return nil, "", nil
|
||||||
}
|
}
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
|
@ -812,7 +812,7 @@ func (c *TreeClient) getUnversioned(ctx context.Context, bktInfo *data.BucketInf
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(nodes) != 1 {
|
if len(nodes) != 1 {
|
||||||
return nil, layer.ErrNodeNotFound
|
return nil, handler.ErrNodeNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
return nodes[0], nil
|
return nodes[0], nil
|
||||||
|
@ -894,7 +894,7 @@ func (c *TreeClient) GetMultipartUpload(ctx context.Context, bktInfo *data.Bucke
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil, layer.ErrNodeNotFound
|
return nil, handler.ErrNodeNotFound
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *TreeClient) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error) {
|
func (c *TreeClient) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartNodeID uint64, info *data.PartInfo) (oldObjIDToDelete oid.ID, err error) {
|
||||||
|
@ -931,7 +931,7 @@ func (c *TreeClient) AddPart(ctx context.Context, bktInfo *data.BucketInfo, mult
|
||||||
if _, err = c.addNode(ctx, bktInfo, systemTree, multipartNodeID, meta); err != nil {
|
if _, err = c.addNode(ctx, bktInfo, systemTree, multipartNodeID, meta); err != nil {
|
||||||
return oid.ID{}, err
|
return oid.ID{}, err
|
||||||
}
|
}
|
||||||
return oid.ID{}, layer.ErrNoNodeToRemove
|
return oid.ID{}, handler.ErrNoNodeToRemove
|
||||||
}
|
}
|
||||||
|
|
||||||
return oldObjIDToDelete, c.moveNode(ctx, bktInfo, systemTree, foundPartID, multipartNodeID, meta)
|
return oldObjIDToDelete, c.moveNode(ctx, bktInfo, systemTree, foundPartID, multipartNodeID, meta)
|
||||||
|
@ -1074,7 +1074,7 @@ func (c *TreeClient) addVersion(ctx context.Context, bktInfo *data.BucketInfo, t
|
||||||
return node.ID, c.clearOutdatedVersionInfo(ctx, bktInfo, treeID, node.ID)
|
return node.ID, c.clearOutdatedVersionInfo(ctx, bktInfo, treeID, node.ID)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !errors.Is(err, layer.ErrNodeNotFound) {
|
if !errors.Is(err, handler.ErrNodeNotFound) {
|
||||||
return 0, err
|
return 0, err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1107,7 +1107,7 @@ func (c *TreeClient) getVersions(ctx context.Context, bktInfo *data.BucketInfo,
|
||||||
}
|
}
|
||||||
nodes, err := c.getNodes(ctx, p)
|
nodes, err := c.getNodes(ctx, p)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if errors.Is(err, layer.ErrNodeNotFound) {
|
if errors.Is(err, handler.ErrNodeNotFound) {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -1210,7 +1210,7 @@ func (c *TreeClient) getNode(ctx context.Context, bktInfo *data.BucketInfo, tree
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if len(nodes) == 0 {
|
if len(nodes) == 0 {
|
||||||
return nil, layer.ErrNodeNotFound
|
return nil, handler.ErrNodeNotFound
|
||||||
}
|
}
|
||||||
if len(nodes) != 1 {
|
if len(nodes) != 1 {
|
||||||
return nil, fmt.Errorf("found more than one node")
|
return nil, fmt.Errorf("found more than one node")
|
||||||
|
@ -1252,9 +1252,9 @@ func (c *TreeClient) getNodes(ctx context.Context, p *getNodesParams) ([]*tree.G
|
||||||
|
|
||||||
func handleError(msg string, err error) error {
|
func handleError(msg string, err error) error {
|
||||||
if strings.Contains(err.Error(), "not found") {
|
if strings.Contains(err.Error(), "not found") {
|
||||||
return fmt.Errorf("%w: %s", layer.ErrNodeNotFound, err.Error())
|
return fmt.Errorf("%w: %s", handler.ErrNodeNotFound, err.Error())
|
||||||
} else if strings.Contains(err.Error(), "is denied by") {
|
} else if strings.Contains(err.Error(), "is denied by") {
|
||||||
return fmt.Errorf("%w: %s", layer.ErrNodeAccessDenied, err.Error())
|
return fmt.Errorf("%w: %s", handler.ErrNodeAccessDenied, err.Error())
|
||||||
}
|
}
|
||||||
return fmt.Errorf("%s: %w", msg, err)
|
return fmt.Errorf("%s: %w", msg, err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,7 +5,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
"github.com/nspcc-dev/neofs-s3-gw/api/data"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
"github.com/nspcc-dev/neofs-s3-gw/api/handler"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -108,11 +108,11 @@ func TestHandleError(t *testing.T) {
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
err: errors.New("something not found"),
|
err: errors.New("something not found"),
|
||||||
expectedError: layer.ErrNodeNotFound,
|
expectedError: handler.ErrNodeNotFound,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
err: errors.New("something is denied by some acl rule"),
|
err: errors.New("something is denied by some acl rule"),
|
||||||
expectedError: layer.ErrNodeAccessDenied,
|
expectedError: handler.ErrNodeAccessDenied,
|
||||||
},
|
},
|
||||||
} {
|
} {
|
||||||
t.Run("", func(t *testing.T) {
|
t.Run("", func(t *testing.T) {
|
||||||
|
|
|
@ -9,7 +9,6 @@ import (
|
||||||
|
|
||||||
"github.com/nats-io/nats.go"
|
"github.com/nats-io/nats.go"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/handler"
|
"github.com/nspcc-dev/neofs-s3-gw/api/handler"
|
||||||
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
|
|
||||||
"go.uber.org/zap"
|
"go.uber.org/zap"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -42,7 +41,7 @@ type (
|
||||||
}
|
}
|
||||||
|
|
||||||
Stream struct {
|
Stream struct {
|
||||||
h layer.MsgHandler
|
h handler.MsgHandler
|
||||||
ch chan *nats.Msg
|
ch chan *nats.Msg
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -131,7 +130,7 @@ func NewController(p *Options, l *zap.Logger) (*Controller, error) {
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Controller) Subscribe(ctx context.Context, topic string, handler layer.MsgHandler) error {
|
func (c *Controller) Subscribe(ctx context.Context, topic string, handler handler.MsgHandler) error {
|
||||||
ch := make(chan *nats.Msg, 1)
|
ch := make(chan *nats.Msg, 1)
|
||||||
|
|
||||||
c.mu.RLock()
|
c.mu.RLock()
|
Loading…
Reference in a new issue