[#13] Rename go module name according to NSPCC standards
- refactoring s3 gate structure - cleanup unused code - rename go module to `github.com/nspcc-dev/neofs-s3-gate` closes #13 Signed-off-by: Evgeniy Kulikov <kim@nspcc.ru>
This commit is contained in:
parent
e7f72fc670
commit
0161d2fbd3
25 changed files with 396 additions and 1112 deletions
130
api/layer/container.go
Normal file
130
api/layer/container.go
Normal file
|
@ -0,0 +1,130 @@
|
|||
package layer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/container"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-s3-gate/auth"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// BucketInfo contains basic information about a bucket, which is
	// backed by a NeoFS container (see the CID field).
	BucketInfo struct {
		// Name is the bucket name; currently the container ID string
		// (see containerInfo).
		Name string
		// CID is the NeoFS container identifier.
		CID refs.CID
		// Created is the bucket creation time; zero when unknown.
		Created time.Time
	}

	// ListObjectsParams carries the arguments of an S3 ListObjects call.
	ListObjectsParams struct {
		// Bucket is the bucket (container) to list.
		Bucket string
		// Prefix limits the listing to object names starting with it.
		Prefix string
		// Token is the continuation token from a previous call.
		Token string
		// Delimiter groups keys (usually "/").
		Delimiter string
		// MaxKeys is the maximum number of keys to return.
		MaxKeys int
	}
)
|
||||
|
||||
func (n *layer) containerInfo(ctx context.Context, cid refs.CID) (*BucketInfo, error) {
|
||||
bearer, err := auth.GetBearerToken(ctx)
|
||||
if err != nil {
|
||||
n.log.Error("could not receive bearer token",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := new(container.GetRequest)
|
||||
req.SetCID(cid)
|
||||
req.SetTTL(service.SingleForwardingTTL)
|
||||
// req.SetBearer(bearer)
|
||||
|
||||
_ = bearer
|
||||
|
||||
if err = service.SignRequestData(n.key, req); err != nil {
|
||||
n.log.Error("could not prepare request",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
conn, err := n.cli.GetConnection(ctx)
|
||||
if err != nil {
|
||||
n.log.Error("could not prepare client",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// todo: think about timeout
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
res, err := container.NewServiceClient(conn).Get(ctx, req)
|
||||
if err != nil {
|
||||
n.log.Error("could not list buckets",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_ = res
|
||||
|
||||
return &BucketInfo{
|
||||
CID: cid,
|
||||
Name: cid.String(), // should be fetched from container.GetResponse
|
||||
Created: time.Time{}, // should be fetched from container.GetResponse
|
||||
}, nil
|
||||
}
|
||||
|
||||
// containerList returns all containers (buckets) owned by the gate's key
// owner. Per-container info failures are logged and the container is
// skipped, so the result may be shorter than the service response.
func (n *layer) containerList(ctx context.Context) ([]BucketInfo, error) {
	bearer, err := auth.GetBearerToken(ctx)
	if err != nil {
		n.log.Error("could not receive bearer token",
			zap.Error(err))
		return nil, err
	}

	req := new(container.ListRequest)
	req.OwnerID = n.uid
	req.SetTTL(service.SingleForwardingTTL)
	// req.SetBearer(bearer)

	// bearer is unused until SetBearer above is enabled
	_ = bearer

	// every outgoing request must be signed with the gate key
	if err := service.SignRequestData(n.key, req); err != nil {
		n.log.Error("could not prepare request",
			zap.Error(err))
		return nil, err
	}

	conn, err := n.cli.GetConnection(ctx)
	if err != nil {
		n.log.Error("could not prepare client",
			zap.Error(err))
		return nil, err
	}

	// todo: think about timeout
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	res, err := container.NewServiceClient(conn).List(ctx, req)
	if err != nil {
		n.log.Error("could not list buckets",
			zap.Error(err))
		return nil, err
	}

	// resolve each returned CID into bucket info; failures are non-fatal
	list := make([]BucketInfo, 0, len(res.CID))
	for _, cid := range res.CID {
		info, err := n.containerInfo(ctx, cid)
		if err != nil {
			n.log.Error("could not fetch container info",
				zap.Error(err))
			continue
		}

		list = append(list, *info)
	}

	return list, nil
}
|
388
api/layer/layer.go
Normal file
388
api/layer/layer.go
Normal file
|
@ -0,0 +1,388 @@
|
|||
package layer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-s3-gate/api/pool"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
type (
	// layer is the concrete implementation of Client, translating
	// S3-level operations into NeoFS requests.
	layer struct {
		// log is the structured logger for request diagnostics.
		log *zap.Logger
		// cli provides pooled gRPC connections and session tokens.
		cli pool.Client
		// uid is the gate owner ID derived from key.
		uid refs.OwnerID
		// key signs every outgoing NeoFS request.
		key *ecdsa.PrivateKey
	}

	// GetObjectParams groups the arguments of a GetObject call.
	GetObjectParams struct {
		Bucket string
		Object string
		// Offset is the first payload byte to return.
		Offset int64
		// Length is the number of payload bytes to return.
		Length int64
		// Writer receives the requested payload range.
		Writer io.Writer
	}

	// PutObjectParams groups the arguments of a PutObject call.
	PutObjectParams struct {
		Bucket string
		Object string
		// Size is the payload length in bytes.
		Size int64
		// Reader supplies the payload.
		Reader io.Reader
		// Header holds user-defined object headers.
		Header map[string]string
	}

	// CopyObjectParams groups the arguments of a CopyObject call.
	CopyObjectParams struct {
		SrcBucket string
		DstBucket string
		SrcObject string
		DstObject string
	}

	// NeoFS exposes raw object retrieval by address.
	NeoFS interface {
		Get(ctx context.Context, address refs.Address) (*object.Object, error)
	}

	// Client is the S3-level API implemented by the gateway layer.
	Client interface {
		NeoFS

		ListBuckets(ctx context.Context) ([]BucketInfo, error)
		GetBucketInfo(ctx context.Context, name string) (*BucketInfo, error)

		GetObject(ctx context.Context, p *GetObjectParams) error
		GetObjectInfo(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error)

		PutObject(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error)

		CopyObject(ctx context.Context, p *CopyObjectParams) (*ObjectInfo, error)

		ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObjectsInfo, error)

		DeleteObject(ctx context.Context, bucket, object string) error
		DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error)
	}
)
|
||||
|
||||
// AWS3NameHeader is the user-header key under which the gate stores the
// object's S3 name in a NeoFS object.
const AWS3NameHeader = "filename"
|
||||
|
||||
// NewLayer creates an instance of the gateway layer on top of the given
// connection pool, deriving the owner ID from the provided private key.
// (The old comment named a different function and claimed credential
// checks / connection establishment that this code does not do.)
func NewLayer(log *zap.Logger, cli pool.Client, key *ecdsa.PrivateKey) (Client, error) {
	uid, err := refs.NewOwnerID(&key.PublicKey)
	if err != nil {
		return nil, err
	}
	return &layer{
		cli: cli,
		key: key,
		log: log,
		uid: uid,
	}, nil
}
|
||||
|
||||
// Get NeoFS Object by refs.Address (should be used by auth.Center).
func (n *layer) Get(ctx context.Context, address refs.Address) (*object.Object, error) {
	conn, err := n.cli.GetConnection(ctx)
	if err != nil {
		return nil, err
	}

	// a session token authorizes the Get verb for this address
	token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
		Conn: conn,
		Addr: address,
		Verb: service.Token_Info_Get,
	})

	if err != nil {
		return nil, err
	}

	req := new(object.GetRequest)
	req.Address = address
	req.SetTTL(service.SingleForwardingTTL)
	req.SetToken(token)

	// every outgoing request must be signed with the gate key
	err = service.SignRequestData(n.key, req)
	if err != nil {
		return nil, err
	}

	// todo: think about timeout
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	cli, err := object.NewServiceClient(conn).Get(ctx, req)
	if err != nil {
		return nil, err
	}

	// the response is a stream of header + payload chunk messages
	return receiveObject(cli)
}
|
||||
|
||||
// GetBucketInfo returns bucket name.
|
||||
func (n *layer) GetBucketInfo(ctx context.Context, name string) (*BucketInfo, error) {
|
||||
list, err := n.containerList(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, bkt := range list {
|
||||
if bkt.Name == name {
|
||||
return &bkt, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, errors.New("bucket not found")
|
||||
}
|
||||
|
||||
// ListBuckets returns all user containers. Name of the bucket is a container
// id. Timestamp is omitted since it is not saved in neofs container.
func (n *layer) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
	// a bucket is exactly a container owned by the gate's key owner
	return n.containerList(ctx)
}
|
||||
|
||||
// ListObjects returns objects from the container. It ignores tombstones and
|
||||
// storage groups.
|
||||
// ctx, bucket, prefix, continuationToken, delimiter, maxKeys
|
||||
func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObjectsInfo, error) {
|
||||
// todo: make pagination when search response will be gRPC stream,
|
||||
// pagination must be implemented with cache, because search results
|
||||
// may be different between search calls
|
||||
var (
|
||||
result ListObjectsInfo
|
||||
uniqNames = make(map[string]struct{})
|
||||
)
|
||||
|
||||
bkt, err := n.GetBucketInfo(ctx, p.Bucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
objectIDs, err := n.objectSearchContainer(ctx, bkt.CID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ln := len(objectIDs)
|
||||
// todo: check what happens if there is more than maxKeys objects
|
||||
if ln > p.MaxKeys {
|
||||
result.IsTruncated = true
|
||||
ln = p.MaxKeys
|
||||
}
|
||||
|
||||
result.Objects = make([]ObjectInfo, 0, ln)
|
||||
|
||||
for i := 0; i < ln; i++ {
|
||||
addr := refs.Address{ObjectID: objectIDs[i], CID: bkt.CID}
|
||||
|
||||
meta, err := n.objectHead(ctx, addr)
|
||||
if err != nil {
|
||||
n.log.Warn("could not fetch object meta", zap.Error(err))
|
||||
continue
|
||||
}
|
||||
|
||||
// ignore tombstone objects
|
||||
_, hdr := meta.LastHeader(object.HeaderType(object.TombstoneHdr))
|
||||
if hdr != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// ignore storage group objects
|
||||
_, hdr = meta.LastHeader(object.HeaderType(object.StorageGroupHdr))
|
||||
if hdr != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// dirs don't exist in neofs, gateway stores full path to the file
|
||||
// in object header, e.g. `filename`:`/this/is/path/file.txt`
|
||||
|
||||
// prefix argument contains full dir path from the root, e.g. `/this/is/`
|
||||
|
||||
// to emulate dirs we take dirs in path, compare it with prefix
|
||||
// and look for entities after prefix. If entity does not have any
|
||||
// sub-entities, then it is a file, else directory.
|
||||
|
||||
_, dirname := nameFromObject(meta)
|
||||
if strings.HasPrefix(dirname, p.Prefix) {
|
||||
var (
|
||||
oi *ObjectInfo
|
||||
tail = strings.TrimLeft(dirname, p.Prefix)
|
||||
ind = strings.Index(tail, pathSeparator)
|
||||
)
|
||||
|
||||
if ind < 0 { // if there are not sub-entities in tail - file
|
||||
oi = objectInfoFromMeta(meta)
|
||||
} else { // if there are sub-entities in tail - dir
|
||||
oi = &ObjectInfo{
|
||||
Bucket: meta.SystemHeader.CID.String(),
|
||||
Name: tail[:ind+1], // dir MUST have slash symbol in the end
|
||||
// IsDir: true,
|
||||
}
|
||||
}
|
||||
|
||||
// use only unique dir names
|
||||
if _, ok := uniqNames[oi.Name]; !ok {
|
||||
uniqNames[oi.Name] = struct{}{}
|
||||
|
||||
result.Objects = append(result.Objects, *oi)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// GetObject from storage. The payload range [Offset, Offset+Length) is
// written into p.Writer.
func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
	// the bucket name is the container ID in string form
	cid, err := refs.CIDFromString(p.Bucket)
	if err != nil {
		return err
	}

	// resolve the S3 object name to a NeoFS object ID
	oid, err := n.objectFindID(ctx, cid, p.Object, false)
	if err != nil {
		return err
	}

	addr := refs.Address{
		ObjectID: oid,
		CID:      cid,
	}
	_, err = n.objectGet(ctx, getParams{
		addr:   addr,
		start:  p.Offset,
		length: p.Length,
		writer: p.Writer,
	})

	return err
}
|
||||
|
||||
// GetObjectInfo returns meta information about the object.
|
||||
func (n *layer) GetObjectInfo(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) {
|
||||
var meta *object.Object
|
||||
if cid, err := refs.CIDFromString(bucketName); err != nil {
|
||||
return nil, err
|
||||
} else if oid, err := n.objectFindID(ctx, cid, objectName, false); err != nil {
|
||||
return nil, err
|
||||
} else if meta, err = n.objectHead(ctx, refs.Address{CID: cid, ObjectID: oid}); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return objectInfoFromMeta(meta), nil
|
||||
}
|
||||
|
||||
// PutObject into storage.
|
||||
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error) {
|
||||
cid, err := refs.CIDFromString(p.Bucket)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = n.objectFindID(ctx, cid, p.Object, true)
|
||||
if err == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
oid, err := refs.NewObjectID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sgid, err := refs.NewSGID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
addr := refs.Address{
|
||||
ObjectID: oid,
|
||||
CID: cid,
|
||||
}
|
||||
|
||||
meta, err := n.objectPut(ctx, putParams{
|
||||
addr: addr,
|
||||
size: p.Size,
|
||||
name: p.Object,
|
||||
r: p.Reader,
|
||||
userHeaders: p.Header,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
oi := objectInfoFromMeta(meta)
|
||||
|
||||
// for every object create storage group, otherwise object will be deleted
|
||||
addr.ObjectID = sgid
|
||||
|
||||
_, err = n.storageGroupPut(ctx, sgParams{
|
||||
addr: addr,
|
||||
objects: []refs.ObjectID{oid},
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return oi, nil
|
||||
}
|
||||
|
||||
// CopyObject from one bucket into another bucket. The copy is streamed
// through an in-memory pipe: a goroutine downloads the source object
// while PutObject consumes the pipe's read end.
func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*ObjectInfo, error) {
	info, err := n.GetObjectInfo(ctx, p.SrcBucket, p.SrcObject)
	if err != nil {
		return nil, err
	}

	pr, pw := io.Pipe()

	go func() {
		err := n.GetObject(ctx, &GetObjectParams{
			Bucket: p.SrcBucket,
			Object: p.SrcObject,
			Writer: pw,
		})

		// propagate the download error to the reader side (nil closes
		// the pipe with EOF)
		_ = pw.CloseWithError(err)
	}()

	return n.PutObject(ctx, &PutObjectParams{
		Bucket: p.DstBucket,
		Object: p.DstObject,
		Size:   info.Size,
		Reader: pr,
		Header: info.Headers,
	})
}
|
||||
|
||||
// DeleteObject from the storage.
|
||||
func (n *layer) DeleteObject(ctx context.Context, bucket, object string) error {
|
||||
cid, err := refs.CIDFromString(bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
oid, err := n.objectFindID(ctx, cid, object, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return n.objectDelete(ctx, delParams{addr: refs.Address{CID: cid, ObjectID: oid}})
|
||||
}
|
||||
|
||||
// DeleteObjects from the storage.
|
||||
func (n *layer) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) {
|
||||
var errs = make([]error, 0, len(objects))
|
||||
|
||||
for i := range objects {
|
||||
errs = append(errs, n.DeleteObject(ctx, bucket, objects[i]))
|
||||
}
|
||||
|
||||
return errs, nil
|
||||
}
|
566
api/layer/object.go
Normal file
566
api/layer/object.go
Normal file
|
@ -0,0 +1,566 @@
|
|||
package layer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
"github.com/nspcc-dev/neofs-api-go/query"
|
||||
"github.com/nspcc-dev/neofs-api-go/refs"
|
||||
"github.com/nspcc-dev/neofs-api-go/service"
|
||||
"github.com/nspcc-dev/neofs-api-go/storagegroup"
|
||||
"github.com/nspcc-dev/neofs-s3-gate/api/pool"
|
||||
"github.com/pkg/errors"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
const (
	// dataChunkSize is the payload chunk size used when streaming object
	// data over the gRPC Put stream.
	dataChunkSize = 3 * object.UnitsMB
	// objectVersion is the system-header version set on created objects.
	objectVersion = 1
)
|
||||
|
||||
type (
	// putParams groups the arguments of objectPut.
	putParams struct {
		addr refs.Address
		// name is the S3 object name stored under AWS3NameHeader.
		name string
		// size is the payload length in bytes.
		size int64
		// r supplies the payload.
		r io.Reader
		// userHeaders are extra user-defined headers to attach.
		userHeaders map[string]string
	}

	// sgParams groups the arguments of storageGroupPut.
	sgParams struct {
		addr refs.Address
		// objects are the IDs linked into the storage group.
		objects []refs.ObjectID
	}

	// delParams groups the arguments of objectDelete.
	delParams struct {
		addr refs.Address
	}

	// getParams groups the arguments of objectGet.
	getParams struct {
		addr refs.Address
		// start is the first payload byte to write out.
		start int64
		// length is the number of payload bytes to write out.
		length int64
		// writer receives the requested payload range.
		writer io.Writer
	}
)
|
||||
|
||||
// objectSearchContainer returns all available objects in the container.
|
||||
func (n *layer) objectSearchContainer(ctx context.Context, cid refs.CID) ([]refs.ObjectID, error) {
|
||||
var q query.Query
|
||||
q.Filters = append(q.Filters, query.Filter{
|
||||
Type: query.Filter_Exact,
|
||||
Name: object.KeyRootObject,
|
||||
})
|
||||
|
||||
conn, err := n.cli.GetConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
queryBinary, err := q.Marshal()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
|
||||
Conn: conn,
|
||||
Addr: refs.Address{CID: cid},
|
||||
Verb: service.Token_Info_Search,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
req := new(object.SearchRequest)
|
||||
req.Query = queryBinary
|
||||
req.QueryVersion = 1
|
||||
req.ContainerID = cid
|
||||
req.SetTTL(service.SingleForwardingTTL)
|
||||
req.SetToken(token)
|
||||
// req.SetBearer(bearerToken)
|
||||
|
||||
err = service.SignRequestData(n.key, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// todo: think about timeout
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
searchClient, err := object.NewServiceClient(conn).Search(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
response []refs.Address
|
||||
result []refs.ObjectID
|
||||
)
|
||||
|
||||
for {
|
||||
resp, err := searchClient.Recv()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
return nil, errors.New("search command received error")
|
||||
}
|
||||
|
||||
response = append(response, resp.Addresses...)
|
||||
}
|
||||
|
||||
for i := range response {
|
||||
result = append(result, response[i].ObjectID)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// objectFindID returns object id (uuid) based on it's nice name in s3. If
|
||||
// nice name is uuid compatible, then function returns it.
|
||||
func (n *layer) objectFindID(ctx context.Context, cid refs.CID, name string, put bool) (refs.ObjectID, error) {
|
||||
var (
|
||||
id refs.ObjectID
|
||||
q query.Query
|
||||
)
|
||||
|
||||
q.Filters = append(q.Filters, query.Filter{
|
||||
Type: query.Filter_Exact,
|
||||
Name: object.KeyRootObject,
|
||||
})
|
||||
q.Filters = append(q.Filters, query.Filter{
|
||||
Type: query.Filter_Exact,
|
||||
Name: AWS3NameHeader,
|
||||
Value: name,
|
||||
})
|
||||
|
||||
queryBinary, err := q.Marshal()
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
|
||||
conn, err := n.cli.GetConnection(ctx)
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
|
||||
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
|
||||
Conn: conn,
|
||||
Addr: refs.Address{CID: cid},
|
||||
Verb: service.Token_Info_Search,
|
||||
})
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
|
||||
req := new(object.SearchRequest)
|
||||
req.Query = queryBinary
|
||||
req.QueryVersion = 1
|
||||
req.ContainerID = cid
|
||||
req.SetTTL(service.SingleForwardingTTL)
|
||||
req.SetToken(token)
|
||||
// req.SetBearer(bearerToken)
|
||||
|
||||
err = service.SignRequestData(n.key, req)
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
|
||||
// todo: think about timeout
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
searchClient, err := object.NewServiceClient(conn).Search(ctx, req)
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
|
||||
var response []refs.Address
|
||||
|
||||
for {
|
||||
resp, err := searchClient.Recv()
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
|
||||
return id, errors.New("search command received error")
|
||||
}
|
||||
|
||||
response = append(response, resp.Addresses...)
|
||||
}
|
||||
|
||||
switch ln := len(response); {
|
||||
case ln > 1:
|
||||
return id, errors.New("several objects with the same name found")
|
||||
case ln == 1:
|
||||
return response[0].ObjectID, nil
|
||||
default:
|
||||
// Minio lists all objects with and without nice names. All objects
|
||||
// without nice name still have "name" in terms of minio - uuid encoded
|
||||
// into string. There is a tricky case when user upload object
|
||||
// with nice name that is encoded uuid.
|
||||
// There is an optimisation to parse name and return uuid if it name is uuid
|
||||
// compatible. It _should not_ work in case of put operation, because object
|
||||
// with uuid compatible nice name may not exist. Therefore this optimization
|
||||
// breaks object put logic and must be turned off.
|
||||
if !put {
|
||||
err := id.Parse(name)
|
||||
if err == nil {
|
||||
return id, nil
|
||||
}
|
||||
}
|
||||
return id, errors.New("object not found")
|
||||
}
|
||||
}
|
||||
|
||||
// objectHead returns all object's headers.
func (n *layer) objectHead(ctx context.Context, addr refs.Address) (*object.Object, error) {

	conn, err := n.cli.GetConnection(ctx)
	if err != nil {
		return nil, err
	}

	// a session token authorizes the Head verb for this address
	token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
		Conn: conn,
		Addr: addr,
		Verb: service.Token_Info_Head,
	})
	if err != nil {
		return nil, err
	}

	req := new(object.HeadRequest)
	req.Address = addr
	// request extended headers as well, not only the system ones
	req.FullHeaders = true
	req.SetTTL(service.SingleForwardingTTL)
	req.SetToken(token)
	// req.SetBearer(bearerToken)

	err = service.SignRequestData(n.key, req)
	if err != nil {
		return nil, err
	}

	// todo: think about timeout
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	res, err := object.NewServiceClient(conn).Head(ctx, req)
	if err != nil {
		return nil, err
	}

	return res.Object, nil
}
|
||||
|
||||
// receiveObject assembles a full object from a Get response stream: the
// first message carries the object with its headers (and possibly the
// beginning of the payload), subsequent messages carry payload chunks.
func receiveObject(cli object.Service_GetClient) (*object.Object, error) {
	var (
		off int    // number of payload bytes copied so far
		buf []byte // payload buffer, sized from the system header
		obj *object.Object
	)

	for {
		resp, err := cli.Recv()
		if err != nil {
			if err == io.EOF {
				break
			}

			return nil, err
		}

		switch o := resp.R.(type) {
		case *object.GetResponse_Object:

			// a tombstone header means the object has been deleted
			if _, hdr := o.Object.LastHeader(object.HeaderType(object.TombstoneHdr)); hdr != nil {
				return nil, errors.New("object already removed")
			}

			obj = o.Object
			buf = make([]byte, obj.SystemHeader.PayloadLength)

			// the header message may already carry part of the payload
			if len(obj.Payload) > 0 {
				off += copy(buf, obj.Payload)
			}
		case *object.GetResponse_Chunk:
			// chunks are only valid after the header message
			if obj == nil {
				return nil, errors.New("object headers not received")
			}
			off += copy(buf[off:], o.Chunk)
		default:
			return nil, errors.Errorf("unknown response %T", o)
		}
	}

	if obj == nil {
		return nil, errors.New("object headers not received")
	}
	obj.Payload = buf

	return obj, nil
}
|
||||
|
||||
// objectGet and write it into provided io.Reader.
|
||||
func (n *layer) objectGet(ctx context.Context, p getParams) (*object.Object, error) {
|
||||
conn, err := n.cli.GetConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
|
||||
Conn: conn,
|
||||
Addr: p.addr,
|
||||
Verb: service.Token_Info_Get,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// todo: replace object.Get() call by object.GetRange() for
|
||||
// true sequential reading support; it will be possible when
|
||||
// object.GetRange() response message become gRPC stream.
|
||||
req := new(object.GetRequest)
|
||||
req.Address = p.addr
|
||||
req.SetTTL(service.SingleForwardingTTL)
|
||||
req.SetToken(token)
|
||||
// req.SetBearer(bearerToken)
|
||||
|
||||
err = service.SignRequestData(n.key, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// todo: think about timeout
|
||||
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
|
||||
defer cancel()
|
||||
|
||||
var obj *object.Object
|
||||
|
||||
if cli, err := object.NewServiceClient(conn).Get(ctx, req); err != nil {
|
||||
return nil, err
|
||||
} else if obj, err = receiveObject(cli); err != nil {
|
||||
return nil, err
|
||||
} else if ln := int64(obj.SystemHeader.PayloadLength); p.start+p.length > ln {
|
||||
return nil, errors.Errorf("slice bounds out of range: len = %d, start = %d, offset = %d",
|
||||
ln, p.start, p.length)
|
||||
} else if _, err = p.writer.Write(obj.Payload[p.start : p.start+p.length]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// remove payload:
|
||||
obj.Payload = nil
|
||||
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// objectPut into neofs, took payload from io.Reader.
|
||||
func (n *layer) objectPut(ctx context.Context, p putParams) (*object.Object, error) {
|
||||
conn, err := n.cli.GetConnection(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
|
||||
Conn: conn,
|
||||
Addr: p.addr,
|
||||
Verb: service.Token_Info_Put,
|
||||
})
|
||||
if err != nil {
|
||||
n.log.Error("could not prepare token",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
putClient, err := object.NewServiceClient(conn).Put(ctx)
|
||||
if err != nil {
|
||||
n.log.Error("could not prepare PutClient",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if p.userHeaders == nil {
|
||||
p.userHeaders = make(map[string]string)
|
||||
}
|
||||
|
||||
p.userHeaders[AWS3NameHeader] = p.name
|
||||
|
||||
readBuffer := make([]byte, dataChunkSize)
|
||||
obj := &object.Object{
|
||||
SystemHeader: object.SystemHeader{
|
||||
Version: objectVersion,
|
||||
ID: p.addr.ObjectID,
|
||||
OwnerID: n.uid,
|
||||
CID: p.addr.CID,
|
||||
PayloadLength: uint64(p.size),
|
||||
},
|
||||
Headers: parseUserHeaders(p.userHeaders),
|
||||
}
|
||||
|
||||
req := object.MakePutRequestHeader(obj)
|
||||
req.SetTTL(service.SingleForwardingTTL)
|
||||
req.SetToken(token)
|
||||
// req.SetBearer(bearerToken)
|
||||
|
||||
err = service.SignRequestData(n.key, req)
|
||||
if err != nil {
|
||||
n.log.Error("could not prepare request",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = putClient.Send(req)
|
||||
if err != nil {
|
||||
n.log.Error("could not send request",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
read, err := p.r.Read(readBuffer)
|
||||
for read > 0 {
|
||||
if err != nil && err != io.EOF {
|
||||
n.log.Error("something went wrong",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if read > 0 {
|
||||
req := object.MakePutRequestChunk(readBuffer[:read])
|
||||
req.SetTTL(service.SingleForwardingTTL)
|
||||
// req.SetBearer(bearerToken)
|
||||
|
||||
err = service.SignRequestData(n.key, req)
|
||||
if err != nil {
|
||||
n.log.Error("could not sign chunk request",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = putClient.Send(req)
|
||||
if err != nil && err != io.EOF {
|
||||
n.log.Error("could not send chunk",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
read, err = p.r.Read(readBuffer)
|
||||
}
|
||||
|
||||
_, err = putClient.CloseAndRecv()
|
||||
if err != nil {
|
||||
n.log.Error("could not finish request",
|
||||
zap.Error(err))
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// maybe make a head?
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
// storageGroupPut prepares storage group object and put it into neofs.
func (n *layer) storageGroupPut(ctx context.Context, p sgParams) (*object.Object, error) {
	conn, err := n.cli.GetConnection(ctx)
	if err != nil {
		return nil, err
	}

	// a session token authorizes the Put verb for this address
	token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
		Conn: conn,
		Addr: p.addr,
		Verb: service.Token_Info_Put,
	})
	if err != nil {
		return nil, err
	}

	client := object.NewServiceClient(conn)
	// todo: think about timeout
	putClient, err := client.Put(ctx)
	if err != nil {
		return nil, err
	}

	sg := &object.Object{
		SystemHeader: object.SystemHeader{
			Version: objectVersion,
			ID:      p.addr.ObjectID,
			OwnerID: n.uid,
			CID:     p.addr.CID,
		},
		Headers: make([]object.Header, 0, len(p.objects)),
	}

	// link every member object into the storage group
	for i := range p.objects {
		sg.AddHeader(&object.Header{Value: &object.Header_Link{
			Link: &object.Link{Type: object.Link_StorageGroup, ID: p.objects[i]},
		}})
	}

	sg.SetStorageGroup(new(storagegroup.StorageGroup))

	req := object.MakePutRequestHeader(sg)
	req.SetTTL(service.SingleForwardingTTL)
	req.SetToken(token)
	// req.SetBearer(bearerToken)

	err = service.SignRequestData(n.key, req)
	if err != nil {
		return nil, err
	}

	err = putClient.Send(req)
	if err != nil {
		return nil, err
	}

	_, err = putClient.CloseAndRecv()
	if err != nil {
		return nil, err
	}

	return sg, nil
}
|
||||
|
||||
// objectDelete puts tombstone object into neofs.
func (n *layer) objectDelete(ctx context.Context, p delParams) error {
	conn, err := n.cli.GetConnection(ctx)
	if err != nil {
		return err
	}

	// a session token authorizes the Delete verb for this address
	token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
		Conn: conn,
		Addr: p.addr,
		Verb: service.Token_Info_Delete,
	})
	if err != nil {
		return err
	}

	req := new(object.DeleteRequest)
	req.Address = p.addr
	req.OwnerID = n.uid
	req.SetTTL(service.SingleForwardingTTL)
	req.SetToken(token)
	// req.SetBearer(bearerToken)

	err = service.SignRequestData(n.key, req)
	if err != nil {
		return err
	}

	// todo: think about timeout
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()

	_, err = object.NewServiceClient(conn).Delete(ctx, req)

	return err
}
|
113
api/layer/util.go
Normal file
113
api/layer/util.go
Normal file
|
@ -0,0 +1,113 @@
|
|||
package layer
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/nspcc-dev/neofs-api-go/object"
|
||||
)
|
||||
|
||||
type (
	// ObjectInfo holds S3-level metadata of a single object.
	ObjectInfo struct {
		// Bucket is the container ID in string form.
		Bucket string
		// Name is the object's S3 name (full path).
		Name string
		// Size is the payload length in bytes.
		Size int64
		// ContentType is detected from the payload.
		ContentType string
		// Created is the object creation time.
		Created time.Time
		// Headers holds the user-defined object headers.
		Headers map[string]string
	}

	// ListObjectsInfo - container for list objects.
	ListObjectsInfo struct {
		// Indicates whether the returned list objects response is truncated. A
		// value of true indicates that the list was truncated. The list can be truncated
		// if the number of objects exceeds the limit allowed or specified
		// by max keys.
		IsTruncated bool

		// When response is truncated (the IsTruncated element value in the response
		// is true), you can use the key name in this field as marker in the subsequent
		// request to get next set of objects.
		//
		// NOTE: This element is returned only if you have delimiter request parameter
		// specified.
		ContinuationToken     string
		NextContinuationToken string

		// List of objects info for this request.
		Objects []ObjectInfo

		// List of prefixes for this request.
		Prefixes []string
	}
)
|
||||
|
||||
// pathSeparator separates "directory" components in object names.
// NOTE(review): os.PathSeparator is platform dependent ('\\' on Windows),
// while S3 keys conventionally use '/' — confirm intended behavior.
const pathSeparator = string(os.PathSeparator)
|
||||
|
||||
func userHeaders(h []object.Header) map[string]string {
|
||||
result := make(map[string]string, len(h))
|
||||
|
||||
for i := range h {
|
||||
switch v := h[i].Value.(type) {
|
||||
case *object.Header_UserHeader:
|
||||
result[v.UserHeader.Key] = v.UserHeader.Value
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func objectInfoFromMeta(meta *object.Object) *ObjectInfo {
|
||||
aws3name := meta.SystemHeader.ID.String()
|
||||
|
||||
userHeaders := userHeaders(meta.Headers)
|
||||
if name, ok := userHeaders[AWS3NameHeader]; ok {
|
||||
aws3name = name
|
||||
delete(userHeaders, name)
|
||||
}
|
||||
|
||||
mimeType := http.DetectContentType(meta.Payload)
|
||||
|
||||
return &ObjectInfo{
|
||||
Bucket: meta.SystemHeader.CID.String(),
|
||||
Name: aws3name,
|
||||
ContentType: mimeType,
|
||||
Headers: userHeaders,
|
||||
Size: int64(meta.SystemHeader.PayloadLength),
|
||||
Created: time.Unix(meta.SystemHeader.CreatedAt.UnixTime, 0),
|
||||
}
|
||||
}
|
||||
|
||||
func parseUserHeaders(h map[string]string) []object.Header {
|
||||
headers := make([]object.Header, 0, len(h))
|
||||
|
||||
for k, v := range h {
|
||||
uh := &object.UserHeader{Key: k, Value: v}
|
||||
headers = append(headers, object.Header{
|
||||
Value: &object.Header_UserHeader{UserHeader: uh},
|
||||
})
|
||||
}
|
||||
|
||||
return headers
|
||||
}
|
||||
|
||||
func nameFromObject(o *object.Object) (string, string) {
|
||||
var (
|
||||
name string
|
||||
uh = userHeaders(o.Headers)
|
||||
)
|
||||
|
||||
if _, ok := uh[AWS3NameHeader]; !ok {
|
||||
name = o.SystemHeader.ID.String()
|
||||
} else {
|
||||
name = uh[AWS3NameHeader]
|
||||
}
|
||||
|
||||
ind := strings.LastIndex(name, pathSeparator)
|
||||
|
||||
return name[ind+1:], name[:ind+1]
|
||||
}
|
Loading…
Add table
Add a link
Reference in a new issue