[#25] Migrate layer to NeoFS API v2

Naive migration, without any tries to start application

closes #25

Signed-off-by: Evgeniy Kulikov <kim@nspcc.ru>
This commit is contained in:
Evgeniy Kulikov 2020-10-19 04:04:37 +03:00
parent ec56a1818e
commit 7deeb68d47
6 changed files with 420 additions and 738 deletions

View file

@ -4,9 +4,8 @@ import (
"context" "context"
"time" "time"
"github.com/nspcc-dev/neofs-api-go/container" "github.com/nspcc-dev/neofs-api-go/pkg/client"
"github.com/nspcc-dev/neofs-api-go/refs" "github.com/nspcc-dev/neofs-api-go/pkg/container"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-s3-gate/api" "github.com/nspcc-dev/neofs-s3-gate/api"
"github.com/nspcc-dev/neofs-s3-gate/auth" "github.com/nspcc-dev/neofs-s3-gate/auth"
"go.uber.org/zap" "go.uber.org/zap"
@ -15,7 +14,7 @@ import (
type ( type (
BucketInfo struct { BucketInfo struct {
Name string Name string
CID refs.CID CID *container.ID
Created time.Time Created time.Time
} }
@ -28,47 +27,36 @@ type (
} }
) )
func (n *layer) containerInfo(ctx context.Context, cid refs.CID) (*BucketInfo, error) { func (n *layer) containerInfo(ctx context.Context, cid *container.ID) (*BucketInfo, error) {
rid := api.GetRequestID(ctx) rid := api.GetRequestID(ctx)
bearer, err := auth.GetBearerToken(ctx) bearer, err := auth.GetBearerToken(ctx)
if err != nil { if err != nil {
n.log.Error("could not receive bearer token", n.log.Error("could not receive bearer token",
zap.Stringer("cid", cid),
zap.String("request_id", rid), zap.String("request_id", rid),
zap.Error(err)) zap.Error(err))
return nil, err return nil, err
} }
req := new(container.GetRequest)
req.SetCID(cid)
req.SetTTL(service.SingleForwardingTTL)
// req.SetBearer(bearer)
_ = bearer _ = bearer
if err = service.SignRequestData(n.key, req); err != nil { cli, tkn, err := n.prepareClient(ctx)
n.log.Error("could not prepare request",
zap.String("request_id", rid),
zap.Error(err))
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil { if err != nil {
n.log.Error("could not prepare client", n.log.Error("could not prepare client",
zap.Stringer("cid", cid),
zap.String("request_id", rid), zap.String("request_id", rid),
zap.Error(err)) zap.Error(err))
return nil, err return nil, err
} }
// todo: think about timeout res, err := cli.GetContainer(ctx, cid, client.WithSession(tkn))
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
res, err := container.NewServiceClient(conn).Get(ctx, req)
if err != nil { if err != nil {
n.log.Error("could not list buckets", n.log.Error("could not fetch container",
zap.Stringer("cid", cid),
zap.String("request_id", rid), zap.String("request_id", rid),
zap.Error(err)) zap.Error(err))
return nil, err return nil, err
} }
@ -76,8 +64,8 @@ func (n *layer) containerInfo(ctx context.Context, cid refs.CID) (*BucketInfo, e
return &BucketInfo{ return &BucketInfo{
CID: cid, CID: cid,
Name: cid.String(), // should be fetched from container.GetResponse Name: cid.String(), // should be fetched from container.Attributes
Created: time.Time{}, // should be fetched from container.GetResponse Created: time.Time{}, // should be fetched from container.Attributes
}, nil }, nil
} }
@ -91,21 +79,9 @@ func (n *layer) containerList(ctx context.Context) ([]BucketInfo, error) {
return nil, err return nil, err
} }
req := new(container.ListRequest)
req.OwnerID = n.uid
req.SetTTL(service.SingleForwardingTTL)
// req.SetBearer(bearer)
_ = bearer _ = bearer
if err := service.SignRequestData(n.key, req); err != nil { cli, tkn, err := n.prepareClient(ctx)
n.log.Error("could not prepare request",
zap.String("request_id", rid),
zap.Error(err))
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil { if err != nil {
n.log.Error("could not prepare client", n.log.Error("could not prepare client",
zap.String("request_id", rid), zap.String("request_id", rid),
@ -113,20 +89,24 @@ func (n *layer) containerList(ctx context.Context) ([]BucketInfo, error) {
return nil, err return nil, err
} }
// todo: think about timeout // own, err := GetOwnerID(bearer)
ctx, cancel := context.WithTimeout(ctx, 30*time.Second) // if err != nil {
defer cancel() // n.log.Error("could not fetch owner id",
// zap.String("request_id", rid),
// zap.Error(err))
// return nil, err
// }
res, err := container.NewServiceClient(conn).List(ctx, req) res, err := cli.ListContainers(ctx, tkn.OwnerID(), client.WithSession(tkn))
if err != nil { if err != nil {
n.log.Error("could not list buckets", n.log.Error("could not fetch container",
zap.String("request_id", rid), zap.String("request_id", rid),
zap.Error(err)) zap.Error(err))
return nil, err return nil, err
} }
list := make([]BucketInfo, 0, len(res.CID)) list := make([]BucketInfo, 0, len(res))
for _, cid := range res.CID { for _, cid := range res {
info, err := n.containerInfo(ctx, cid) info, err := n.containerInfo(ctx, cid)
if err != nil { if err != nil {
n.log.Error("could not fetch container info", n.log.Error("could not fetch container info",

View file

@ -3,13 +3,17 @@ package layer
import ( import (
"context" "context"
"crypto/ecdsa" "crypto/ecdsa"
"errors"
"io" "io"
"strings" "strings"
"time" "time"
"github.com/nspcc-dev/neofs-api-go/object" "github.com/nspcc-dev/neofs-api-go/pkg"
"github.com/nspcc-dev/neofs-api-go/refs" "github.com/nspcc-dev/neofs-api-go/pkg/client"
"github.com/nspcc-dev/neofs-api-go/service" "github.com/nspcc-dev/neofs-api-go/pkg/container"
"github.com/nspcc-dev/neofs-api-go/pkg/object"
"github.com/nspcc-dev/neofs-api-go/pkg/owner"
"github.com/nspcc-dev/neofs-api-go/pkg/token"
"github.com/nspcc-dev/neofs-s3-gate/api" "github.com/nspcc-dev/neofs-s3-gate/api"
"github.com/nspcc-dev/neofs-s3-gate/api/pool" "github.com/nspcc-dev/neofs-s3-gate/api/pool"
"go.uber.org/zap" "go.uber.org/zap"
@ -19,10 +23,19 @@ import (
type ( type (
layer struct { layer struct {
uid *owner.ID
log *zap.Logger log *zap.Logger
cli pool.Client cli pool.Client
uid refs.OwnerID
key *ecdsa.PrivateKey key *ecdsa.PrivateKey
reqTimeout time.Duration
}
Params struct {
Pool pool.Client
Logger *zap.Logger
Timeout time.Duration
NFKey *ecdsa.PrivateKey
} }
GetObjectParams struct { GetObjectParams struct {
@ -50,7 +63,7 @@ type (
} }
NeoFS interface { NeoFS interface {
Get(ctx context.Context, address refs.Address) (*object.Object, error) Get(ctx context.Context, address *object.Address) (*object.Object, error)
} }
Client interface { Client interface {
@ -73,61 +86,41 @@ type (
} }
) )
// AWS3NameHeader key in the object neofs. // AWS3NameHeader key in the object NeoFS.
const AWS3NameHeader = "filename" const AWS3NameHeader = "filename"
// NewGatewayLayer creates instance of layer. It checks credentials // NewGatewayLayer creates instance of layer. It checks credentials
// and establishes gRPC connection with node. // and establishes gRPC connection with node.
func NewLayer(log *zap.Logger, cli pool.Client, key *ecdsa.PrivateKey) (Client, error) { func NewLayer(p *Params) (Client, error) {
uid, err := refs.NewOwnerID(&key.PublicKey) wallet, err := owner.NEO3WalletFromPublicKey(&p.NFKey.PublicKey)
if err != nil { if err != nil {
return nil, err return nil, err
} }
uid := owner.NewID()
uid.SetNeo3Wallet(wallet)
return &layer{ return &layer{
cli: cli,
key: key,
log: log,
uid: uid, uid: uid,
cli: p.Pool,
key: p.NFKey,
log: p.Logger,
reqTimeout: p.Timeout,
}, nil }, nil
} }
// Get NeoFS Object by refs.Address (should be used by auth.Center) // Get NeoFS Object by refs.Address (should be used by auth.Center)
func (n *layer) Get(ctx context.Context, address refs.Address) (*object.Object, error) { func (n *layer) Get(ctx context.Context, address *object.Address) (*object.Object, error) {
conn, err := n.cli.GetConnection(ctx) cli, tkn, err := n.prepareClient(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ gop := new(client.GetObjectParams)
Conn: conn, gop.WithAddress(address)
Addr: address,
Verb: service.Token_Info_Get,
})
if err != nil { return cli.GetObject(ctx, gop, client.WithSession(tkn))
return nil, err
}
req := new(object.GetRequest)
req.Address = address
req.SetTTL(service.SingleForwardingTTL)
req.SetToken(token)
err = service.SignRequestData(n.key, req)
if err != nil {
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
cli, err := object.NewServiceClient(conn).Get(ctx, req)
if err != nil {
return nil, err
}
return receiveObject(cli)
} }
// GetBucketInfo returns bucket name. // GetBucketInfo returns bucket name.
@ -160,31 +153,32 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj
// pagination must be implemented with cache, because search results // pagination must be implemented with cache, because search results
// may be different between search calls // may be different between search calls
var ( var (
err error
bkt *BucketInfo
ids []*object.ID
result ListObjectsInfo result ListObjectsInfo
uniqNames = make(map[string]struct{}) uniqNames = make(map[string]struct{})
) )
bkt, err := n.GetBucketInfo(ctx, p.Bucket) if bkt, err = n.GetBucketInfo(ctx, p.Bucket); err != nil {
if err != nil { return nil, err
} else if ids, err = n.objectSearch(ctx, &findParams{cid: bkt.CID}); err != nil {
return nil, err return nil, err
} }
objectIDs, err := n.objectSearchContainer(ctx, bkt.CID) ln := len(ids)
if err != nil {
return nil, err
}
ln := len(objectIDs)
// todo: check what happens if there is more than maxKeys objects // todo: check what happens if there is more than maxKeys objects
if ln > p.MaxKeys { if ln > p.MaxKeys {
result.IsTruncated = true
ln = p.MaxKeys ln = p.MaxKeys
result.IsTruncated = true
} }
result.Objects = make([]ObjectInfo, 0, ln) result.Objects = make([]*ObjectInfo, 0, ln)
for i := 0; i < ln; i++ { for _, id := range ids {
addr := refs.Address{ObjectID: objectIDs[i], CID: bkt.CID} addr := object.NewAddress()
addr.SetObjectID(id)
addr.SetContainerID(bkt.CID)
meta, err := n.objectHead(ctx, addr) meta, err := n.objectHead(ctx, addr)
if err != nil { if err != nil {
@ -192,17 +186,17 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj
continue continue
} }
// ignore tombstone objects // // ignore tombstone objects
_, hdr := meta.LastHeader(object.HeaderType(object.TombstoneHdr)) // _, hdr := meta.LastHeader(object.HeaderType(object.TombstoneHdr))
if hdr != nil { // if hdr != nil {
continue // continue
} // }
// ignore storage group objects // ignore storage group objects
_, hdr = meta.LastHeader(object.HeaderType(object.StorageGroupHdr)) // _, hdr = meta.LastHeader(object.HeaderType(object.StorageGroupHdr))
if hdr != nil { // if hdr != nil {
continue // continue
} // }
// dirs don't exist in neofs, gateway stores full path to the file // dirs don't exist in neofs, gateway stores full path to the file
// in object header, e.g. `filename`:`/this/is/path/file.txt` // in object header, e.g. `filename`:`/this/is/path/file.txt`
@ -225,8 +219,8 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj
oi = objectInfoFromMeta(meta) oi = objectInfoFromMeta(meta)
} else { // if there are sub-entities in tail - dir } else { // if there are sub-entities in tail - dir
oi = &ObjectInfo{ oi = &ObjectInfo{
Owner: meta.SystemHeader.OwnerID, Owner: meta.GetOwnerID(),
Bucket: meta.SystemHeader.CID.String(), Bucket: bkt.Name,
Name: tail[:ind+1], // dir MUST have slash symbol in the end Name: tail[:ind+1], // dir MUST have slash symbol in the end
// IsDir: true, // IsDir: true,
} }
@ -236,7 +230,7 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj
if _, ok := uniqNames[oi.Name]; !ok { if _, ok := uniqNames[oi.Name]; !ok {
uniqNames[oi.Name] = struct{}{} uniqNames[oi.Name] = struct{}{}
result.Objects = append(result.Objects, *oi) result.Objects = append(result.Objects, oi)
} }
} }
} }
@ -246,98 +240,74 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj
// GetObject from storage. // GetObject from storage.
func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error { func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
cid, err := refs.CIDFromString(p.Bucket) var (
if err != nil { err error
oid *object.ID
cid = container.NewID()
)
if err = cid.Parse(p.Bucket); err != nil {
return err
} else if oid, err = n.objectFindID(ctx, &findParams{cid: cid, key: p.Object}); err != nil {
return err return err
} }
oid, err := n.objectFindID(ctx, cid, p.Object, false) addr := object.NewAddress()
if err != nil { addr.SetObjectID(oid)
return err addr.SetContainerID(cid)
}
addr := refs.Address{ _, err = n.objectGet(ctx, &getParams{
ObjectID: oid, Writer: p.Writer,
CID: cid,
} addr: addr,
_, err = n.objectGet(ctx, getParams{
addr: addr, offset: p.Offset,
start: p.Offset,
length: p.Length, length: p.Length,
writer: p.Writer,
}) })
return err return err
} }
// GetObjectInfo returns meta information about the object. // GetObjectInfo returns meta information about the object.
func (n *layer) GetObjectInfo(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { func (n *layer) GetObjectInfo(ctx context.Context, bucketName, filename string) (*ObjectInfo, error) {
var meta *object.Object var (
if cid, err := refs.CIDFromString(bucketName); err != nil { err error
oid *object.ID
cid = container.NewID()
meta *object.Object
)
if err = cid.Parse(bucketName); err != nil {
return nil, err return nil, err
} else if oid, err := n.objectFindID(ctx, cid, objectName, false); err != nil { } else if oid, err = n.objectFindID(ctx, &findParams{cid: cid, key: filename}); err != nil {
return nil, err
} else if meta, err = n.objectHead(ctx, refs.Address{CID: cid, ObjectID: oid}); err != nil {
return nil, err return nil, err
} }
addr := object.NewAddress()
addr.SetObjectID(oid)
addr.SetContainerID(cid)
if meta, err = n.objectHead(ctx, addr); err != nil {
return nil, err
}
return objectInfoFromMeta(meta), nil return objectInfoFromMeta(meta), nil
} }
func GetOwnerID(tkn *token.BearerToken) (*owner.ID, error) {
switch pkg.SDKVersion().GetMajor() {
case 2:
id := tkn.ToV2().GetBody().GetOwnerID()
return owner.NewIDFromV2(id), nil
default:
return nil, errors.New("unknown version")
}
}
// PutObject into storage. // PutObject into storage.
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error) { func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error) {
cid, err := refs.CIDFromString(p.Bucket) return n.objectPut(ctx, p)
if err != nil {
return nil, err
}
_, err = n.objectFindID(ctx, cid, p.Object, true)
if err == nil {
return nil, &api.ObjectAlreadyExists{
Bucket: p.Bucket,
Object: p.Object,
}
}
oid, err := refs.NewObjectID()
if err != nil {
return nil, err
}
sgid, err := refs.NewSGID()
if err != nil {
return nil, err
}
addr := refs.Address{
ObjectID: oid,
CID: cid,
}
meta, err := n.objectPut(ctx, putParams{
addr: addr,
size: p.Size,
name: p.Object,
r: p.Reader,
userHeaders: p.Header,
})
if err != nil {
return nil, err
}
oi := objectInfoFromMeta(meta)
// for every object create storage group, otherwise object will be deleted
addr.ObjectID = sgid
_, err = n.storageGroupPut(ctx, sgParams{
addr: addr,
objects: []refs.ObjectID{oid},
})
if err != nil {
return nil, err
}
return oi, nil
} }
// CopyObject from one bucket into another bucket. // CopyObject from one bucket into another bucket.
@ -374,28 +344,34 @@ func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*ObjectInf
} }
// DeleteObject removes all objects with passed nice name. // DeleteObject removes all objects with passed nice name.
func (n *layer) DeleteObject(ctx context.Context, bucket, object string) error { func (n *layer) DeleteObject(ctx context.Context, bucket, filename string) error {
cid, err := refs.CIDFromString(bucket) var (
if err != nil { err error
return &api.DeleteError{ ids []*object.ID
Err: err, cid = container.NewID()
Object: object, )
}
}
ids, err := n.objectFindIDs(ctx, cid, object) if err = cid.Parse(bucket); err != nil {
if err != nil {
return &api.DeleteError{ return &api.DeleteError{
Err: err, Err: err,
Object: object, Object: filename,
}
} else if ids, err = n.objectSearch(ctx, &findParams{cid: cid, key: filename}); err != nil {
return &api.DeleteError{
Err: err,
Object: filename,
} }
} }
for _, id := range ids { for _, id := range ids {
if err = n.objectDelete(ctx, delParams{addr: refs.Address{CID: cid, ObjectID: id}}); err != nil { addr := object.NewAddress()
addr.SetObjectID(id)
addr.SetContainerID(cid)
if err = n.objectDelete(ctx, addr); err != nil {
return &api.DeleteError{ return &api.DeleteError{
Err: err, Err: err,
Object: object, Object: filename,
} }
} }
} }

View file

@ -1,590 +1,209 @@
package layer package layer
import ( import (
"bytes"
"context" "context"
"io" "io"
"net/http"
"time" "time"
"github.com/nspcc-dev/neofs-api-go/object" "github.com/nspcc-dev/neofs-api-go/pkg/client"
"github.com/nspcc-dev/neofs-api-go/query" "github.com/nspcc-dev/neofs-api-go/pkg/container"
"github.com/nspcc-dev/neofs-api-go/refs" "github.com/nspcc-dev/neofs-api-go/pkg/object"
"github.com/nspcc-dev/neofs-api-go/service" "github.com/nspcc-dev/neofs-api-go/pkg/owner"
"github.com/nspcc-dev/neofs-api-go/storagegroup" "github.com/nspcc-dev/neofs-api-go/pkg/token"
"github.com/nspcc-dev/neofs-s3-gate/api/pool" "github.com/nspcc-dev/neofs-s3-gate/api"
"github.com/nspcc-dev/neofs-s3-gate/auth"
"github.com/pkg/errors" "github.com/pkg/errors"
"go.uber.org/zap"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
) )
const (
dataChunkSize = 3 * object.UnitsMB
objectVersion = 1
)
type ( type (
putParams struct { findParams struct {
addr refs.Address key string
name string cid *container.ID
size int64
r io.Reader
userHeaders map[string]string
}
sgParams struct {
addr refs.Address
objects []refs.ObjectID
}
delParams struct {
addr refs.Address
} }
getParams struct { getParams struct {
addr refs.Address io.Writer
start int64
addr *object.Address
offset int64
length int64 length int64
writer io.Writer
} }
) )
// objectSearchContainer returns all available objects in the container. func (n *layer) prepareClient(ctx context.Context) (*client.Client, *token.SessionToken, error) {
func (n *layer) objectSearchContainer(ctx context.Context, cid refs.CID) ([]refs.ObjectID, error) { conn, err := n.cli.Connection(ctx)
var q query.Query
q.Filters = append(q.Filters, query.Filter{
Type: query.Filter_Exact,
Name: object.KeyRootObject,
})
conn, err := n.cli.GetConnection(ctx)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
queryBinary, err := q.Marshal() tkn, err := n.cli.Token(ctx, conn)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ cli, err := client.New(n.key, client.WithGRPCConnection(conn))
Conn: conn,
Addr: refs.Address{CID: cid},
Verb: service.Token_Info_Search,
})
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
req := new(object.SearchRequest) return cli, tkn, nil
req.Query = queryBinary
req.QueryVersion = 1
req.ContainerID = cid
req.SetTTL(service.SingleForwardingTTL)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
searchClient, err := object.NewServiceClient(conn).Search(ctx, req)
if err != nil {
return nil, err
}
var (
response []refs.Address
result []refs.ObjectID
)
for {
resp, err := searchClient.Recv()
if err != nil {
if err == io.EOF {
break
}
return nil, errors.New("search command received error")
}
response = append(response, resp.Addresses...)
}
for i := range response {
result = append(result, response[i].ObjectID)
}
return result, nil
} }
// objectFindIDs returns object id's (uuid) based on they nice name in s3. If // objectSearch returns all available objects by search params.
// nice name is uuid compatible, then function returns it. func (n *layer) objectSearch(ctx context.Context, p *findParams) ([]*object.ID, error) {
func (n *layer) objectFindIDs(ctx context.Context, cid refs.CID, name string) ([]refs.ObjectID, error) { cli, tkn, err := n.prepareClient(ctx)
var q query.Query
q.Filters = append(q.Filters, query.Filter{
Type: query.Filter_Exact,
Name: object.KeyRootObject,
})
q.Filters = append(q.Filters, query.Filter{
Type: query.Filter_Exact,
Name: AWS3NameHeader,
Value: name,
})
queryBinary, err := q.Marshal()
if err != nil { if err != nil {
return nil, err return nil, err
} }
conn, err := n.cli.GetConnection(ctx) filter := object.NewSearchFilters()
if err != nil { filter.AddNonLeafFilter()
return nil, err
sop := new(client.SearchObjectParams)
if p.cid != nil {
filter.AddFilter(object.HdrSysNameCID, p.cid.String(), object.MatchStringEqual)
sop.WithContainerID(p.cid)
} }
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ if p.key != "" {
Conn: conn, filter.AddFilter(AWS3NameHeader, p.key, object.MatchStringEqual)
Addr: refs.Address{CID: cid},
Verb: service.Token_Info_Search,
})
if err != nil {
return nil, err
} }
req := new(object.SearchRequest) sop.WithSearchFilters(filter)
req.Query = queryBinary
req.QueryVersion = 1
req.ContainerID = cid
req.SetTTL(service.SingleForwardingTTL)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req) return cli.SearchObject(ctx, sop, client.WithSession(tkn))
if err != nil {
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
searchClient, err := object.NewServiceClient(conn).Search(ctx, req)
if err != nil {
return nil, err
}
var response []refs.Address
for {
resp, err := searchClient.Recv()
if err != nil {
if err == io.EOF {
break
}
return nil, errors.New("search command received error")
}
response = append(response, resp.Addresses...)
}
switch ln := len(response); {
case ln > 0:
result := make([]refs.ObjectID, 0, len(response))
for i := range response {
result = append(result, response[i].ObjectID)
}
return result, nil
default:
return nil, errors.New("object not found")
}
} }
// objectFindID returns object id (uuid) based on it's nice name in s3. If // objectFindID returns object id (uuid) based on it's nice name in s3. If
// nice name is uuid compatible, then function returns it. // nice name is uuid compatible, then function returns it.
func (n *layer) objectFindID(ctx context.Context, cid refs.CID, name string, put bool) (refs.ObjectID, error) { func (n *layer) objectFindID(ctx context.Context, p *findParams) (*object.ID, error) {
var id refs.ObjectID if result, err := n.objectSearch(ctx, p); err != nil {
return nil, err
if result, err := n.objectFindIDs(ctx, cid, name); err != nil {
return id, err
} else if ln := len(result); ln == 0 { } else if ln := len(result); ln == 0 {
// Minio lists all objects with and without nice names. All objects return nil, status.Error(codes.NotFound, "object not found")
// without nice name still have "name" in terms of minio - uuid encoded
// into string. There is a tricky case when user upload object
// with nice name that is encoded uuid.
// There is an optimisation to parse name and return uuid if it name is uuid
// compatible. It _should not_ work in case of put operation, because object
// with uuid compatible nice name may not exist. Therefore this optimization
// breaks object put logic and must be turned off.
if !put {
err := id.Parse(name)
if err == nil {
return id, nil
}
}
return id, status.Error(codes.NotFound, "object not found")
} else if ln == 1 { } else if ln == 1 {
return result[0], nil return result[0], nil
} }
return id, errors.New("several objects with the same name found") return nil, errors.New("several objects with the same name found")
} }
// objectHead returns all object's headers. // objectHead returns all object's headers.
func (n *layer) objectHead(ctx context.Context, addr refs.Address) (*object.Object, error) { func (n *layer) objectHead(ctx context.Context, addr *object.Address) (*object.Object, error) {
cli, tkn, err := n.prepareClient(ctx)
conn, err := n.cli.GetConnection(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ ohp := new(client.ObjectHeaderParams)
Conn: conn, ohp.WithAddress(addr)
Addr: addr, ohp.WithAllFields()
Verb: service.Token_Info_Head, ohp.WithMainFields()
})
if err != nil {
return nil, err
}
req := new(object.HeadRequest) return cli.GetObjectHeader(ctx, ohp, client.WithSession(tkn))
req.Address = addr
req.FullHeaders = true
req.SetTTL(service.SingleForwardingTTL)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
res, err := object.NewServiceClient(conn).Head(ctx, req)
if err != nil {
return nil, err
}
return res.Object, nil
}
func receiveObject(cli object.Service_GetClient) (*object.Object, error) {
var (
off int
buf []byte
obj *object.Object
)
for {
resp, err := cli.Recv()
if err != nil {
if err == io.EOF {
break
}
return nil, err
}
switch o := resp.R.(type) {
case *object.GetResponse_Object:
if obj != nil {
return nil, errors.New("object headers already received")
} else if _, hdr := o.Object.LastHeader(object.HeaderType(object.TombstoneHdr)); hdr != nil {
return nil, errors.New("object already removed")
}
obj = o.Object
buf = make([]byte, obj.SystemHeader.PayloadLength)
if len(obj.Payload) > 0 {
off += copy(buf, obj.Payload)
}
case *object.GetResponse_Chunk:
if obj == nil {
return nil, errors.New("object headers not received")
}
off += copy(buf[off:], o.Chunk)
default:
return nil, errors.Errorf("unknown response %T", o)
}
}
if obj == nil {
return nil, errors.New("object headers not received")
}
obj.Payload = buf
return obj, nil
} }
// objectGet and write it into provided io.Reader. // objectGet and write it into provided io.Reader.
func (n *layer) objectGet(ctx context.Context, p getParams) (*object.Object, error) { func (n *layer) objectGet(ctx context.Context, p *getParams) (*object.Object, error) {
conn, err := n.cli.GetConnection(ctx) cli, tkn, err := n.prepareClient(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ // prepare length/offset writer
Conn: conn, writer := newWriter(p.Writer, p.offset, p.length)
Addr: p.addr,
Verb: service.Token_Info_Get,
})
if err != nil {
return nil, err
}
// todo: replace object.Get() call by object.GetRange() for gop := new(client.GetObjectParams)
// true sequential reading support; it will be possible when gop.WithPayloadWriter(writer)
// object.GetRange() response message become gRPC stream.
req := new(object.GetRequest)
req.Address = p.addr
req.SetTTL(service.SingleForwardingTTL)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req) return cli.GetObject(ctx, gop, client.WithSession(tkn))
if err != nil {
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
var obj *object.Object
if cli, err := object.NewServiceClient(conn).Get(ctx, req); err != nil {
return nil, err
} else if obj, err = receiveObject(cli); err != nil {
return nil, err
} else if ln := int64(obj.SystemHeader.PayloadLength); p.start+p.length > ln {
return nil, errors.Errorf("slice bounds out of range: len = %d, start = %d, offset = %d",
ln, p.start, p.length)
} else if _, err = p.writer.Write(obj.Payload[p.start : p.start+p.length]); err != nil {
return nil, err
}
// remove payload:
obj.Payload = nil
return obj, nil
} }
// objectPut into neofs, took payload from io.Reader. // objectPut into NeoFS, took payload from io.Reader.
func (n *layer) objectPut(ctx context.Context, p putParams) (*object.Object, error) { func (n *layer) objectPut(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error) {
conn, err := n.cli.GetConnection(ctx) var (
if err != nil { err error
own *owner.ID
brt *token.BearerToken
cid = container.NewID()
)
if brt, err = auth.GetBearerToken(ctx); err != nil {
return nil, err return nil, err
} } else if own, err = GetOwnerID(brt); err != nil {
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
Conn: conn,
Addr: p.addr,
Verb: service.Token_Info_Put,
})
if err != nil {
n.log.Error("could not prepare token",
zap.Error(err))
return nil, err return nil, err
} } else if err = cid.Parse(p.Bucket); err != nil {
putClient, err := object.NewServiceClient(conn).Put(ctx)
if err != nil {
n.log.Error("could not prepare PutClient",
zap.Error(err))
return nil, err return nil, err
} } else if _, err = n.objectFindID(ctx, &findParams{cid: cid, key: p.Object}); err == nil {
return nil, &api.ObjectAlreadyExists{
if p.userHeaders == nil { Bucket: p.Bucket,
p.userHeaders = make(map[string]string) Object: p.Object,
}
// Set object name if not set before
if _, ok := p.userHeaders[AWS3NameHeader]; !ok {
p.userHeaders[AWS3NameHeader] = p.name
}
readBuffer := make([]byte, dataChunkSize)
obj := &object.Object{
SystemHeader: object.SystemHeader{
Version: objectVersion,
ID: p.addr.ObjectID,
OwnerID: n.uid,
CID: p.addr.CID,
PayloadLength: uint64(p.size),
},
Headers: parseUserHeaders(p.userHeaders),
}
req := object.MakePutRequestHeader(obj)
req.SetTTL(service.SingleForwardingTTL)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
n.log.Error("could not prepare request",
zap.Error(err))
return nil, err
}
err = putClient.Send(req)
if err != nil {
n.log.Error("could not send request",
zap.Error(err))
return nil, err
}
read, err := p.r.Read(readBuffer)
for read > 0 {
if err != nil && err != io.EOF {
n.log.Error("something went wrong",
zap.Error(err))
return nil, err
} }
if read > 0 {
req := object.MakePutRequestChunk(readBuffer[:read])
req.SetTTL(service.SingleForwardingTTL)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
n.log.Error("could not sign chunk request",
zap.Error(err))
return nil, err
}
err = putClient.Send(req)
if err != nil && err != io.EOF {
n.log.Error("could not send chunk",
zap.Error(err))
return nil, err
}
}
read, err = p.r.Read(readBuffer)
} }
_, err = putClient.CloseAndRecv() cli, tkn, err := n.prepareClient(ctx)
if err != nil {
n.log.Error("could not finish request",
zap.Error(err))
return nil, err
}
// maybe make a head?
return obj, nil
}
// storageGroupPut prepares storage group object and put it into neofs.
func (n *layer) storageGroupPut(ctx context.Context, p sgParams) (*object.Object, error) {
conn, err := n.cli.GetConnection(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ attributes := make([]*object.Attribute, 0, len(p.Header)+1)
Conn: conn,
Addr: p.addr, filename := object.NewAttribute()
Verb: service.Token_Info_Put, filename.SetKey(AWS3NameHeader)
}) filename.SetValue(p.Object)
if err != nil {
attributes = append(attributes, filename)
for k, v := range p.Header {
ua := object.NewAttribute()
ua.SetKey(k)
ua.SetValue(v)
attributes = append(attributes, ua)
}
b := new(bytes.Buffer)
r := io.TeeReader(p.Reader, b)
raw := object.NewRaw()
raw.SetOwnerID(own)
raw.SetContainerID(cid)
raw.SetAttributes(attributes...)
pop := new(client.PutObjectParams)
pop.WithPayloadReader(r)
pop.WithObject(raw.Object())
if _, err = cli.PutObject(ctx, pop, client.WithSession(tkn)); err != nil {
return nil, err return nil, err
} }
// todo: think about timeout return &ObjectInfo{
ctx, cancel := context.WithTimeout(ctx, 30*time.Second) Bucket: p.Bucket,
defer cancel() Name: p.Object,
Size: p.Size,
putClient, err := object.NewServiceClient(conn).Put(ctx) Created: time.Now(),
if err != nil { ContentType: http.DetectContentType(b.Bytes()),
return nil, err Owner: own,
} Headers: p.Header,
}, nil
sg := &object.Object{
SystemHeader: object.SystemHeader{
Version: objectVersion,
ID: p.addr.ObjectID,
OwnerID: n.uid,
CID: p.addr.CID,
},
Headers: make([]object.Header, 0, len(p.objects)),
}
for i := range p.objects {
sg.AddHeader(&object.Header{Value: &object.Header_Link{
Link: &object.Link{Type: object.Link_StorageGroup, ID: p.objects[i]},
}})
}
sg.SetStorageGroup(new(storagegroup.StorageGroup))
req := object.MakePutRequestHeader(sg)
req.SetTTL(service.SingleForwardingTTL)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
return nil, err
}
err = putClient.Send(req)
if err != nil {
return nil, err
}
_, err = putClient.CloseAndRecv()
if err != nil {
return nil, err
}
return sg, nil
} }
// objectDelete puts tombstone object into neofs. // objectDelete puts tombstone object into neofs.
func (n *layer) objectDelete(ctx context.Context, p delParams) error { func (n *layer) objectDelete(ctx context.Context, address *object.Address) error {
conn, err := n.cli.GetConnection(ctx) cli, tkn, err := n.prepareClient(ctx)
if err != nil { if err != nil {
return err return err
} }
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ dob := new(client.DeleteObjectParams)
Conn: conn, dob.WithAddress(address)
Addr: p.addr,
Verb: service.Token_Info_Delete,
})
if err != nil {
return err
}
req := new(object.DeleteRequest) return cli.DeleteObject(ctx, dob, client.WithSession(tkn))
req.Address = p.addr
req.OwnerID = n.uid
req.SetTTL(service.SingleForwardingTTL)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
return err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
_, err = object.NewServiceClient(conn).Delete(ctx, req)
return err
} }

View file

@ -6,8 +6,8 @@ import (
"strings" "strings"
"time" "time"
"github.com/nspcc-dev/neofs-api-go/object" "github.com/nspcc-dev/neofs-api-go/pkg/object"
"github.com/nspcc-dev/neofs-api-go/refs" "github.com/nspcc-dev/neofs-api-go/pkg/owner"
) )
type ( type (
@ -17,7 +17,7 @@ type (
Size int64 Size int64
ContentType string ContentType string
Created time.Time Created time.Time
Owner refs.OwnerID Owner *owner.ID
Headers map[string]string Headers map[string]string
} }
@ -39,7 +39,7 @@ type (
NextContinuationToken string NextContinuationToken string
// List of objects info for this request. // List of objects info for this request.
Objects []ObjectInfo Objects []*ObjectInfo
// List of prefixes for this request. // List of prefixes for this request.
Prefixes []string Prefixes []string
@ -48,65 +48,46 @@ type (
const pathSeparator = string(os.PathSeparator) const pathSeparator = string(os.PathSeparator)
func userHeaders(h []object.Header) map[string]string { func userHeaders(attrs []*object.Attribute) map[string]string {
result := make(map[string]string, len(h)) result := make(map[string]string, len(attrs))
for i := range h { for _, attr := range attrs {
switch v := h[i].Value.(type) { result[attr.GetKey()] = attr.GetValue()
case *object.Header_UserHeader:
result[v.UserHeader.Key] = v.UserHeader.Value
default:
continue
}
} }
return result return result
} }
func objectInfoFromMeta(meta *object.Object) *ObjectInfo { func objectInfoFromMeta(meta *object.Object) *ObjectInfo {
aws3name := meta.SystemHeader.ID.String() aws3name := meta.GetID().String()
userHeaders := userHeaders(meta.Headers) userHeaders := userHeaders(meta.GetAttributes())
if name, ok := userHeaders[AWS3NameHeader]; ok { if name, ok := userHeaders[AWS3NameHeader]; ok {
aws3name = name aws3name = name
delete(userHeaders, name) delete(userHeaders, name)
} }
mimeType := http.DetectContentType(meta.Payload) mimeType := http.DetectContentType(meta.GetPayload())
return &ObjectInfo{ return &ObjectInfo{
Bucket: meta.SystemHeader.CID.String(), Bucket: meta.GetContainerID().String(),
Name: aws3name, Name: aws3name,
ContentType: mimeType, ContentType: mimeType,
Headers: userHeaders, Headers: userHeaders,
Size: int64(meta.SystemHeader.PayloadLength), Size: int64(meta.GetPayloadSize()),
Created: time.Unix(meta.SystemHeader.CreatedAt.UnixTime, 0), Created: time.Now(), // time.Unix(meta.GetCreationEpoch(), 0),
} }
} }
// parseUserHeaders converts a flat key/value map into a slice of
// object.Header values, wrapping each pair as an object.Header_UserHeader.
// NOTE: the order of the returned slice follows Go map iteration and is
// therefore unspecified.
func parseUserHeaders(h map[string]string) []object.Header {
	headers := make([]object.Header, 0, len(h))
	for k, v := range h {
		uh := &object.UserHeader{Key: k, Value: v}
		headers = append(headers, object.Header{
			Value: &object.Header_UserHeader{UserHeader: uh},
		})
	}
	return headers
}
func nameFromObject(o *object.Object) (string, string) { func nameFromObject(o *object.Object) (string, string) {
var ( var name = o.GetID().String()
name string
uh = userHeaders(o.Headers)
)
if _, ok := uh[AWS3NameHeader]; !ok { for _, attr := range o.GetAttributes() {
name = o.SystemHeader.ID.String() if attr.GetKey() == AWS3NameHeader {
} else { name = attr.GetValue()
name = uh[AWS3NameHeader]
break
}
} }
ind := strings.LastIndex(name, pathSeparator) ind := strings.LastIndex(name, pathSeparator)

49
api/layer/writer.go Normal file
View file

@ -0,0 +1,49 @@
package layer
import "io"
type offsetWriter struct {
io.Writer
written int64
skipped int64
offset int64
length int64
}
func newWriter(w io.Writer, offset, length int64) io.Writer {
return &offsetWriter{
Writer: w,
offset: offset,
length: length,
}
}
func (w *offsetWriter) Write(p []byte) (int, error) {
ln := len(p)
length := int64(ln)
offset := w.offset - w.skipped
if length-offset < 0 {
w.skipped += length
return ln, nil
}
length -= offset
left := w.length - w.written
if left-length < 0 || length-left < length {
length = left
} else {
return 0, nil
}
n, err := w.Writer.Write(p[offset : offset+length])
w.written += int64(n)
w.skipped += offset
return n, err
}

77
api/layer/writer_test.go Normal file
View file

@ -0,0 +1,77 @@
package layer
import (
"bytes"
"crypto/rand"
"testing"
"github.com/stretchr/testify/require"
)
// testBuffer returns a fresh 1 KiB slice filled with cryptographically
// random bytes, aborting the test if the random source fails.
func testBuffer(t *testing.T) []byte {
	data := make([]byte, 1024)

	_, err := rand.Read(data)
	require.NoError(t, err)

	return data
}
// TestOffsetWriter checks that newWriter forwards exactly the requested
// [offset, offset+length) window of the input stream, regardless of how
// the stream is chunked. The three former copy-pasted subtests are folded
// into one table; subtest names and assertions are unchanged.
func TestOffsetWriter(t *testing.T) {
	b := testBuffer(t)
	k := 64
	d := len(b) / k

	cases := []struct {
		name   string
		offset int64
		length int64
	}{
		{name: "1024 / 100 / 100 bytes success", offset: 100, length: 100},
		{name: "1024 / 0 / 100 bytes success", offset: 0, length: 100},
		{name: "1024 / 0 / 1024 bytes success", offset: 0, length: 1024},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			w := new(bytes.Buffer)
			wt := newWriter(w, tc.offset, tc.length)

			// Feed the buffer in k equal chunks so that writes straddle
			// the window boundaries.
			for i := 0; i < k; i++ {
				_, err := wt.Write(b[i*d : (i+1)*d])
				require.NoError(t, err)
			}

			wo := wt.(*offsetWriter)
			require.Equal(t, tc.offset, wo.skipped)
			require.Equal(t, tc.length, wo.written)
			require.Equal(t, b[tc.offset:tc.offset+tc.length], w.Bytes())
		})
	}
}