Merge pull request #5 from nspcc-dev/object-layer

Refactoring ObjectLayer
Evgeniy Kulikov 2020-08-03 17:36:20 +03:00 committed by GitHub
commit 2b5b71ac83
12 changed files with 602 additions and 906 deletions

go.mod (4 changes)

@ -49,7 +49,7 @@ require (
github.com/lib/pq v1.1.1
github.com/mattn/go-colorable v0.1.4
github.com/mattn/go-ieproxy v0.0.0-20190805055040-f9202b1cfdeb // indirect
github.com/mattn/go-isatty v0.0.8
github.com/mattn/go-isatty v0.0.10
github.com/mattn/go-runewidth v0.0.4 // indirect
github.com/miekg/dns v1.1.8
github.com/minio/cli v1.22.0
@ -87,13 +87,11 @@ require (
github.com/spf13/pflag v1.0.3
github.com/spf13/viper v1.7.0
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94
github.com/stretchr/testify v1.5.1
github.com/tinylib/msgp v1.1.1
github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a
github.com/willf/bitset v1.1.10 // indirect
github.com/willf/bloom v2.0.3+incompatible
github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c
go.etcd.io/bbolt v1.3.3 // indirect
go.uber.org/atomic v1.6.0
go.uber.org/zap v1.15.0
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9

go.sum (6 changes)

@ -313,6 +313,8 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10 h1:qxFzApOv4WsAL965uUPIsXzAKCZxN2p9UqdhFS4ZW10=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
@ -532,9 +534,8 @@ github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@ -634,6 +635,7 @@ golang.org/x/sys v0.0.0-20190523142557-0e01d883c5c5/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=


@ -0,0 +1,130 @@
package layer
import (
"context"
"time"
"github.com/minio/minio/auth"
"github.com/nspcc-dev/neofs-api-go/container"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"go.uber.org/zap"
)
type (
BucketInfo struct {
Name string
CID refs.CID
Created time.Time
}
ListObjectsParams struct {
Bucket string
Prefix string
Token string
Delimiter string
MaxKeys int
}
)
func (n *layer) containerInfo(ctx context.Context, cid refs.CID) (*BucketInfo, error) {
bearer, err := auth.GetBearerToken(ctx)
if err != nil {
n.log.Error("could not receive bearer token",
zap.Error(err))
return nil, err
}
req := new(container.GetRequest)
req.SetCID(cid)
req.SetTTL(service.SingleForwardingTTL)
// req.SetBearer(bearer)
_ = bearer
if err = service.SignRequestData(n.key, req); err != nil {
n.log.Error("could not prepare request",
zap.Error(err))
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
n.log.Error("could not prepare client",
zap.Error(err))
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
res, err := container.NewServiceClient(conn).Get(ctx, req)
if err != nil {
n.log.Error("could not list buckets",
zap.Error(err))
return nil, err
}
_ = res
return &BucketInfo{
CID: cid,
Name: cid.String(), // should be fetched from container.GetResponse
Created: time.Time{}, // should be fetched from container.GetResponse
}, nil
}
func (n *layer) containerList(ctx context.Context) ([]BucketInfo, error) {
bearer, err := auth.GetBearerToken(ctx)
if err != nil {
n.log.Error("could not receive bearer token",
zap.Error(err))
return nil, err
}
req := new(container.ListRequest)
req.OwnerID = n.uid
req.SetTTL(service.SingleForwardingTTL)
// req.SetBearer(bearer)
_ = bearer
if err := service.SignRequestData(n.key, req); err != nil {
n.log.Error("could not prepare request",
zap.Error(err))
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
n.log.Error("could not prepare client",
zap.Error(err))
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
res, err := container.NewServiceClient(conn).List(ctx, req)
if err != nil {
n.log.Error("could not list buckets",
zap.Error(err))
return nil, err
}
list := make([]BucketInfo, 0, len(res.CID))
for _, cid := range res.CID {
info, err := n.containerInfo(ctx, cid)
if err != nil {
n.log.Error("could not fetch container info",
zap.Error(err))
continue
}
list = append(list, *info)
}
return list, nil
}


@ -1,64 +0,0 @@
package layer
import (
"context"
"crypto/ecdsa"
"math"
"time"
auth "github.com/minio/minio/auth"
minio "github.com/minio/minio/legacy"
"github.com/minio/minio/neofs/pool"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/pkg/errors"
"go.uber.org/zap"
)
type (
// neofsObject implements gateway for MinIO and S3
// compatible object storage server.
neofsObject struct {
minio.GatewayUnsupported // placeholder for unimplemented functions
log *zap.Logger
cli pool.Client
key *ecdsa.PrivateKey
owner refs.OwnerID
token *service.Token
bearerToken *service.BearerTokenMsg
// Concurrency must be resolved by creating one lock per object, but
// it may be unnecessary in neofs, because objects are immutable. So
// there are no mutexes or locks right now, but they might be
// useful during parallel execution from one client (different clients
// have different `neofsObject` instances).
// todo: add fast expired cache to store list of containers or
// even short objects during sequential reading
}
)
// NewLayer creates an instance of neofsObject. It checks credentials
// and establishes gRPC connection with node.
func NewLayer(log *zap.Logger, cli pool.Client, center *auth.Center) (minio.ObjectLayer, error) {
// setup gRPC connection
// todo: think about getting timeout parameters from cli args
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
token, err := generateToken(ctx, tokenParams{
cli: cli,
key: center.GetNeoFSPrivateKey(),
until: math.MaxInt64,
})
if err != nil {
return nil, errors.Wrap(err, "can't establish neofs session with remote host")
}
return &neofsObject{
cli: cli,
key: center.GetNeoFSPrivateKey(),
log: log,
owner: center.GetOwnerID(),
token: token,
}, nil
}


@ -0,0 +1,341 @@
package layer
import (
"context"
"crypto/ecdsa"
"io"
"strings"
"github.com/minio/minio/neofs/pool"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/pkg/errors"
"go.uber.org/zap"
)
type (
layer struct {
log *zap.Logger
cli pool.Client
uid refs.OwnerID
key *ecdsa.PrivateKey
}
GetObjectParams struct {
Bucket string
Object string
Offset int64
Length int64
Writer io.Writer
}
PutObjectParams struct {
Bucket string
Object string
Size int64
Reader io.Reader
Header map[string]string
}
CopyObjectParams struct {
SrcBucket string
DstBucket string
SrcObject string
DstObject string
}
Client interface {
ListBuckets(ctx context.Context) ([]BucketInfo, error)
GetBucketInfo(ctx context.Context, name string) (*BucketInfo, error)
GetObject(ctx context.Context, p *GetObjectParams) error
GetObjectInfo(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error)
PutObject(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error)
CopyObject(ctx context.Context, p *CopyObjectParams) (*ObjectInfo, error)
ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObjectsInfo, error)
DeleteObject(ctx context.Context, bucket, object string) error
DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error)
}
)
// AWS3NameHeader is the user header key under which the S3 object name (full file path) is stored in a NeoFS object.
const AWS3NameHeader = "filename"
// NewLayer creates an instance of the NeoFS layer. It derives the owner ID
// from the given key; gRPC connections are obtained from the pool per request.
func NewLayer(log *zap.Logger, cli pool.Client, key *ecdsa.PrivateKey) (Client, error) {
uid, err := refs.NewOwnerID(&key.PublicKey)
if err != nil {
return nil, err
}
return &layer{
cli: cli,
key: key,
log: log,
uid: uid,
}, nil
}
// GetBucketInfo returns bucket info by its name.
func (n *layer) GetBucketInfo(ctx context.Context, name string) (*BucketInfo, error) {
list, err := n.containerList(ctx)
if err != nil {
return nil, err
}
for _, bkt := range list {
if bkt.Name == name {
return &bkt, nil
}
}
return nil, errors.New("bucket not found")
}
// ListBuckets returns all user containers. Name of the bucket is a container
// id. Timestamp is omitted since it is not saved in neofs container.
func (n *layer) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
return n.containerList(ctx)
}
// ListObjects returns objects from the container. It ignores tombstones and
// storage groups.
// ctx, bucket, prefix, continuationToken, delimiter, maxKeys
func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObjectsInfo, error) {
// todo: make pagination when search response will be gRPC stream,
// pagination must be implemented with cache, because search results
// may be different between search calls
var (
result ListObjectsInfo
uniqNames = make(map[string]struct{})
)
bkt, err := n.GetBucketInfo(ctx, p.Bucket)
if err != nil {
return nil, err
}
objectIDs, err := n.objectSearchContainer(ctx, bkt.CID)
if err != nil {
return nil, err
}
ln := len(objectIDs)
// todo: check what happens if there are more than maxKeys objects
if ln > p.MaxKeys {
result.IsTruncated = true
ln = p.MaxKeys
}
result.Objects = make([]ObjectInfo, 0, ln)
for i := 0; i < ln; i++ {
addr := refs.Address{ObjectID: objectIDs[i], CID: bkt.CID}
meta, err := n.objectHead(ctx, addr)
if err != nil {
n.log.Warn("could not fetch object meta", zap.Error(err))
continue
}
// ignore tombstone objects
_, hdr := meta.LastHeader(object.HeaderType(object.TombstoneHdr))
if hdr != nil {
continue
}
// ignore storage group objects
_, hdr = meta.LastHeader(object.HeaderType(object.StorageGroupHdr))
if hdr != nil {
continue
}
// Dirs don't exist in NeoFS: the gateway stores the full path to the file
// in an object header, e.g. `filename`:`/this/is/path/file.txt`.
// The prefix argument contains the full dir path from the root, e.g. `/this/is/`.
// To emulate dirs we take the dir part of each path, compare it with the prefix
// and look at what follows the prefix: if the remainder has no
// sub-entities it is a file, otherwise it is a directory (see the sketch below).
_, dirname := nameFromObject(meta)
if strings.HasPrefix(dirname, p.Prefix) {
var (
oi *ObjectInfo
tail = strings.TrimPrefix(dirname, p.Prefix) // strip the literal prefix (TrimPrefix, not a TrimLeft cutset)
ind = strings.Index(tail, pathSeparator)
)
if ind < 0 { // if there are not sub-entities in tail - file
oi = objectInfoFromMeta(meta)
} else { // if there are sub-entities in tail - dir
oi = &ObjectInfo{
Bucket: meta.SystemHeader.CID.String(),
Name: tail[:ind+1], // dir MUST have slash symbol in the end
// IsDir: true,
}
}
// use only unique dir names
if _, ok := uniqNames[oi.Name]; !ok {
uniqNames[oi.Name] = struct{}{}
result.Objects = append(result.Objects, *oi)
}
}
}
return &result, nil
}
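// A minimal sketch of the directory emulation described above (illustrative
// only, not part of this change): the stored path comes from the `filename`
// header and pathSeparator is assumed to be "/".
//
//	listingEntry("/this/is/path/file.txt", "/this/is/") -> ("path/", true)
//	listingEntry("/this/is/file.txt", "/this/is/")      -> ("file.txt", false)
func listingEntry(name, prefix string) (entry string, isDir bool) {
	tail := strings.TrimPrefix(name, prefix)
	if ind := strings.Index(tail, pathSeparator); ind >= 0 {
		// a nested path remains: report only its first element, keeping the slash
		return tail[:ind+1], true
	}
	// nothing left to split: the entry is a plain file
	return tail, false
}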
// GetObject from storage.
func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error {
cid, err := refs.CIDFromString(p.Bucket)
if err != nil {
return err
}
oid, err := n.objectFindID(ctx, cid, p.Object, false)
if err != nil {
return err
}
addr := refs.Address{
ObjectID: oid,
CID: cid,
}
_, err = n.objectGet(ctx, getParams{
addr: addr,
start: p.Offset,
length: p.Length,
writer: p.Writer,
})
return err
}
// GetObjectInfo returns meta information about the object.
func (n *layer) GetObjectInfo(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) {
var meta *object.Object
if cid, err := refs.CIDFromString(bucketName); err != nil {
return nil, err
} else if oid, err := n.objectFindID(ctx, cid, objectName, false); err != nil {
return nil, err
} else if meta, err = n.objectHead(ctx, refs.Address{CID: cid, ObjectID: oid}); err != nil {
return nil, err
}
return objectInfoFromMeta(meta), nil
}
// PutObject into storage.
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error) {
cid, err := refs.CIDFromString(p.Bucket)
if err != nil {
return nil, err
}
_, err = n.objectFindID(ctx, cid, p.Object, true)
// an error here means no object with that name was found; err == nil means it already exists
if err == nil {
return nil, errors.New("object already exists")
}
oid, err := refs.NewObjectID()
if err != nil {
return nil, err
}
sgid, err := refs.NewSGID()
if err != nil {
return nil, err
}
addr := refs.Address{
ObjectID: oid,
CID: cid,
}
meta, err := n.objectPut(ctx, putParams{
addr: addr,
size: p.Size,
name: p.Object,
r: p.Reader,
userHeaders: p.Header,
})
if err != nil {
return nil, err
}
oi := objectInfoFromMeta(meta)
// create a storage group for every object, otherwise the object will be deleted
addr.ObjectID = sgid
_, err = n.storageGroupPut(ctx, sgParams{
addr: addr,
objects: []refs.ObjectID{oid},
})
if err != nil {
return nil, err
}
return oi, nil
}
// CopyObject from one bucket into another bucket.
func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*ObjectInfo, error) {
info, err := n.GetObjectInfo(ctx, p.SrcBucket, p.SrcObject)
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
go func() {
err := n.GetObject(ctx, &GetObjectParams{
Bucket: p.SrcBucket,
Object: p.SrcObject,
Writer: pw,
})
_ = pw.CloseWithError(err)
}()
return n.PutObject(ctx, &PutObjectParams{
Bucket: p.DstBucket,
Object: p.DstObject,
Size: info.Size,
Reader: pr,
Header: info.Headers,
})
}
// DeleteObject from the storage.
func (n *layer) DeleteObject(ctx context.Context, bucket, object string) error {
cid, err := refs.CIDFromString(bucket)
if err != nil {
return err
}
oid, err := n.objectFindID(ctx, cid, object, false)
if err != nil {
return err
}
return n.objectDelete(ctx, delParams{addr: refs.Address{CID: cid, ObjectID: oid}})
}
// DeleteObjects from the storage.
func (n *layer) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) {
var errs = make([]error, 0, len(objects))
for i := range objects {
errs = append(errs, n.DeleteObject(ctx, bucket, objects[i]))
}
return errs, nil
}
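The Client interface above is what the MinIO-facing handlers are expected to program against. Below is a minimal usage sketch, assuming the connection pool and the ECDSA key have already been created by the gateway bootstrap code; the caller and the object name are hypothetical and not part of this change.

func listAndFetch(ctx context.Context, log *zap.Logger, p pool.Client, key *ecdsa.PrivateKey, w io.Writer) error {
	client, err := NewLayer(log, p, key)
	if err != nil {
		return err
	}
	buckets, err := client.ListBuckets(ctx) // every container owned by the key is a bucket
	if err != nil || len(buckets) == 0 {
		return err
	}
	// stream the payload of a (hypothetical) object straight into w
	return client.GetObject(ctx, &GetObjectParams{
		Bucket: buckets[0].Name,
		Object: "example.txt",
		Writer: w,
	})
}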


@ -1,52 +0,0 @@
package layer
import (
"context"
"time"
"go.uber.org/zap"
"github.com/minio/minio/auth"
"github.com/nspcc-dev/neofs-api-go/container"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/pkg/errors"
)
func (n *neofsObject) containerList(ctx context.Context) ([]refs.CID, error) {
req := new(container.ListRequest)
req.OwnerID = n.owner
req.SetVersion(APIVersion)
req.SetTTL(service.SingleForwardingTTL)
bearerToken, err := auth.GetBearerToken(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get bearer token")
}
req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
n.log.Error("could not prepare request",
zap.Error(err))
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
n.log.Error("could not prepare client",
zap.Error(err))
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
res, err := container.NewServiceClient(conn).List(ctx, req)
if err != nil {
n.log.Error("could not list buckets",
zap.Error(err))
return nil, err
}
return res.CID, nil
}


@ -1,34 +0,0 @@
package layer
import (
"context"
"time"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-api-go/state"
)
func (n *neofsObject) statusHealth(ctx context.Context) bool {
req := new(state.HealthRequest)
req.SetTTL(service.NonForwardingTTL)
req.SetVersion(APIVersion)
err := service.SignRequestData(n.key, req)
if err != nil {
return false
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return false
}
// 1 second timeout is the same as in gateway-common.go
// see: cmd/gateway-common.go:295
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
res, err := state.NewStatusClient(conn).HealthCheck(ctx, req)
return err != nil && res != nil && res.Healthy
}


@ -1,445 +0,0 @@
package layer
import (
"context"
"errors"
"io"
"net/http"
"strings"
minio "github.com/minio/minio/legacy"
"github.com/minio/minio/pkg/hash"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"google.golang.org/grpc/connectivity"
)
const (
// AWS3NameHeader key in the object neofs.
AWS3NameHeader = "filename"
// SlashSeparator to find dirs in object path.
SlashSeparator = "/"
)
// Shutdown called when minio remote client closed.
func (n *neofsObject) Shutdown(context.Context) error {
return nil
}
// StorageInfo is not relevant in NeoFS the same way as in B2.
func (n *neofsObject) StorageInfo(ctx context.Context, local bool) (minio.StorageInfo, []error) {
var si minio.StorageInfo
si.Backend.Type = minio.BackendGateway
si.Backend.GatewayOnline = n.statusHealth(ctx)
return si, nil
}
// MakeBucketWithLocation is not supported in neofs gateway.
func (n *neofsObject) MakeBucketWithLocation(ctx context.Context, bucket, location string, lockEnabled bool) error {
// ATTENTION
// We do not support new bucket creation, because NeoFS does not support
// nice names for containers (buckets in s3). Nice name support might be
// implemented with external NNS which can be smart-contract in NEO
// blockchain.
return errors.New("neofs gateway doesn't support bucket creation")
}
// GetBucketInfo returns bucket name.
func (n *neofsObject) GetBucketInfo(ctx context.Context, bucket string) (minio.BucketInfo, error) {
var result = minio.BucketInfo{Name: bucket}
return result, nil
}
// ListBuckets returns all user containers. Name of the bucket is a container
// id. Timestamp is omitted since it is not saved in neofs container.
func (n *neofsObject) ListBuckets(ctx context.Context) ([]minio.BucketInfo, error) {
containerIDs, err := n.containerList(ctx)
if err != nil {
return nil, err
}
buckets := make([]minio.BucketInfo, 0, len(containerIDs))
for i := range containerIDs {
buckets = append(buckets, minio.BucketInfo{
Name: containerIDs[i].String(),
})
}
return buckets, nil
}
// DeleteBucket is not supported in neofs gateway
func (n *neofsObject) DeleteBucket(ctx context.Context, bucket string, force bool) error {
// ATTENTION
// We do not support bucket removal, because NeoFS does not support
// bucket creation, therefore it is not consistent. With NNS it may
// be implemented later, see `MakeBucketWithLocation()` function.
return errors.New("neofs gateway doesn't support bucket removal")
}
// ListObjects returns objects from the container. It ignores tombstones and
// storage groups.
func (n *neofsObject) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (minio.ListObjectsInfo, error) {
// todo: make pagination when search response will be gRPC stream,
// pagination must be implemented with cache, because search results
// may be different between search calls
var (
result minio.ListObjectsInfo
uniqNames = make(map[string]struct{})
)
containerID, err := refs.CIDFromString(bucket)
if err != nil {
return result, err
}
objectIDs, err := n.objectSearchContainer(ctx, containerID)
if err != nil {
return result, err
}
ln := len(objectIDs)
// todo: check what happens if there is more than maxKeys objects
if ln > maxKeys {
result.IsTruncated = true
ln = maxKeys
}
result.Objects = make([]minio.ObjectInfo, 0, ln)
for i := 0; i < ln; i++ {
addr := refs.Address{ObjectID: objectIDs[i], CID: containerID}
meta, err := n.objectHead(ctx, addr)
if err != nil {
// todo: log there
continue
}
// ignore tombstone objects
_, hdr := meta.LastHeader(object.HeaderType(object.TombstoneHdr))
if hdr != nil {
continue
}
// ignore storage group objects
_, hdr = meta.LastHeader(object.HeaderType(object.StorageGroupHdr))
if hdr != nil {
continue
}
// dirs don't exist in neofs, gateway stores full path to the file
// in object header, e.g. `filename`:`/this/is/path/file.txt`
// prefix argument contains full dir path from the root, e.g. `/this/is/`
// to emulate dirs we take dirs in path, compare it with prefix
// and look for entities after prefix. If entity does not have any
// sub-entities, then it is a file, else directory.
_, filedir := nameFromObject(meta)
if strings.HasPrefix(filedir, prefix) {
var (
oi minio.ObjectInfo
tail = strings.TrimLeft(filedir, prefix)
ind = strings.Index(tail, SlashSeparator)
)
if ind < 0 { // if there are not sub-entities in tail - file
oi = objectInfoFromMeta(meta)
} else { // if there are sub-entities in tail - dir
oi = minio.ObjectInfo{
Bucket: meta.SystemHeader.CID.String(),
Name: tail[:ind+1], // dir MUST have slash symbol in the end
IsDir: true,
}
}
// use only unique dir names
if _, ok := uniqNames[oi.Name]; !ok {
uniqNames[oi.Name] = struct{}{}
result.Objects = append(result.Objects, oi)
}
}
}
return result, nil
}
// ListObjectsV2 returns objects from the container. It ignores tombstones and
// storage groups.
func (n *neofsObject) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (minio.ListObjectsV2Info, error) {
// todo: make pagination when search response will be gRPC stream
// pagination must be implemented via cache, because search results
// may be different between search queries. Also use startAfter
// param in the answer
//
var result minio.ListObjectsV2Info
list, err := n.ListObjects(ctx, bucket, prefix, continuationToken, delimiter, maxKeys)
if err != nil {
return result, err
}
result.IsTruncated = list.IsTruncated
result.Prefixes = list.Prefixes
result.ContinuationToken = continuationToken
result.NextContinuationToken = list.NextMarker
result.Objects = list.Objects
return result, nil
}
// GetObjectNInfo performs two operations within one call.
func (n *neofsObject) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (*minio.GetObjectReader, error) {
oi, err := n.GetObjectInfo(ctx, bucket, object, opts)
if err != nil {
return nil, err
}
var startOffset, length int64
startOffset, length, err = rs.GetOffsetLength(oi.Size)
if err != nil {
return nil, err
}
pr, pw := io.Pipe()
go func() {
err = n.GetObject(ctx, bucket, object, startOffset, length, pw, oi.ETag, opts)
_ = pw.CloseWithError(err)
}()
pipeCloser := func() { _ = pr.Close() }
return minio.NewGetObjectReaderFromReader(pr, oi, opts, pipeCloser)
}
// GetObject from storage.
func (n *neofsObject) GetObject(ctx context.Context, bucket, object string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
var (
notFoundError = minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
)
containerID, err := refs.CIDFromString(bucket)
if err != nil {
return err
}
objectID, err := n.objectFindID(ctx, containerID, object, false)
if err != nil {
return notFoundError
}
addr := refs.Address{
ObjectID: objectID,
CID: containerID,
}
_, err = n.objectGet(ctx, getParams{
addr: addr,
start: startOffset,
length: length,
writer: writer,
})
return err
}
// GetObjectInfo returns meta information about the object.
func (n *neofsObject) GetObjectInfo(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
var (
err error
result minio.ObjectInfo
notFoundError = minio.ObjectNotFound{
Bucket: bucket,
Object: object,
}
)
containerID, err := refs.CIDFromString(bucket)
if err != nil {
return result, err
}
objectID, err := n.objectFindID(ctx, containerID, object, false)
if err != nil {
return result, notFoundError
}
addr := refs.Address{
ObjectID: objectID,
CID: containerID,
}
meta, err := n.objectHead(ctx, addr)
if err != nil {
return result, err
}
return objectInfoFromMeta(meta), nil
}
// PutObject into storage.
func (n *neofsObject) PutObject(ctx context.Context, bucket, object string, data *minio.PutObjReader, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
var (
result minio.ObjectInfo
objectExistsError = minio.ObjectAlreadyExists{
Bucket: bucket,
Object: object,
}
)
containerID, err := refs.CIDFromString(bucket)
if err != nil {
return result, err
}
// check if object with such name already exists in the bucket
_, err = n.objectFindID(ctx, containerID, object, true)
if err == nil {
return result, objectExistsError
}
objectID, err := refs.NewObjectID()
if err != nil {
return result, err
}
storageGroupID, err := refs.NewObjectID()
if err != nil {
return result, err
}
addr := refs.Address{
ObjectID: objectID,
CID: containerID,
}
meta, err := n.objectPut(ctx, putParams{
addr: addr,
name: object,
size: data.Size(),
r: data.Reader,
userHeaders: opts.UserDefined,
})
if err != nil {
return result, err
}
oi := objectInfoFromMeta(meta)
// for every object create storage group, otherwise object will be deleted
addr.ObjectID = storageGroupID
_, err = n.storageGroupPut(ctx, sgParams{
addr: addr,
objects: []refs.ObjectID{objectID},
})
if err != nil {
return result, err
}
return oi, nil
}
// CopyObject from one bucket into another bucket.
func (n *neofsObject) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (minio.ObjectInfo, error) {
objInfo, err := n.GetObjectInfo(ctx, srcBucket, srcObject, srcOpts)
if err != nil {
return objInfo, err
}
pr, pw := io.Pipe()
go func() {
err := n.GetObject(ctx, srcBucket, srcObject, 0, 0, pw, "", srcOpts)
_ = pw.CloseWithError(err)
}()
data := new(minio.PutObjReader)
// ignore hex
data.Reader, err = hash.NewReader(pr, objInfo.Size, "", "", objInfo.Size, false)
if err != nil {
return objInfo, err
}
_, err = n.PutObject(ctx, destBucket, destObject, data, dstOpts)
return objInfo, err
}
// DeleteObject from the storage.
func (n *neofsObject) DeleteObject(ctx context.Context, bucket, object string) error {
containerID, err := refs.CIDFromString(bucket)
if err != nil {
return err
}
objectID, err := n.objectFindID(ctx, containerID, object, false)
if err != nil {
return err
}
addr := refs.Address{
ObjectID: objectID,
CID: containerID,
}
// maybe we need to wait some time after objectDelete() to propagate
// tombstone before return from function, e.g. validate delete by
// performing head operation
return n.objectDelete(ctx, delParams{
addr: addr,
})
}
// DeleteObjects from the storage.
func (n *neofsObject) DeleteObjects(ctx context.Context, bucket string, objects []string) ([]error, error) {
var errs = make([]error, 0, len(objects))
for i := range objects {
errs = append(errs, n.DeleteObject(ctx, bucket, objects[i]))
}
return errs, nil
}
// IsNotificationSupported - no
func (n *neofsObject) IsNotificationSupported() bool {
return false
}
// IsListenBucketSupported - no
func (n *neofsObject) IsListenBucketSupported() bool {
return false
}
// IsEncryptionSupported - no
func (n *neofsObject) IsEncryptionSupported() bool {
return false
}
// IsCompressionSupported - no
func (n *neofsObject) IsCompressionSupported() bool {
return false
}
// IsReady returns whether the layer is ready to take requests.
func (n *neofsObject) IsReady(ctx context.Context) bool {
if conn, err := n.cli.GetConnection(ctx); err == nil {
return conn.GetState() == connectivity.Ready
}
return false
}


@ -6,7 +6,7 @@ import (
"io"
"time"
"github.com/minio/minio/auth"
"github.com/minio/minio/neofs/pool"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/query"
"github.com/nspcc-dev/neofs-api-go/refs"
@ -29,13 +29,16 @@ type (
r io.Reader
userHeaders map[string]string
}
sgParams struct {
addr refs.Address
objects []refs.ObjectID
}
delParams struct {
addr refs.Address
}
getParams struct {
addr refs.Address
start int64
@ -45,22 +48,27 @@ type (
)
// objectSearchContainer returns all available objects in the container.
func (n *neofsObject) objectSearchContainer(ctx context.Context, cid refs.CID) ([]refs.ObjectID, error) {
func (n *layer) objectSearchContainer(ctx context.Context, cid refs.CID) ([]refs.ObjectID, error) {
var q query.Query
q.Filters = append(q.Filters, query.Filter{
Type: query.Filter_Exact,
Name: object.KeyRootObject,
})
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return nil, err
}
queryBinary, err := q.Marshal()
if err != nil {
return nil, err
}
token, err := prepareToken(n.token, queryParams{
key: n.key,
addr: refs.Address{CID: cid},
verb: service.Token_Info_Search,
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
Conn: conn,
Addr: refs.Address{CID: cid},
Verb: service.Token_Info_Search,
})
if err != nil {
return nil, err
@ -70,25 +78,15 @@ func (n *neofsObject) objectSearchContainer(ctx context.Context, cid refs.CID) (
req.Query = queryBinary
req.QueryVersion = 1
req.ContainerID = cid
req.SetVersion(APIVersion)
req.SetTTL(service.SingleForwardingTTL)
bearerToken, err := auth.GetBearerToken(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get bearer token")
}
req.SetBearer(bearerToken)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
@ -125,7 +123,7 @@ func (n *neofsObject) objectSearchContainer(ctx context.Context, cid refs.CID) (
// objectFindID returns the object id (uuid) based on its nice name in s3. If
// the nice name is uuid compatible, the function returns it directly.
func (n *neofsObject) objectFindID(ctx context.Context, cid refs.CID, name string, put bool) (refs.ObjectID, error) {
func (n *layer) objectFindID(ctx context.Context, cid refs.CID, name string, put bool) (refs.ObjectID, error) {
var (
id refs.ObjectID
q query.Query
@ -146,10 +144,15 @@ func (n *neofsObject) objectFindID(ctx context.Context, cid refs.CID, name strin
return id, err
}
token, err := prepareToken(n.token, queryParams{
key: n.key,
addr: refs.Address{CID: cid},
verb: service.Token_Info_Search,
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return id, err
}
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
Conn: conn,
Addr: refs.Address{CID: cid},
Verb: service.Token_Info_Search,
})
if err != nil {
return id, err
@ -159,26 +162,15 @@ func (n *neofsObject) objectFindID(ctx context.Context, cid refs.CID, name strin
req.Query = queryBinary
req.QueryVersion = 1
req.ContainerID = cid
req.SetVersion(APIVersion)
req.SetTTL(service.SingleForwardingTTL)
bearerToken, err := auth.GetBearerToken(ctx)
if err != nil {
var empty refs.ObjectID
return empty, errors.Wrap(err, "failed to get bearer token")
}
req.SetBearer(bearerToken)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
return id, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return id, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
@ -228,11 +220,17 @@ func (n *neofsObject) objectFindID(ctx context.Context, cid refs.CID, name strin
}
// objectHead returns all object's headers.
func (n *neofsObject) objectHead(ctx context.Context, addr refs.Address) (*object.Object, error) {
token, err := prepareToken(n.token, queryParams{
key: n.key,
addr: addr,
verb: service.Token_Info_Head,
func (n *layer) objectHead(ctx context.Context, addr refs.Address) (*object.Object, error) {
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return nil, err
}
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
Conn: conn,
Addr: addr,
Verb: service.Token_Info_Head,
})
if err != nil {
return nil, err
@ -241,25 +239,15 @@ func (n *neofsObject) objectHead(ctx context.Context, addr refs.Address) (*objec
req := new(object.HeadRequest)
req.Address = addr
req.FullHeaders = true
req.SetVersion(APIVersion)
req.SetTTL(service.SingleForwardingTTL)
bearerToken, err := auth.GetBearerToken(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get bearer token")
}
req.SetBearer(bearerToken)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
@ -273,11 +261,16 @@ func (n *neofsObject) objectHead(ctx context.Context, addr refs.Address) (*objec
}
// objectGet reads the object and writes it into the provided io.Writer.
func (n *neofsObject) objectGet(ctx context.Context, p getParams) (*object.Object, error) {
token, err := prepareToken(n.token, queryParams{
key: n.key,
addr: p.addr,
verb: service.Token_Info_Get,
func (n *layer) objectGet(ctx context.Context, p getParams) (*object.Object, error) {
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return nil, err
}
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
Conn: conn,
Addr: p.addr,
Verb: service.Token_Info_Get,
})
if err != nil {
return nil, err
@ -288,25 +281,15 @@ func (n *neofsObject) objectGet(ctx context.Context, p getParams) (*object.Objec
// object.GetRange() response message become gRPC stream.
req := new(object.GetRequest)
req.Address = p.addr
req.SetVersion(APIVersion)
req.SetTTL(service.SingleForwardingTTL)
bearerToken, err := auth.GetBearerToken(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get bearer token")
}
req.SetBearer(bearerToken)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return nil, err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
@ -366,11 +349,16 @@ func (n *neofsObject) objectGet(ctx context.Context, p getParams) (*object.Objec
}
// objectPut puts an object into neofs, taking the payload from io.Reader.
func (n *neofsObject) objectPut(ctx context.Context, p putParams) (*object.Object, error) {
token, err := prepareToken(n.token, queryParams{
key: n.key,
addr: p.addr,
verb: service.Token_Info_Put,
func (n *layer) objectPut(ctx context.Context, p putParams) (*object.Object, error) {
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return nil, err
}
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
Conn: conn,
Addr: p.addr,
Verb: service.Token_Info_Put,
})
if err != nil {
n.log.Error("could not prepare token",
@ -378,16 +366,7 @@ func (n *neofsObject) objectPut(ctx context.Context, p putParams) (*object.Objec
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
n.log.Error("could not prepare connection",
zap.Error(err))
return nil, err
}
client := object.NewServiceClient(conn)
// todo: think about timeout
putClient, err := client.Put(ctx)
putClient, err := object.NewServiceClient(conn).Put(ctx)
if err != nil {
n.log.Error("could not prepare PutClient",
zap.Error(err))
@ -405,7 +384,7 @@ func (n *neofsObject) objectPut(ctx context.Context, p putParams) (*object.Objec
SystemHeader: object.SystemHeader{
Version: objectVersion,
ID: p.addr.ObjectID,
OwnerID: n.owner,
OwnerID: n.uid,
CID: p.addr.CID,
PayloadLength: uint64(p.size),
},
@ -413,14 +392,9 @@ func (n *neofsObject) objectPut(ctx context.Context, p putParams) (*object.Objec
}
req := object.MakePutRequestHeader(obj)
req.SetVersion(APIVersion)
req.SetTTL(service.SingleForwardingTTL)
bearerToken, err := auth.GetBearerToken(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get bearer token")
}
req.SetBearer(bearerToken)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
@ -446,13 +420,8 @@ func (n *neofsObject) objectPut(ctx context.Context, p putParams) (*object.Objec
if read > 0 {
req := object.MakePutRequestChunk(readBuffer[:read])
req.SetVersion(APIVersion)
req.SetTTL(service.SingleForwardingTTL)
bearerToken, err := auth.GetBearerToken(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get bearer token")
}
req.SetBearer(bearerToken)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
@ -484,17 +453,17 @@ func (n *neofsObject) objectPut(ctx context.Context, p putParams) (*object.Objec
}
// storageGroupPut prepares a storage group object and puts it into neofs.
func (n *neofsObject) storageGroupPut(ctx context.Context, p sgParams) (*object.Object, error) {
token, err := prepareToken(n.token, queryParams{
key: n.key,
addr: p.addr,
verb: service.Token_Info_Put,
})
func (n *layer) storageGroupPut(ctx context.Context, p sgParams) (*object.Object, error) {
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return nil, err
}
conn, err := n.cli.GetConnection(ctx)
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
Conn: conn,
Addr: p.addr,
Verb: service.Token_Info_Put,
})
if err != nil {
return nil, err
}
@ -510,7 +479,7 @@ func (n *neofsObject) storageGroupPut(ctx context.Context, p sgParams) (*object.
SystemHeader: object.SystemHeader{
Version: objectVersion,
ID: p.addr.ObjectID,
OwnerID: n.owner,
OwnerID: n.uid,
CID: p.addr.CID,
},
Headers: make([]object.Header, 0, len(p.objects)),
@ -525,14 +494,9 @@ func (n *neofsObject) storageGroupPut(ctx context.Context, p sgParams) (*object.
sg.SetStorageGroup(new(storagegroup.StorageGroup))
req := object.MakePutRequestHeader(sg)
req.SetVersion(APIVersion)
req.SetTTL(service.SingleForwardingTTL)
bearerToken, err := auth.GetBearerToken(ctx)
if err != nil {
return nil, errors.Wrap(err, "failed to get bearer token")
}
req.SetBearer(bearerToken)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
@ -553,11 +517,16 @@ func (n *neofsObject) storageGroupPut(ctx context.Context, p sgParams) (*object.
}
// objectDelete puts a tombstone object into neofs.
func (n *neofsObject) objectDelete(ctx context.Context, p delParams) error {
token, err := prepareToken(n.token, queryParams{
key: n.key,
addr: p.addr,
verb: service.Token_Info_Delete,
func (n *layer) objectDelete(ctx context.Context, p delParams) error {
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return err
}
token, err := n.cli.SessionToken(ctx, &pool.SessionParams{
Conn: conn,
Addr: p.addr,
Verb: service.Token_Info_Delete,
})
if err != nil {
return err
@ -565,26 +534,16 @@ func (n *neofsObject) objectDelete(ctx context.Context, p delParams) error {
req := new(object.DeleteRequest)
req.Address = p.addr
req.OwnerID = n.owner
req.SetVersion(APIVersion)
req.OwnerID = n.uid
req.SetTTL(service.SingleForwardingTTL)
bearerToken, err := auth.GetBearerToken(ctx)
if err != nil {
return errors.Wrap(err, "failed to get bearer token")
}
req.SetBearer(bearerToken)
req.SetToken(token)
// req.SetBearer(bearerToken)
err = service.SignRequestData(n.key, req)
if err != nil {
return err
}
conn, err := n.cli.GetConnection(ctx)
if err != nil {
return err
}
// todo: think about timeout
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
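The common change across the operations in this file: each request no longer derives its token locally from a long-lived session token (prepareToken), but asks the pool for a session token bound to the current connection and verb. Because the diff hunks above are fragmentary, here is the resulting flow written out end to end as a sketch, modelled on objectDelete; the final RPC call mirrors the other operations and the response is ignored for brevity.

func (n *layer) deleteSketch(ctx context.Context, addr refs.Address) error {
	conn, err := n.cli.GetConnection(ctx)
	if err != nil {
		return err
	}
	// the pool now owns session-token creation
	token, err := n.cli.SessionToken(ctx, &pool.SessionParams{Conn: conn, Addr: addr, Verb: service.Token_Info_Delete})
	if err != nil {
		return err
	}
	req := new(object.DeleteRequest)
	req.Address = addr
	req.OwnerID = n.uid
	req.SetTTL(service.SingleForwardingTTL)
	req.SetToken(token)
	if err := service.SignRequestData(n.key, req); err != nil {
		return err
	}
	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
	defer cancel()
	_, err = object.NewServiceClient(conn).Delete(ctx, req)
	return err
}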


@ -1,36 +1,50 @@
package layer
import (
"context"
"crypto/ecdsa"
"net/http"
"os"
"strings"
"time"
minio "github.com/minio/minio/legacy"
"github.com/minio/minio/neofs/pool"
"github.com/nspcc-dev/neofs-api-go/object"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-api-go/session"
crypto "github.com/nspcc-dev/neofs-crypto"
)
type (
tokenParams struct {
cli pool.Client
key *ecdsa.PrivateKey
until uint64
ObjectInfo struct {
Bucket string
Name string
Size int64
ContentType string
Created time.Time
Headers map[string]string
}
queryParams struct {
key *ecdsa.PrivateKey
addr refs.Address
verb service.Token_Info_Verb
// ListObjectsInfo contains the result of a list-objects request.
ListObjectsInfo struct {
// Indicates whether the returned list objects response is truncated. A
// value of true indicates that the list was truncated. The list can be truncated
// if the number of objects exceeds the limit allowed or specified
// by max keys.
IsTruncated bool
// When response is truncated (the IsTruncated element value in the response
// is true), you can use the key name in this field as marker in the subsequent
// request to get next set of objects.
//
// NOTE: This element is returned only if you have delimiter request parameter
// specified.
ContinuationToken string
NextContinuationToken string
// List of objects info for this request.
Objects []ObjectInfo
// List of prefixes for this request.
Prefixes []string
}
)
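// Illustrative sketch only (not part of this change): how the continuation
// token fields above are intended to be used once ListObjects pagination is
// implemented; today the tokens are not yet populated (see the todo in
// ListObjects). Assumes "context" is imported in this file.
func listAllObjects(ctx context.Context, c Client, bucket, prefix string) ([]ObjectInfo, error) {
	var (
		all   []ObjectInfo
		token string
	)
	for {
		page, err := c.ListObjects(ctx, &ListObjectsParams{
			Bucket:  bucket,
			Prefix:  prefix,
			Token:   token,
			MaxKeys: 1000, // S3 default page size
		})
		if err != nil {
			return nil, err
		}
		all = append(all, page.Objects...)
		if !page.IsTruncated || page.NextContinuationToken == "" {
			return all, nil
		}
		token = page.NextContinuationToken
	}
}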
// APIVersion of the neofs
const APIVersion = 1
const pathSeparator = string(os.PathSeparator)
func userHeaders(h []object.Header) map[string]string {
result := make(map[string]string, len(h))
@ -47,7 +61,7 @@ func userHeaders(h []object.Header) map[string]string {
return result
}
func objectInfoFromMeta(meta *object.Object) minio.ObjectInfo {
func objectInfoFromMeta(meta *object.Object) *ObjectInfo {
aws3name := meta.SystemHeader.ID.String()
userHeaders := userHeaders(meta.Headers)
@ -56,78 +70,16 @@ func objectInfoFromMeta(meta *object.Object) minio.ObjectInfo {
delete(userHeaders, name)
}
oi := minio.ObjectInfo{
mimeType := http.DetectContentType(meta.Payload)
return &ObjectInfo{
Bucket: meta.SystemHeader.CID.String(),
Name: aws3name,
ModTime: time.Unix(meta.SystemHeader.CreatedAt.UnixTime, 0),
ContentType: mimeType,
Headers: userHeaders,
Size: int64(meta.SystemHeader.PayloadLength),
ETag: "", // ?
ContentType: "", // ?
UserDefined: userHeaders,
UserTags: "", // ignore it
Created: time.Unix(meta.SystemHeader.CreatedAt.UnixTime, 0),
}
return oi
}
func generateToken(ctx context.Context, p tokenParams) (*service.Token, error) {
owner, err := refs.NewOwnerID(&p.key.PublicKey)
if err != nil {
return nil, err
}
token := new(service.Token)
token.SetOwnerID(owner)
token.SetExpirationEpoch(p.until)
token.SetOwnerKey(crypto.MarshalPublicKey(&p.key.PublicKey))
conn, err := p.cli.GetConnection(ctx)
if err != nil {
return nil, err
}
creator, err := session.NewGRPCCreator(conn, p.key)
if err != nil {
return nil, err
}
res, err := creator.Create(ctx, token)
if err != nil {
return nil, err
}
token.SetID(res.GetID())
token.SetSessionKey(res.GetSessionKey())
return token, nil
}
func prepareToken(t *service.Token, p queryParams) (*service.Token, error) {
sig := make([]byte, len(t.Signature))
copy(sig, t.Signature)
token := &service.Token{
Token_Info: service.Token_Info{
ID: t.ID,
OwnerID: t.OwnerID,
Verb: t.Verb,
Address: t.Address,
TokenLifetime: t.TokenLifetime,
SessionKey: t.SessionKey,
OwnerKey: t.OwnerKey,
},
Signature: sig,
}
token.SetAddress(p.addr)
token.SetVerb(p.verb)
err := service.AddSignatureWithKey(p.key, service.NewSignedSessionToken(token))
if err != nil {
return nil, err
}
return token, nil
}
func parseUserHeaders(h map[string]string) []object.Header {
@ -155,7 +107,7 @@ func nameFromObject(o *object.Object) (string, string) {
name = uh[AWS3NameHeader]
}
ind := strings.LastIndex(name, SlashSeparator)
ind := strings.LastIndex(name, pathSeparator)
return name[ind+1:], name[:ind+1]
}
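// For example, with pathSeparator == "/" the split above behaves like:
//
//	filename header "/this/is/path/file.txt" -> ("file.txt", "/this/is/path/")
//	filename header "file.txt"               -> ("file.txt", "")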


@ -1,91 +0,0 @@
package layer
import (
"crypto/ecdsa"
"math"
"testing"
"github.com/nspcc-dev/neofs-api-go/refs"
"github.com/nspcc-dev/neofs-api-go/service"
"github.com/nspcc-dev/neofs-api-go/session"
crypto "github.com/nspcc-dev/neofs-crypto"
"github.com/nspcc-dev/neofs-crypto/test"
"github.com/stretchr/testify/require"
)
type args struct {
t *service.Token
p queryParams
}
func newTestToken(t *testing.T, key *ecdsa.PrivateKey, until uint64) *service.Token {
owner, err := refs.NewOwnerID(&key.PublicKey)
require.NoError(t, err)
token := new(service.Token)
token.SetOwnerID(owner)
token.SetExpirationEpoch(until)
token.SetOwnerKey(crypto.MarshalPublicKey(&key.PublicKey))
// generate token ID
tokenID, err := refs.NewUUID()
require.NoError(t, err)
pToken, err := session.NewPrivateToken(until)
require.NoError(t, err)
pkBytes, err := session.PublicSessionToken(pToken)
require.NoError(t, err)
token.SetID(tokenID)
token.SetSessionKey(pkBytes)
return token
}
func newTestArgs(t *testing.T, key *ecdsa.PrivateKey) args {
token := newTestToken(t, key, math.MaxUint64)
addr := refs.Address{}
return args{
t: token,
p: queryParams{key: key, addr: addr, verb: service.Token_Info_Put},
}
}
func Test_prepareToken(t *testing.T) {
key1 := test.DecodeKey(1)
key2 := test.DecodeKey(2)
tests := []struct {
name string
args args
want *service.Token
wantErr bool
}{
{
name: "should not fail, key1",
args: newTestArgs(t, key1),
want: newTestToken(t, key1, math.MaxUint64),
wantErr: false,
},
{
name: "should not fail, key 2",
args: newTestArgs(t, key2),
want: newTestToken(t, key2, math.MaxUint64),
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := prepareToken(tt.args.t, tt.args.p)
if (err != nil) != tt.wantErr {
t.Errorf("prepareToken() error = %v, wantErr %v", err, tt.wantErr)
return
}
require.Equal(t, tt.want, got)
})
}
}


@ -32,16 +32,16 @@ type (
}
Client interface {
Status() error
GetConnection(context.Context) (*grpc.ClientConn, error)
SessionToken(ctx context.Context, params *SessionParams) (*service.Token, error)
}
Pool interface {
Client
Close()
Status() error
ReBalance(ctx context.Context)
SessionToken(ctx context.Context, params *SessionParams) (*service.Token, error)
}
Peer struct {