From 7deeb68d47998ef36ec4c2c7cc3d061ff3f43271 Mon Sep 17 00:00:00 2001 From: Evgeniy Kulikov Date: Mon, 19 Oct 2020 04:04:37 +0300 Subject: [PATCH] [#25] Migrate layer to NeoFS API v2 Naive migration, without any tries to start application closes #25 Signed-off-by: Evgeniy Kulikov --- api/layer/container.go | 72 ++--- api/layer/layer.go | 280 ++++++++---------- api/layer/object.go | 619 ++++++++------------------------------- api/layer/util.go | 61 ++-- api/layer/writer.go | 49 ++++ api/layer/writer_test.go | 77 +++++ 6 files changed, 420 insertions(+), 738 deletions(-) create mode 100644 api/layer/writer.go create mode 100644 api/layer/writer_test.go diff --git a/api/layer/container.go b/api/layer/container.go index 550ae05e..c4ea524b 100644 --- a/api/layer/container.go +++ b/api/layer/container.go @@ -4,9 +4,8 @@ import ( "context" "time" - "github.com/nspcc-dev/neofs-api-go/container" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/pkg/client" + "github.com/nspcc-dev/neofs-api-go/pkg/container" "github.com/nspcc-dev/neofs-s3-gate/api" "github.com/nspcc-dev/neofs-s3-gate/auth" "go.uber.org/zap" @@ -15,7 +14,7 @@ import ( type ( BucketInfo struct { Name string - CID refs.CID + CID *container.ID Created time.Time } @@ -28,47 +27,36 @@ type ( } ) -func (n *layer) containerInfo(ctx context.Context, cid refs.CID) (*BucketInfo, error) { +func (n *layer) containerInfo(ctx context.Context, cid *container.ID) (*BucketInfo, error) { rid := api.GetRequestID(ctx) bearer, err := auth.GetBearerToken(ctx) if err != nil { n.log.Error("could not receive bearer token", + zap.Stringer("cid", cid), zap.String("request_id", rid), zap.Error(err)) return nil, err } - req := new(container.GetRequest) - req.SetCID(cid) - req.SetTTL(service.SingleForwardingTTL) - // req.SetBearer(bearer) - _ = bearer - if err = service.SignRequestData(n.key, req); err != nil { - n.log.Error("could not prepare request", - 
zap.String("request_id", rid), - zap.Error(err)) - return nil, err - } - - conn, err := n.cli.GetConnection(ctx) + cli, tkn, err := n.prepareClient(ctx) if err != nil { n.log.Error("could not prepare client", + zap.Stringer("cid", cid), zap.String("request_id", rid), zap.Error(err)) + return nil, err } - // todo: think about timeout - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - res, err := container.NewServiceClient(conn).Get(ctx, req) + res, err := cli.GetContainer(ctx, cid, client.WithSession(tkn)) if err != nil { - n.log.Error("could not list buckets", + n.log.Error("could not fetch container", + zap.Stringer("cid", cid), zap.String("request_id", rid), zap.Error(err)) + return nil, err } @@ -76,8 +64,8 @@ func (n *layer) containerInfo(ctx context.Context, cid refs.CID) (*BucketInfo, e return &BucketInfo{ CID: cid, - Name: cid.String(), // should be fetched from container.GetResponse - Created: time.Time{}, // should be fetched from container.GetResponse + Name: cid.String(), // should be fetched from container.Attributes + Created: time.Time{}, // should be fetched from container.Attributes }, nil } @@ -91,21 +79,9 @@ func (n *layer) containerList(ctx context.Context) ([]BucketInfo, error) { return nil, err } - req := new(container.ListRequest) - req.OwnerID = n.uid - req.SetTTL(service.SingleForwardingTTL) - // req.SetBearer(bearer) - _ = bearer - if err := service.SignRequestData(n.key, req); err != nil { - n.log.Error("could not prepare request", - zap.String("request_id", rid), - zap.Error(err)) - return nil, err - } - - conn, err := n.cli.GetConnection(ctx) + cli, tkn, err := n.prepareClient(ctx) if err != nil { n.log.Error("could not prepare client", zap.String("request_id", rid), @@ -113,20 +89,24 @@ func (n *layer) containerList(ctx context.Context) ([]BucketInfo, error) { return nil, err } - // todo: think about timeout - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() + // own, err := 
GetOwnerID(bearer) + // if err != nil { + // n.log.Error("could not fetch owner id", + // zap.String("request_id", rid), + // zap.Error(err)) + // return nil, err + // } - res, err := container.NewServiceClient(conn).List(ctx, req) + res, err := cli.ListContainers(ctx, tkn.OwnerID(), client.WithSession(tkn)) if err != nil { - n.log.Error("could not list buckets", + n.log.Error("could not fetch container", zap.String("request_id", rid), zap.Error(err)) return nil, err } - list := make([]BucketInfo, 0, len(res.CID)) - for _, cid := range res.CID { + list := make([]BucketInfo, 0, len(res)) + for _, cid := range res { info, err := n.containerInfo(ctx, cid) if err != nil { n.log.Error("could not fetch container info", diff --git a/api/layer/layer.go b/api/layer/layer.go index 0cb2c702..a8073aef 100644 --- a/api/layer/layer.go +++ b/api/layer/layer.go @@ -3,13 +3,17 @@ package layer import ( "context" "crypto/ecdsa" + "errors" "io" "strings" "time" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" + "github.com/nspcc-dev/neofs-api-go/pkg" + "github.com/nspcc-dev/neofs-api-go/pkg/client" + "github.com/nspcc-dev/neofs-api-go/pkg/container" + "github.com/nspcc-dev/neofs-api-go/pkg/object" + "github.com/nspcc-dev/neofs-api-go/pkg/owner" + "github.com/nspcc-dev/neofs-api-go/pkg/token" "github.com/nspcc-dev/neofs-s3-gate/api" "github.com/nspcc-dev/neofs-s3-gate/api/pool" "go.uber.org/zap" @@ -19,10 +23,19 @@ import ( type ( layer struct { + uid *owner.ID log *zap.Logger cli pool.Client - uid refs.OwnerID key *ecdsa.PrivateKey + + reqTimeout time.Duration + } + + Params struct { + Pool pool.Client + Logger *zap.Logger + Timeout time.Duration + NFKey *ecdsa.PrivateKey } GetObjectParams struct { @@ -50,7 +63,7 @@ type ( } NeoFS interface { - Get(ctx context.Context, address refs.Address) (*object.Object, error) + Get(ctx context.Context, address *object.Address) (*object.Object, error) } Client 
interface { @@ -73,61 +86,41 @@ type ( } ) -// AWS3NameHeader key in the object neofs. +// AWS3NameHeader key in the object NeoFS. const AWS3NameHeader = "filename" // NewGatewayLayer creates instance of layer. It checks credentials // and establishes gRPC connection with node. -func NewLayer(log *zap.Logger, cli pool.Client, key *ecdsa.PrivateKey) (Client, error) { - uid, err := refs.NewOwnerID(&key.PublicKey) +func NewLayer(p *Params) (Client, error) { + wallet, err := owner.NEO3WalletFromPublicKey(&p.NFKey.PublicKey) if err != nil { return nil, err } + + uid := owner.NewID() + uid.SetNeo3Wallet(wallet) + return &layer{ - cli: cli, - key: key, - log: log, uid: uid, + cli: p.Pool, + key: p.NFKey, + log: p.Logger, + + reqTimeout: p.Timeout, }, nil } // Get NeoFS Object by refs.Address (should be used by auth.Center) -func (n *layer) Get(ctx context.Context, address refs.Address) (*object.Object, error) { - conn, err := n.cli.GetConnection(ctx) +func (n *layer) Get(ctx context.Context, address *object.Address) (*object.Object, error) { + cli, tkn, err := n.prepareClient(ctx) if err != nil { return nil, err } - token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ - Conn: conn, - Addr: address, - Verb: service.Token_Info_Get, - }) + gop := new(client.GetObjectParams) + gop.WithAddress(address) - if err != nil { - return nil, err - } - - req := new(object.GetRequest) - req.Address = address - req.SetTTL(service.SingleForwardingTTL) - req.SetToken(token) - - err = service.SignRequestData(n.key, req) - if err != nil { - return nil, err - } - - // todo: think about timeout - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - cli, err := object.NewServiceClient(conn).Get(ctx, req) - if err != nil { - return nil, err - } - - return receiveObject(cli) + return cli.GetObject(ctx, gop, client.WithSession(tkn)) } // GetBucketInfo returns bucket name. 
@@ -160,31 +153,32 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj // pagination must be implemented with cache, because search results // may be different between search calls var ( + err error + bkt *BucketInfo + ids []*object.ID result ListObjectsInfo uniqNames = make(map[string]struct{}) ) - bkt, err := n.GetBucketInfo(ctx, p.Bucket) - if err != nil { + if bkt, err = n.GetBucketInfo(ctx, p.Bucket); err != nil { + return nil, err + } else if ids, err = n.objectSearch(ctx, &findParams{cid: bkt.CID}); err != nil { return nil, err } - objectIDs, err := n.objectSearchContainer(ctx, bkt.CID) - if err != nil { - return nil, err - } - - ln := len(objectIDs) + ln := len(ids) // todo: check what happens if there is more than maxKeys objects if ln > p.MaxKeys { - result.IsTruncated = true ln = p.MaxKeys + result.IsTruncated = true } - result.Objects = make([]ObjectInfo, 0, ln) + result.Objects = make([]*ObjectInfo, 0, ln) - for i := 0; i < ln; i++ { - addr := refs.Address{ObjectID: objectIDs[i], CID: bkt.CID} + for _, id := range ids { + addr := object.NewAddress() + addr.SetObjectID(id) + addr.SetContainerID(bkt.CID) meta, err := n.objectHead(ctx, addr) if err != nil { @@ -192,17 +186,17 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj continue } - // ignore tombstone objects - _, hdr := meta.LastHeader(object.HeaderType(object.TombstoneHdr)) - if hdr != nil { - continue - } + // // ignore tombstone objects + // _, hdr := meta.LastHeader(object.HeaderType(object.TombstoneHdr)) + // if hdr != nil { + // continue + // } // ignore storage group objects - _, hdr = meta.LastHeader(object.HeaderType(object.StorageGroupHdr)) - if hdr != nil { - continue - } + // _, hdr = meta.LastHeader(object.HeaderType(object.StorageGroupHdr)) + // if hdr != nil { + // continue + // } // dirs don't exist in neofs, gateway stores full path to the file // in object header, e.g. 
`filename`:`/this/is/path/file.txt` @@ -225,8 +219,8 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj oi = objectInfoFromMeta(meta) } else { // if there are sub-entities in tail - dir oi = &ObjectInfo{ - Owner: meta.SystemHeader.OwnerID, - Bucket: meta.SystemHeader.CID.String(), + Owner: meta.GetOwnerID(), + Bucket: bkt.Name, Name: tail[:ind+1], // dir MUST have slash symbol in the end // IsDir: true, } @@ -236,7 +230,7 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj if _, ok := uniqNames[oi.Name]; !ok { uniqNames[oi.Name] = struct{}{} - result.Objects = append(result.Objects, *oi) + result.Objects = append(result.Objects, oi) } } } @@ -246,98 +240,74 @@ func (n *layer) ListObjects(ctx context.Context, p *ListObjectsParams) (*ListObj // GetObject from storage. func (n *layer) GetObject(ctx context.Context, p *GetObjectParams) error { - cid, err := refs.CIDFromString(p.Bucket) - if err != nil { + var ( + err error + oid *object.ID + cid = container.NewID() + ) + + if err = cid.Parse(p.Bucket); err != nil { + return err + } else if oid, err = n.objectFindID(ctx, &findParams{cid: cid, key: p.Object}); err != nil { return err } - oid, err := n.objectFindID(ctx, cid, p.Object, false) - if err != nil { - return err - } + addr := object.NewAddress() + addr.SetObjectID(oid) + addr.SetContainerID(cid) - addr := refs.Address{ - ObjectID: oid, - CID: cid, - } - _, err = n.objectGet(ctx, getParams{ - addr: addr, - start: p.Offset, + _, err = n.objectGet(ctx, &getParams{ + Writer: p.Writer, + + addr: addr, + + offset: p.Offset, length: p.Length, - writer: p.Writer, }) return err } // GetObjectInfo returns meta information about the object. 
-func (n *layer) GetObjectInfo(ctx context.Context, bucketName, objectName string) (*ObjectInfo, error) { - var meta *object.Object - if cid, err := refs.CIDFromString(bucketName); err != nil { +func (n *layer) GetObjectInfo(ctx context.Context, bucketName, filename string) (*ObjectInfo, error) { + var ( + err error + oid *object.ID + cid = container.NewID() + + meta *object.Object + ) + + if err = cid.Parse(bucketName); err != nil { return nil, err - } else if oid, err := n.objectFindID(ctx, cid, objectName, false); err != nil { - return nil, err - } else if meta, err = n.objectHead(ctx, refs.Address{CID: cid, ObjectID: oid}); err != nil { + } else if oid, err = n.objectFindID(ctx, &findParams{cid: cid, key: filename}); err != nil { return nil, err } + + addr := object.NewAddress() + addr.SetObjectID(oid) + addr.SetContainerID(cid) + + if meta, err = n.objectHead(ctx, addr); err != nil { + return nil, err + } + return objectInfoFromMeta(meta), nil } +func GetOwnerID(tkn *token.BearerToken) (*owner.ID, error) { + switch pkg.SDKVersion().GetMajor() { + case 2: + id := tkn.ToV2().GetBody().GetOwnerID() + return owner.NewIDFromV2(id), nil + default: + return nil, errors.New("unknown version") + } +} + // PutObject into storage. 
func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error) { - cid, err := refs.CIDFromString(p.Bucket) - if err != nil { - return nil, err - } - - _, err = n.objectFindID(ctx, cid, p.Object, true) - if err == nil { - return nil, &api.ObjectAlreadyExists{ - Bucket: p.Bucket, - Object: p.Object, - } - } - - oid, err := refs.NewObjectID() - if err != nil { - return nil, err - } - - sgid, err := refs.NewSGID() - if err != nil { - return nil, err - } - - addr := refs.Address{ - ObjectID: oid, - CID: cid, - } - - meta, err := n.objectPut(ctx, putParams{ - addr: addr, - size: p.Size, - name: p.Object, - r: p.Reader, - userHeaders: p.Header, - }) - if err != nil { - return nil, err - } - - oi := objectInfoFromMeta(meta) - - // for every object create storage group, otherwise object will be deleted - addr.ObjectID = sgid - - _, err = n.storageGroupPut(ctx, sgParams{ - addr: addr, - objects: []refs.ObjectID{oid}, - }) - if err != nil { - return nil, err - } - - return oi, nil + return n.objectPut(ctx, p) } // CopyObject from one bucket into another bucket. @@ -374,28 +344,34 @@ func (n *layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*ObjectInf } // DeleteObject removes all objects with passed nice name. 
-func (n *layer) DeleteObject(ctx context.Context, bucket, object string) error { - cid, err := refs.CIDFromString(bucket) - if err != nil { - return &api.DeleteError{ - Err: err, - Object: object, - } - } +func (n *layer) DeleteObject(ctx context.Context, bucket, filename string) error { + var ( + err error + ids []*object.ID + cid = container.NewID() + ) - ids, err := n.objectFindIDs(ctx, cid, object) - if err != nil { + if err = cid.Parse(bucket); err != nil { return &api.DeleteError{ Err: err, - Object: object, + Object: filename, + } + } else if ids, err = n.objectSearch(ctx, &findParams{cid: cid, key: filename}); err != nil { + return &api.DeleteError{ + Err: err, + Object: filename, } } for _, id := range ids { - if err = n.objectDelete(ctx, delParams{addr: refs.Address{CID: cid, ObjectID: id}}); err != nil { + addr := object.NewAddress() + addr.SetObjectID(id) + addr.SetContainerID(cid) + + if err = n.objectDelete(ctx, addr); err != nil { return &api.DeleteError{ Err: err, - Object: object, + Object: filename, } } } diff --git a/api/layer/object.go b/api/layer/object.go index a8906fac..0d46ebd0 100644 --- a/api/layer/object.go +++ b/api/layer/object.go @@ -1,590 +1,209 @@ package layer import ( + "bytes" "context" "io" + "net/http" "time" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/query" - "github.com/nspcc-dev/neofs-api-go/refs" - "github.com/nspcc-dev/neofs-api-go/service" - "github.com/nspcc-dev/neofs-api-go/storagegroup" - "github.com/nspcc-dev/neofs-s3-gate/api/pool" + "github.com/nspcc-dev/neofs-api-go/pkg/client" + "github.com/nspcc-dev/neofs-api-go/pkg/container" + "github.com/nspcc-dev/neofs-api-go/pkg/object" + "github.com/nspcc-dev/neofs-api-go/pkg/owner" + "github.com/nspcc-dev/neofs-api-go/pkg/token" + "github.com/nspcc-dev/neofs-s3-gate/api" + "github.com/nspcc-dev/neofs-s3-gate/auth" "github.com/pkg/errors" - "go.uber.org/zap" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) -const ( - 
dataChunkSize = 3 * object.UnitsMB - objectVersion = 1 -) - type ( - putParams struct { - addr refs.Address - name string - size int64 - r io.Reader - userHeaders map[string]string - } - - sgParams struct { - addr refs.Address - objects []refs.ObjectID - } - - delParams struct { - addr refs.Address + findParams struct { + key string + cid *container.ID } getParams struct { - addr refs.Address - start int64 + io.Writer + + addr *object.Address + + offset int64 length int64 - writer io.Writer } ) -// objectSearchContainer returns all available objects in the container. -func (n *layer) objectSearchContainer(ctx context.Context, cid refs.CID) ([]refs.ObjectID, error) { - var q query.Query - q.Filters = append(q.Filters, query.Filter{ - Type: query.Filter_Exact, - Name: object.KeyRootObject, - }) - - conn, err := n.cli.GetConnection(ctx) +func (n *layer) prepareClient(ctx context.Context) (*client.Client, *token.SessionToken, error) { + conn, err := n.cli.Connection(ctx) if err != nil { - return nil, err + return nil, nil, err } - queryBinary, err := q.Marshal() + tkn, err := n.cli.Token(ctx, conn) if err != nil { - return nil, err + return nil, nil, err } - token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ - Conn: conn, - Addr: refs.Address{CID: cid}, - Verb: service.Token_Info_Search, - }) + cli, err := client.New(n.key, client.WithGRPCConnection(conn)) if err != nil { - return nil, err + return nil, nil, err } - req := new(object.SearchRequest) - req.Query = queryBinary - req.QueryVersion = 1 - req.ContainerID = cid - req.SetTTL(service.SingleForwardingTTL) - req.SetToken(token) - // req.SetBearer(bearerToken) - - err = service.SignRequestData(n.key, req) - if err != nil { - return nil, err - } - - // todo: think about timeout - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - searchClient, err := object.NewServiceClient(conn).Search(ctx, req) - if err != nil { - return nil, err - } - - var ( - response []refs.Address - result 
[]refs.ObjectID - ) - - for { - resp, err := searchClient.Recv() - if err != nil { - if err == io.EOF { - break - } - - return nil, errors.New("search command received error") - } - - response = append(response, resp.Addresses...) - } - - for i := range response { - result = append(result, response[i].ObjectID) - } - - return result, nil + return cli, tkn, nil } -// objectFindIDs returns object id's (uuid) based on they nice name in s3. If -// nice name is uuid compatible, then function returns it. -func (n *layer) objectFindIDs(ctx context.Context, cid refs.CID, name string) ([]refs.ObjectID, error) { - var q query.Query - - q.Filters = append(q.Filters, query.Filter{ - Type: query.Filter_Exact, - Name: object.KeyRootObject, - }) - q.Filters = append(q.Filters, query.Filter{ - Type: query.Filter_Exact, - Name: AWS3NameHeader, - Value: name, - }) - - queryBinary, err := q.Marshal() +// objectSearch returns all available objects by search params. +func (n *layer) objectSearch(ctx context.Context, p *findParams) ([]*object.ID, error) { + cli, tkn, err := n.prepareClient(ctx) if err != nil { return nil, err } - conn, err := n.cli.GetConnection(ctx) - if err != nil { - return nil, err + filter := object.NewSearchFilters() + filter.AddNonLeafFilter() + + sop := new(client.SearchObjectParams) + + if p.cid != nil { + filter.AddFilter(object.HdrSysNameCID, p.cid.String(), object.MatchStringEqual) + sop.WithContainerID(p.cid) } - token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ - Conn: conn, - Addr: refs.Address{CID: cid}, - Verb: service.Token_Info_Search, - }) - if err != nil { - return nil, err + if p.key != "" { + filter.AddFilter(AWS3NameHeader, p.key, object.MatchStringEqual) } - req := new(object.SearchRequest) - req.Query = queryBinary - req.QueryVersion = 1 - req.ContainerID = cid - req.SetTTL(service.SingleForwardingTTL) - req.SetToken(token) - // req.SetBearer(bearerToken) + sop.WithSearchFilters(filter) - err = service.SignRequestData(n.key, req) - if 
err != nil { - return nil, err - } - - // todo: think about timeout - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - searchClient, err := object.NewServiceClient(conn).Search(ctx, req) - if err != nil { - return nil, err - } - - var response []refs.Address - - for { - resp, err := searchClient.Recv() - if err != nil { - if err == io.EOF { - break - } - - return nil, errors.New("search command received error") - } - - response = append(response, resp.Addresses...) - } - - switch ln := len(response); { - case ln > 0: - result := make([]refs.ObjectID, 0, len(response)) - for i := range response { - result = append(result, response[i].ObjectID) - } - - return result, nil - default: - return nil, errors.New("object not found") - } + return cli.SearchObject(ctx, sop, client.WithSession(tkn)) } // objectFindID returns object id (uuid) based on it's nice name in s3. If // nice name is uuid compatible, then function returns it. -func (n *layer) objectFindID(ctx context.Context, cid refs.CID, name string, put bool) (refs.ObjectID, error) { - var id refs.ObjectID - - if result, err := n.objectFindIDs(ctx, cid, name); err != nil { - return id, err +func (n *layer) objectFindID(ctx context.Context, p *findParams) (*object.ID, error) { + if result, err := n.objectSearch(ctx, p); err != nil { + return nil, err } else if ln := len(result); ln == 0 { - // Minio lists all objects with and without nice names. All objects - // without nice name still have "name" in terms of minio - uuid encoded - // into string. There is a tricky case when user upload object - // with nice name that is encoded uuid. - // There is an optimisation to parse name and return uuid if it name is uuid - // compatible. It _should not_ work in case of put operation, because object - // with uuid compatible nice name may not exist. Therefore this optimization - // breaks object put logic and must be turned off. 
- if !put { - err := id.Parse(name) - if err == nil { - return id, nil - } - } - return id, status.Error(codes.NotFound, "object not found") + return nil, status.Error(codes.NotFound, "object not found") } else if ln == 1 { return result[0], nil } - return id, errors.New("several objects with the same name found") + return nil, errors.New("several objects with the same name found") } // objectHead returns all object's headers. -func (n *layer) objectHead(ctx context.Context, addr refs.Address) (*object.Object, error) { - - conn, err := n.cli.GetConnection(ctx) +func (n *layer) objectHead(ctx context.Context, addr *object.Address) (*object.Object, error) { + cli, tkn, err := n.prepareClient(ctx) if err != nil { return nil, err } - token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ - Conn: conn, - Addr: addr, - Verb: service.Token_Info_Head, - }) - if err != nil { - return nil, err - } + ohp := new(client.ObjectHeaderParams) + ohp.WithAddress(addr) + ohp.WithAllFields() + ohp.WithMainFields() - req := new(object.HeadRequest) - req.Address = addr - req.FullHeaders = true - req.SetTTL(service.SingleForwardingTTL) - req.SetToken(token) - // req.SetBearer(bearerToken) - - err = service.SignRequestData(n.key, req) - if err != nil { - return nil, err - } - - // todo: think about timeout - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - res, err := object.NewServiceClient(conn).Head(ctx, req) - if err != nil { - return nil, err - } - - return res.Object, nil -} - -func receiveObject(cli object.Service_GetClient) (*object.Object, error) { - var ( - off int - buf []byte - obj *object.Object - ) - - for { - resp, err := cli.Recv() - if err != nil { - if err == io.EOF { - break - } - - return nil, err - } - - switch o := resp.R.(type) { - case *object.GetResponse_Object: - if obj != nil { - return nil, errors.New("object headers already received") - } else if _, hdr := o.Object.LastHeader(object.HeaderType(object.TombstoneHdr)); hdr != nil { 
- return nil, errors.New("object already removed") - } - - obj = o.Object - buf = make([]byte, obj.SystemHeader.PayloadLength) - - if len(obj.Payload) > 0 { - off += copy(buf, obj.Payload) - } - case *object.GetResponse_Chunk: - if obj == nil { - return nil, errors.New("object headers not received") - } - off += copy(buf[off:], o.Chunk) - default: - return nil, errors.Errorf("unknown response %T", o) - } - } - - if obj == nil { - return nil, errors.New("object headers not received") - } - obj.Payload = buf - - return obj, nil + return cli.GetObjectHeader(ctx, ohp, client.WithSession(tkn)) } // objectGet and write it into provided io.Reader. -func (n *layer) objectGet(ctx context.Context, p getParams) (*object.Object, error) { - conn, err := n.cli.GetConnection(ctx) +func (n *layer) objectGet(ctx context.Context, p *getParams) (*object.Object, error) { + cli, tkn, err := n.prepareClient(ctx) if err != nil { return nil, err } - token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ - Conn: conn, - Addr: p.addr, - Verb: service.Token_Info_Get, - }) - if err != nil { - return nil, err - } + // prepare length/offset writer + writer := newWriter(p.Writer, p.offset, p.length) - // todo: replace object.Get() call by object.GetRange() for - // true sequential reading support; it will be possible when - // object.GetRange() response message become gRPC stream. 
- req := new(object.GetRequest) - req.Address = p.addr - req.SetTTL(service.SingleForwardingTTL) - req.SetToken(token) - // req.SetBearer(bearerToken) + gop := new(client.GetObjectParams) + gop.WithPayloadWriter(writer) - err = service.SignRequestData(n.key, req) - if err != nil { - return nil, err - } - - // todo: think about timeout - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - var obj *object.Object - - if cli, err := object.NewServiceClient(conn).Get(ctx, req); err != nil { - return nil, err - } else if obj, err = receiveObject(cli); err != nil { - return nil, err - } else if ln := int64(obj.SystemHeader.PayloadLength); p.start+p.length > ln { - return nil, errors.Errorf("slice bounds out of range: len = %d, start = %d, offset = %d", - ln, p.start, p.length) - } else if _, err = p.writer.Write(obj.Payload[p.start : p.start+p.length]); err != nil { - return nil, err - } - - // remove payload: - obj.Payload = nil - - return obj, nil + return cli.GetObject(ctx, gop, client.WithSession(tkn)) } -// objectPut into neofs, took payload from io.Reader. -func (n *layer) objectPut(ctx context.Context, p putParams) (*object.Object, error) { - conn, err := n.cli.GetConnection(ctx) - if err != nil { +// objectPut into NeoFS, took payload from io.Reader. 
+func (n *layer) objectPut(ctx context.Context, p *PutObjectParams) (*ObjectInfo, error) { + var ( + err error + own *owner.ID + brt *token.BearerToken + cid = container.NewID() + ) + + if brt, err = auth.GetBearerToken(ctx); err != nil { return nil, err - } - - token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ - Conn: conn, - Addr: p.addr, - Verb: service.Token_Info_Put, - }) - if err != nil { - n.log.Error("could not prepare token", - zap.Error(err)) + } else if own, err = GetOwnerID(brt); err != nil { return nil, err - } - - putClient, err := object.NewServiceClient(conn).Put(ctx) - if err != nil { - n.log.Error("could not prepare PutClient", - zap.Error(err)) + } else if err = cid.Parse(p.Bucket); err != nil { return nil, err - } - - if p.userHeaders == nil { - p.userHeaders = make(map[string]string) - } - - // Set object name if not set before - if _, ok := p.userHeaders[AWS3NameHeader]; !ok { - p.userHeaders[AWS3NameHeader] = p.name - } - - readBuffer := make([]byte, dataChunkSize) - obj := &object.Object{ - SystemHeader: object.SystemHeader{ - Version: objectVersion, - ID: p.addr.ObjectID, - OwnerID: n.uid, - CID: p.addr.CID, - PayloadLength: uint64(p.size), - }, - Headers: parseUserHeaders(p.userHeaders), - } - - req := object.MakePutRequestHeader(obj) - req.SetTTL(service.SingleForwardingTTL) - req.SetToken(token) - // req.SetBearer(bearerToken) - - err = service.SignRequestData(n.key, req) - if err != nil { - n.log.Error("could not prepare request", - zap.Error(err)) - return nil, err - } - - err = putClient.Send(req) - if err != nil { - n.log.Error("could not send request", - zap.Error(err)) - return nil, err - } - - read, err := p.r.Read(readBuffer) - for read > 0 { - if err != nil && err != io.EOF { - n.log.Error("something went wrong", - zap.Error(err)) - return nil, err + } else if _, err = n.objectFindID(ctx, &findParams{cid: cid, key: p.Object}); err == nil { + return nil, &api.ObjectAlreadyExists{ + Bucket: p.Bucket, + Object: p.Object, } 
- - if read > 0 { - req := object.MakePutRequestChunk(readBuffer[:read]) - req.SetTTL(service.SingleForwardingTTL) - // req.SetBearer(bearerToken) - - err = service.SignRequestData(n.key, req) - if err != nil { - n.log.Error("could not sign chunk request", - zap.Error(err)) - return nil, err - } - - err = putClient.Send(req) - if err != nil && err != io.EOF { - n.log.Error("could not send chunk", - zap.Error(err)) - return nil, err - } - } - - read, err = p.r.Read(readBuffer) } - _, err = putClient.CloseAndRecv() - if err != nil { - n.log.Error("could not finish request", - zap.Error(err)) - return nil, err - } - - // maybe make a head? - return obj, nil -} - -// storageGroupPut prepares storage group object and put it into neofs. -func (n *layer) storageGroupPut(ctx context.Context, p sgParams) (*object.Object, error) { - conn, err := n.cli.GetConnection(ctx) + cli, tkn, err := n.prepareClient(ctx) if err != nil { return nil, err } - token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ - Conn: conn, - Addr: p.addr, - Verb: service.Token_Info_Put, - }) - if err != nil { + attributes := make([]*object.Attribute, 0, len(p.Header)+1) + + filename := object.NewAttribute() + filename.SetKey(AWS3NameHeader) + filename.SetValue(p.Object) + + attributes = append(attributes, filename) + + for k, v := range p.Header { + ua := object.NewAttribute() + ua.SetKey(k) + ua.SetValue(v) + + attributes = append(attributes, ua) + } + + b := new(bytes.Buffer) + r := io.TeeReader(p.Reader, b) + + raw := object.NewRaw() + raw.SetOwnerID(own) + raw.SetContainerID(cid) + raw.SetAttributes(attributes...) 
+ + pop := new(client.PutObjectParams) + pop.WithPayloadReader(r) + pop.WithObject(raw.Object()) + + if _, err = cli.PutObject(ctx, pop, client.WithSession(tkn)); err != nil { return nil, err } - // todo: think about timeout - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - putClient, err := object.NewServiceClient(conn).Put(ctx) - if err != nil { - return nil, err - } - - sg := &object.Object{ - SystemHeader: object.SystemHeader{ - Version: objectVersion, - ID: p.addr.ObjectID, - OwnerID: n.uid, - CID: p.addr.CID, - }, - Headers: make([]object.Header, 0, len(p.objects)), - } - - for i := range p.objects { - sg.AddHeader(&object.Header{Value: &object.Header_Link{ - Link: &object.Link{Type: object.Link_StorageGroup, ID: p.objects[i]}, - }}) - } - - sg.SetStorageGroup(new(storagegroup.StorageGroup)) - - req := object.MakePutRequestHeader(sg) - req.SetTTL(service.SingleForwardingTTL) - req.SetToken(token) - // req.SetBearer(bearerToken) - - err = service.SignRequestData(n.key, req) - if err != nil { - return nil, err - } - - err = putClient.Send(req) - if err != nil { - return nil, err - } - - _, err = putClient.CloseAndRecv() - if err != nil { - return nil, err - } - - return sg, nil + return &ObjectInfo{ + Bucket: p.Bucket, + Name: p.Object, + Size: p.Size, + Created: time.Now(), + ContentType: http.DetectContentType(b.Bytes()), + Owner: own, + Headers: p.Header, + }, nil } // objectDelete puts tombstone object into neofs. 
-func (n *layer) objectDelete(ctx context.Context, p delParams) error { - conn, err := n.cli.GetConnection(ctx) +func (n *layer) objectDelete(ctx context.Context, address *object.Address) error { + cli, tkn, err := n.prepareClient(ctx) if err != nil { return err } - token, err := n.cli.SessionToken(ctx, &pool.SessionParams{ - Conn: conn, - Addr: p.addr, - Verb: service.Token_Info_Delete, - }) - if err != nil { - return err - } + dob := new(client.DeleteObjectParams) + dob.WithAddress(address) - req := new(object.DeleteRequest) - req.Address = p.addr - req.OwnerID = n.uid - req.SetTTL(service.SingleForwardingTTL) - req.SetToken(token) - // req.SetBearer(bearerToken) - - err = service.SignRequestData(n.key, req) - if err != nil { - return err - } - - // todo: think about timeout - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - _, err = object.NewServiceClient(conn).Delete(ctx, req) - - return err + return cli.DeleteObject(ctx, dob, client.WithSession(tkn)) } diff --git a/api/layer/util.go b/api/layer/util.go index d97f13ed..e504e83b 100644 --- a/api/layer/util.go +++ b/api/layer/util.go @@ -6,8 +6,8 @@ import ( "strings" "time" - "github.com/nspcc-dev/neofs-api-go/object" - "github.com/nspcc-dev/neofs-api-go/refs" + "github.com/nspcc-dev/neofs-api-go/pkg/object" + "github.com/nspcc-dev/neofs-api-go/pkg/owner" ) type ( @@ -17,7 +17,7 @@ type ( Size int64 ContentType string Created time.Time - Owner refs.OwnerID + Owner *owner.ID Headers map[string]string } @@ -39,7 +39,7 @@ type ( NextContinuationToken string // List of objects info for this request. - Objects []ObjectInfo + Objects []*ObjectInfo // List of prefixes for this request. 
Prefixes []string @@ -48,65 +48,46 @@ type ( const pathSeparator = string(os.PathSeparator) -func userHeaders(h []object.Header) map[string]string { - result := make(map[string]string, len(h)) +func userHeaders(attrs []*object.Attribute) map[string]string { + result := make(map[string]string, len(attrs)) - for i := range h { - switch v := h[i].Value.(type) { - case *object.Header_UserHeader: - result[v.UserHeader.Key] = v.UserHeader.Value - default: - continue - } + for _, attr := range attrs { + result[attr.GetKey()] = attr.GetValue() } return result } func objectInfoFromMeta(meta *object.Object) *ObjectInfo { - aws3name := meta.SystemHeader.ID.String() + aws3name := meta.GetID().String() - userHeaders := userHeaders(meta.Headers) + userHeaders := userHeaders(meta.GetAttributes()) if name, ok := userHeaders[AWS3NameHeader]; ok { aws3name = name delete(userHeaders, name) } - mimeType := http.DetectContentType(meta.Payload) + mimeType := http.DetectContentType(meta.GetPayload()) return &ObjectInfo{ - Bucket: meta.SystemHeader.CID.String(), + Bucket: meta.GetContainerID().String(), Name: aws3name, ContentType: mimeType, Headers: userHeaders, - Size: int64(meta.SystemHeader.PayloadLength), - Created: time.Unix(meta.SystemHeader.CreatedAt.UnixTime, 0), + Size: int64(meta.GetPayloadSize()), + Created: time.Now(), // time.Unix(meta.GetCreationEpoch(), 0), } } -func parseUserHeaders(h map[string]string) []object.Header { - headers := make([]object.Header, 0, len(h)) - - for k, v := range h { - uh := &object.UserHeader{Key: k, Value: v} - headers = append(headers, object.Header{ - Value: &object.Header_UserHeader{UserHeader: uh}, - }) - } - - return headers -} - func nameFromObject(o *object.Object) (string, string) { - var ( - name string - uh = userHeaders(o.Headers) - ) + var name = o.GetID().String() - if _, ok := uh[AWS3NameHeader]; !ok { - name = o.SystemHeader.ID.String() - } else { - name = uh[AWS3NameHeader] + for _, attr := range o.GetAttributes() { + if 
attr.GetKey() == AWS3NameHeader {
+			name = attr.GetValue()
+
+			break
+		}
 	}
 
 	ind := strings.LastIndex(name, pathSeparator)
diff --git a/api/layer/writer.go b/api/layer/writer.go
new file mode 100644
index 00000000..98f24b89
--- /dev/null
+++ b/api/layer/writer.go
@@ -0,0 +1,49 @@
+package layer
+
+import "io"
+
+type offsetWriter struct {
+	io.Writer
+
+	written int64
+	skipped int64
+
+	offset int64
+	length int64
+}
+
+func newWriter(w io.Writer, offset, length int64) io.Writer {
+	return &offsetWriter{
+		Writer: w,
+		offset: offset,
+		length: length,
+	}
+}
+
+func (w *offsetWriter) Write(p []byte) (int, error) {
+	ln := len(p)
+	length := int64(ln)
+	offset := w.offset - w.skipped
+
+	if length-offset < 0 {
+		w.skipped += length
+
+		return ln, nil
+	}
+
+	length -= offset
+
+	// Clamp to the bytes still allowed in the window so the
+	// slice below never exceeds len(p) or the target length.
+	left := w.length - w.written
+	if left < length {
+		length = left
+	}
+
+	n, err := w.Writer.Write(p[offset : offset+length])
+
+	w.written += int64(n)
+	w.skipped += offset
+
+	return n, err
+}
diff --git a/api/layer/writer_test.go b/api/layer/writer_test.go
new file mode 100644
index 00000000..9c820f0c
--- /dev/null
+++ b/api/layer/writer_test.go
@@ -0,0 +1,77 @@
+package layer
+
+import (
+	"bytes"
+	"crypto/rand"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func testBuffer(t *testing.T) []byte {
+	buf := make([]byte, 1024)
+	_, err := rand.Read(buf)
+	require.NoError(t, err)
+
+	return buf
+}
+
+func TestOffsetWriter(t *testing.T) {
+	b := testBuffer(t)
+	k := 64
+	d := len(b) / k
+
+	t.Run("1024 / 100 / 100 bytes success", func(t *testing.T) {
+		w := new(bytes.Buffer)
+		o := int64(100)
+		l := int64(100)
+
+		wt := newWriter(w, o, l)
+		for i := 0; i < k; i++ {
+			_, err := wt.Write(b[i*d : (i+1)*d])
+			require.NoError(t, err)
+		}
+
+		wo := wt.(*offsetWriter)
+
+		require.Equal(t, o, wo.skipped)
+		require.Equal(t, l, wo.written)
+		require.Equal(t, b[o:o+l], w.Bytes())
+	})
+
+	t.Run("1024 / 0 / 100 bytes success", func(t 
*testing.T) { + w := new(bytes.Buffer) + o := int64(0) + l := int64(100) + + wt := newWriter(w, o, l) + for i := 0; i < k; i++ { + _, err := wt.Write(b[i*d : (i+1)*d]) + require.NoError(t, err) + } + + wo := wt.(*offsetWriter) + + require.Equal(t, o, wo.skipped) + require.Equal(t, l, wo.written) + require.Equal(t, b[o:o+l], w.Bytes()) + }) + + t.Run("1024 / 0 / 1024 bytes success", func(t *testing.T) { + w := new(bytes.Buffer) + o := int64(0) + l := int64(1024) + + wt := newWriter(w, o, l) + for i := 0; i < k; i++ { + _, err := wt.Write(b[i*d : (i+1)*d]) + require.NoError(t, err) + } + + wo := wt.(*offsetWriter) + + require.Equal(t, o, wo.skipped) + require.Equal(t, l, wo.written) + require.Equal(t, b[o:o+l], w.Bytes()) + }) +}