forked from TrueCloudLab/frostfs-sdk-go
[#299] client: Do not use pointers to required response fields
In the previous implementation, the `client` package provided access to nested response fields as pointers to them. This forced clients to handle nil cases even when the field's presence in the response is required. Avoid returning pointers to required fields in response getters. This also reduces reference-counter load and allows fields to be decoded directly, without an additional assignment.

Signed-off-by: Leonard Lyubich <leonard@nspcc.ru>
parent 30bf79f075
commit 4e31b4f231
10 changed files with 150 additions and 160 deletions
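Before the per-file hunks, here is a minimal, self-contained Go sketch of the pattern this commit applies (the type and field names below are hypothetical stand-ins for illustration, not code from this repository): getters for required response fields return values instead of pointers, and a caller that still needs a pointer takes the address of the returned copy.

package example

// Decimal is a stand-in for accounting.Decimal, used only for illustration.
type Decimal struct{ Value int64 }

// Old style: the getter returns a pointer, so every caller must check for nil
// even though the protocol marks the field as required.
type resBalanceOld struct{ amount *Decimal }

func (x resBalanceOld) Amount() *Decimal { return x.amount }

// New style: the field is decoded straight into the result and returned by value,
// so no nil check is needed.
type resBalanceNew struct{ amount Decimal }

func (x resBalanceNew) Amount() Decimal { return x.amount }

// A caller that still needs a pointer (as the pool wrappers below do) copies
// the value and takes its address.
func amountPtr(res resBalanceNew) *Decimal {
	bal := res.Amount()
	return &bal
}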
@@ -2,8 +2,6 @@ package client

 import (
 	"context"
-	"errors"
-	"fmt"

 	v2accounting "github.com/nspcc-dev/neofs-api-go/v2/accounting"
 	"github.com/nspcc-dev/neofs-api-go/v2/refs"
@@ -32,17 +30,11 @@ func (x *PrmBalanceGet) SetAccount(id user.ID) {
 type ResBalanceGet struct {
 	statusRes

-	amount *accounting.Decimal
-}
-
-func (x *ResBalanceGet) setAmount(v *accounting.Decimal) {
-	x.amount = v
+	amount accounting.Decimal
 }

 // Amount returns current amount of funds on the NeoFS account as decimal number.
-//
-// Client doesn't retain value so modification is safe.
-func (x ResBalanceGet) Amount() *accounting.Decimal {
+func (x ResBalanceGet) Amount() accounting.Decimal {
 	return x.amount
 }

@@ -96,21 +88,18 @@ func (c *Client) BalanceGet(ctx context.Context, prm PrmBalanceGet) (*ResBalance
 	cc.result = func(r responseV2) {
 		resp := r.(*v2accounting.BalanceResponse)

+		const fieldBalance = "balance"
+
 		bal := resp.GetBody().GetBalance()
 		if bal == nil {
-			cc.err = errors.New("missing balance field")
+			cc.err = newErrMissingResponseField(fieldBalance)
 			return
 		}

-		var d accounting.Decimal
-
-		cc.err = d.ReadFromV2(*bal)
+		cc.err = res.amount.ReadFromV2(*bal)
 		if cc.err != nil {
-			cc.err = fmt.Errorf("invalid balance: %w", cc.err)
-			return
+			cc.err = newErrInvalidResponseField(fieldBalance, cc.err)
 		}
-
-		res.setAmount(&d)
 	}

 	// process call
@@ -251,7 +251,7 @@ func (x *contextCall) close() bool {
 		x.result(x.resp)
 	}

-	return true
+	return x.err == nil
 }

 // goes through all stages of sending a request and processing a response. Returns true if successful.
@@ -54,22 +54,16 @@ func (x *PrmContainerPut) WithinSession(s session.Container) {
 type ResContainerPut struct {
 	statusRes

-	id *cid.ID
+	id cid.ID
 }

 // ID returns identifier of the container declared to be stored in the system.
 // Used as a link to information about the container (in particular, you can
 // asynchronously check if the save was successful).
-//
-// Client doesn't retain value so modification is safe.
-func (x ResContainerPut) ID() *cid.ID {
+func (x ResContainerPut) ID() cid.ID {
 	return x.id
 }

-func (x *ResContainerPut) setID(id *cid.ID) {
-	x.id = id
-}
-
 // ContainerPut sends request to save container in NeoFS.
 //
 // Exactly one return value is non-nil. By default, server status is returned in res structure.
@@ -150,17 +144,19 @@ func (c *Client) ContainerPut(ctx context.Context, prm PrmContainerPut) (*ResCon
 	}
 	cc.result = func(r responseV2) {
 		resp := r.(*v2container.PutResponse)
-		var cID *cid.ID

+		const fieldCnrID = "container ID"
+
 		cidV2 := resp.GetBody().GetContainerID()
-		if cidV2 != nil {
-			var c cid.ID
-			_ = c.ReadFromV2(*cidV2)
-
-			cID = &c
+		if cidV2 == nil {
+			cc.err = newErrMissingResponseField(fieldCnrID)
+			return
 		}

-		res.setID(cID)
+		cc.err = res.id.ReadFromV2(*cidV2)
+		if cc.err != nil {
+			cc.err = newErrInvalidResponseField(fieldCnrID, cc.err)
+		}
 	}

 	// process call
@@ -200,10 +196,6 @@ func (x ResContainerGet) Container() container.Container {
 	return x.cnr
 }

-func (x *ResContainerGet) setContainer(cnr container.Container) {
-	x.cnr = cnr
-}
-
 // ContainerGet reads NeoFS container by ID.
 //
 // Exactly one return value is non-nil. By default, server status is returned in res structure.
@@ -261,15 +253,10 @@ func (c *Client) ContainerGet(ctx context.Context, prm PrmContainerGet) (*ResCon
 			return
 		}

-		var cnr container.Container
-
-		err := cnr.ReadFromV2(*cnrV2)
-		if err != nil {
-			cc.err = fmt.Errorf("invalid container in response: %w", err)
-			return
+		cc.err = res.cnr.ReadFromV2(*cnrV2)
+		if cc.err != nil {
+			cc.err = fmt.Errorf("invalid container in response: %w", cc.err)
 		}
-
-		res.setContainer(cnr)
 	}

 	// process call
@@ -309,10 +296,6 @@ func (x ResContainerList) Containers() []cid.ID {
 	return x.ids
 }

-func (x *ResContainerList) setContainers(ids []cid.ID) {
-	x.ids = ids
-}
-
 // ContainerList requests identifiers of the account-owned containers.
 //
 // Exactly one return value is non-nil. By default, server status is returned in res structure.
@@ -364,13 +347,15 @@ func (c *Client) ContainerList(ctx context.Context, prm PrmContainerList) (*ResC
 	cc.result = func(r responseV2) {
 		resp := r.(*v2container.ListResponse)

-		ids := make([]cid.ID, len(resp.GetBody().GetContainerIDs()))
+		res.ids = make([]cid.ID, len(resp.GetBody().GetContainerIDs()))

 		for i, cidV2 := range resp.GetBody().GetContainerIDs() {
-			_ = ids[i].ReadFromV2(cidV2)
+			cc.err = res.ids[i].ReadFromV2(cidV2)
+			if cc.err != nil {
+				cc.err = fmt.Errorf("invalid ID in the response: %w", cc.err)
+				return
+			}
 		}
-
-		res.setContainers(ids)
 	}

 	// process call
@@ -527,20 +512,14 @@ func (x *PrmContainerEACL) SetContainer(id cid.ID) {
 type ResContainerEACL struct {
 	statusRes

-	table *eacl.Table
+	table eacl.Table
 }

 // Table returns eACL table of the requested container.
-//
-// Client doesn't retain value so modification is safe.
-func (x ResContainerEACL) Table() *eacl.Table {
+func (x ResContainerEACL) Table() eacl.Table {
 	return x.table
 }

-func (x *ResContainerEACL) setTable(table *eacl.Table) {
-	x.table = table
-}
-
 // ContainerEACL reads eACL table of the NeoFS container.
 //
 // Exactly one return value is non-nil. By default, server status is returned in res structure.
@@ -594,11 +573,13 @@ func (c *Client) ContainerEACL(ctx context.Context, prm PrmContainerEACL) (*ResC
 	cc.result = func(r responseV2) {
 		resp := r.(*v2container.GetExtendedACLResponse)

-		body := resp.GetBody()
+		eACL := resp.GetBody().GetEACL()
+		if eACL == nil {
+			cc.err = newErrMissingResponseField("eACL")
+			return
+		}

-		table := eacl.NewTableFromV2(body.GetEACL())
-
-		res.setTable(table)
+		res.table = *eacl.NewTableFromV2(eACL)
 	}

 	// process call
@@ -833,7 +814,7 @@ func SyncContainerWithNetwork(ctx context.Context, cnr *container.Container, c *
 		return fmt.Errorf("network info call: %w", err)
 	}

-	container.ApplyNetworkConfig(cnr, *res.Info())
+	container.ApplyNetworkConfig(cnr, res.Info())

 	return nil
 }
@@ -2,6 +2,7 @@ package client

 import (
 	"errors"
+	"fmt"

 	apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
 )
@@ -92,3 +93,14 @@ func IsErrSessionNotFound(err error) bool {
 		return true
 	}
 }
+
+// returns error describing missing field with the given name.
+func newErrMissingResponseField(name string) error {
+	return fmt.Errorf("missing %s field in the response", name)
+}
+
+// returns error describing invalid field (according to the NeoFS protocol)
+// with the given name and format violation err.
+func newErrInvalidResponseField(name string, err error) error {
+	return fmt.Errorf("invalid %s field in the response: %w", name, err)
+}
@@ -2,8 +2,6 @@ package client

 import (
 	"context"
-	"errors"
-	"fmt"

 	v2netmap "github.com/nspcc-dev/neofs-api-go/v2/netmap"
 	rpcapi "github.com/nspcc-dev/neofs-api-go/v2/rpc"
@@ -21,33 +19,21 @@ type PrmEndpointInfo struct {
 type ResEndpointInfo struct {
 	statusRes

-	version *version.Version
+	version version.Version

-	ni *netmap.NodeInfo
+	ni netmap.NodeInfo
 }

 // LatestVersion returns latest NeoFS API protocol's version in use.
-//
-// Client doesn't retain value so modification is safe.
-func (x ResEndpointInfo) LatestVersion() *version.Version {
+func (x ResEndpointInfo) LatestVersion() version.Version {
 	return x.version
 }

-func (x *ResEndpointInfo) setLatestVersion(ver *version.Version) {
-	x.version = ver
-}
-
 // NodeInfo returns information about the NeoFS node served on the remote endpoint.
-//
-// Client doesn't retain value so modification is safe.
-func (x ResEndpointInfo) NodeInfo() *netmap.NodeInfo {
+func (x ResEndpointInfo) NodeInfo() netmap.NodeInfo {
 	return x.ni
 }

-func (x *ResEndpointInfo) setNodeInfo(info *netmap.NodeInfo) {
-	x.ni = info
-}
-
 // EndpointInfo requests information about the storage node served on the remote endpoint.
 //
 // Method can be used as a health check to see if node is alive and responds to requests.
@@ -93,31 +79,33 @@ func (c *Client) EndpointInfo(ctx context.Context, prm PrmEndpointInfo) (*ResEnd

 		body := resp.GetBody()

-		var ver version.Version
-		if v2ver := body.GetVersion(); v2ver != nil {
-			cc.err = ver.ReadFromV2(*v2ver)
+		const fieldVersion = "version"
+
+		verV2 := body.GetVersion()
+		if verV2 == nil {
+			cc.err = newErrMissingResponseField(fieldVersion)
+			return
+		}
+
+		cc.err = res.version.ReadFromV2(*verV2)
 		if cc.err != nil {
-			cc.err = fmt.Errorf("invalid version: %w", cc.err)
-			return
-		}
-		}
-		res.setLatestVersion(&ver)
-
-		nodeV2 := body.GetNodeInfo()
-		if nodeV2 == nil {
-			cc.err = errors.New("missing node info in response body")
+			cc.err = newErrInvalidResponseField(fieldVersion, cc.err)
 			return
 		}

-		var node netmap.NodeInfo
+		const fieldNodeInfo = "node info"

-		cc.err = node.ReadFromV2(*nodeV2)
+		nodeInfoV2 := body.GetNodeInfo()
+		if nodeInfoV2 == nil {
+			cc.err = newErrMissingResponseField(fieldNodeInfo)
+			return
+		}
+
+		cc.err = res.ni.ReadFromV2(*nodeInfoV2)
 		if cc.err != nil {
-			cc.err = fmt.Errorf("invalid node info: %w", cc.err)
+			cc.err = newErrInvalidResponseField(fieldNodeInfo, cc.err)
 			return
 		}
-
-		res.setNodeInfo(&node)
 	}

 	// process call
@@ -137,20 +125,14 @@ type PrmNetworkInfo struct {
 type ResNetworkInfo struct {
 	statusRes

-	info *netmap.NetworkInfo
+	info netmap.NetworkInfo
 }

 // Info returns structured information about the NeoFS network.
-//
-// Client doesn't retain value so modification is safe.
-func (x ResNetworkInfo) Info() *netmap.NetworkInfo {
+func (x ResNetworkInfo) Info() netmap.NetworkInfo {
 	return x.info
 }

-func (x *ResNetworkInfo) setInfo(info *netmap.NetworkInfo) {
-	x.info = info
-}
-
 // NetworkInfo requests information about the NeoFS network of which the remote server is a part.
 //
 // Any client's internal or transport errors are returned as `error`.
@@ -192,21 +174,19 @@ func (c *Client) NetworkInfo(ctx context.Context, prm PrmNetworkInfo) (*ResNetwo
 	cc.result = func(r responseV2) {
 		resp := r.(*v2netmap.NetworkInfoResponse)

+		const fieldNetInfo = "network info"
+
 		netInfoV2 := resp.GetBody().GetNetworkInfo()
 		if netInfoV2 == nil {
-			cc.err = errors.New("missing network info in response body")
+			cc.err = newErrMissingResponseField(fieldNetInfo)
 			return
 		}

-		var netInfo netmap.NetworkInfo
-
-		cc.err = netInfo.ReadFromV2(*netInfoV2)
+		cc.err = res.info.ReadFromV2(*netInfoV2)
 		if cc.err != nil {
-			cc.err = fmt.Errorf("invalid network info: %w", cc.err)
+			cc.err = newErrInvalidResponseField(fieldNetInfo, cc.err)
 			return
 		}
-
-		res.setInfo(&netInfo)
 	}

 	// process call
@@ -93,18 +93,12 @@ func (x *PrmObjectDelete) WithXHeaders(hs ...string) {
 type ResObjectDelete struct {
 	statusRes

-	idTomb *v2refs.ObjectID
+	tomb oid.ID
 }

-// ReadTombstoneID reads identifier of the created tombstone object.
-// Returns false if ID is missing (not read).
-func (x ResObjectDelete) ReadTombstoneID(dst *oid.ID) bool {
-	if x.idTomb != nil {
-		_ = dst.ReadFromV2(*x.idTomb)
-		return true
-	}
-
-	return false
+// Tombstone returns identifier of the created tombstone object.
+func (x ResObjectDelete) Tombstone() oid.ID {
+	return x.tomb
 }

 // ObjectDelete marks an object for deletion from the container using NeoFS API protocol.
@@ -167,7 +161,18 @@ func (c *Client) ObjectDelete(ctx context.Context, prm PrmObjectDelete) (*ResObj
 		return rpcapi.DeleteObject(&c.c, &req, client.WithContext(ctx))
 	}
 	cc.result = func(r responseV2) {
-		res.idTomb = r.(*v2object.DeleteResponse).GetBody().GetTombstone().GetObjectID()
+		const fieldTombstone = "tombstone"
+
+		idTombV2 := r.(*v2object.DeleteResponse).GetBody().GetTombstone().GetObjectID()
+		if idTombV2 == nil {
+			cc.err = newErrMissingResponseField(fieldTombstone)
+			return
+		}
+
+		cc.err = res.tomb.ReadFromV2(*idTombV2)
+		if cc.err != nil {
+			cc.err = newErrInvalidResponseField(fieldTombstone, cc.err)
+		}
 	}

 	// process call
@@ -195,6 +195,9 @@ func (c *Client) ObjectHash(ctx context.Context, prm PrmObjectHash) (*ResObjectH
 	}
 	cc.result = func(r responseV2) {
 		res.checksums = r.(*v2object.GetRangeHashResponse).GetBody().GetHashList()
+		if len(res.checksums) == 0 {
+			cc.err = newErrMissingResponseField("hash list")
+		}
 	}

 	// process call
@@ -32,20 +32,12 @@ func (x *PrmObjectPutInit) SetCopiesNumber(copiesNumber uint32) {
 type ResObjectPut struct {
 	statusRes

-	resp v2object.PutResponse
+	obj oid.ID
 }

-// ReadStoredObjectID reads identifier of the saved object.
-// Returns false if ID is missing (not read).
-func (x *ResObjectPut) ReadStoredObjectID(id *oid.ID) bool {
-	idv2 := x.resp.GetBody().GetObjectID()
-	if idv2 == nil {
-		return false
-	}
-
-	_ = id.ReadFromV2(*idv2)
-
-	return true
+// StoredObjectID returns identifier of the saved object.
+func (x ResObjectPut) StoredObjectID() oid.ID {
+	return x.obj
 }

 // ObjectWriter is designed to write one object to NeoFS system.
@@ -216,13 +208,15 @@ func (c *Client) ObjectPutInit(ctx context.Context, prm PrmObjectPutInit) (*Obje
 	var (
 		res ResObjectPut
 		w   ObjectWriter
+
+		resp v2object.PutResponse
 	)

 	ctx, w.cancelCtxStream = context.WithCancel(ctx)

 	w.partInit.SetCopiesNumber(prm.copyNum)

-	stream, err := rpcapi.PutObject(&c.c, &res.resp, client.WithContext(ctx))
+	stream, err := rpcapi.PutObject(&c.c, &resp, client.WithContext(ctx))
 	if err != nil {
 		return nil, fmt.Errorf("open stream: %w", err)
 	}
@@ -242,11 +236,25 @@ func (c *Client) ObjectPutInit(ctx context.Context, prm PrmObjectPutInit) (*Obje
 	c.initCallContext(&w.ctxCall)
 	w.ctxCall.req = &req
 	w.ctxCall.statusRes = &res
-	w.ctxCall.resp = &res.resp
+	w.ctxCall.resp = &resp
 	w.ctxCall.wReq = func() error {
 		return stream.Write(&req)
 	}
 	w.ctxCall.closer = stream.Close
+	w.ctxCall.result = func(r responseV2) {
+		const fieldID = "ID"
+
+		idV2 := r.(*v2object.PutResponse).GetBody().GetObjectID()
+		if idV2 == nil {
+			w.ctxCall.err = newErrMissingResponseField(fieldID)
+			return
+		}
+
+		w.ctxCall.err = res.obj.ReadFromV2(*idV2)
+		if w.ctxCall.err != nil {
+			w.ctxCall.err = newErrInvalidResponseField(fieldID, w.ctxCall.err)
+		}
+	}

 	return &w, nil
 }
pool/pool.go
@@ -289,7 +289,9 @@ func (c *clientWrapper) balanceGet(ctx context.Context, prm PrmBalanceGet) (*acc
 		return nil, fmt.Errorf("balance get on client: %w", err)
 	}

-	return res.Amount(), nil
+	bal := res.Amount()
+
+	return &bal, nil
 }

 // containerPut invokes sdkClient.ContainerPut parse response status to error and return result as is.
@@ -310,12 +312,14 @@ func (c *clientWrapper) containerPut(ctx context.Context, prm PrmContainerPut) (
 		prm.waitParams.setDefaults()
 	}

-	err = waitForContainerPresence(ctx, c, res.ID(), &prm.waitParams)
+	idCnr := res.ID()
+
+	err = waitForContainerPresence(ctx, c, idCnr, &prm.waitParams)
 	if err = c.handleError(nil, err); err != nil {
 		return nil, fmt.Errorf("wait container presence on client: %w", err)
 	}

-	return res.ID(), nil
+	return &idCnr, nil
 }

 // containerGet invokes sdkClient.ContainerGet parse response status to error and return result as is.
@@ -400,7 +404,10 @@ func (c *clientWrapper) containerEACL(ctx context.Context, prm PrmContainerEACL)
 	if err = c.handleError(st, err); err != nil {
 		return nil, fmt.Errorf("get eacl on client: %w", err)
 	}
-	return res.Table(), nil
+
+	eACL := res.Table()
+
+	return &eACL, nil
 }

 // containerSetEACL invokes sdkClient.ContainerSetEACL parse response status to error.
@@ -454,7 +461,10 @@ func (c *clientWrapper) endpointInfo(ctx context.Context, _ prmEndpointInfo) (*n
 	if err = c.handleError(st, err); err != nil {
 		return nil, fmt.Errorf("endpoint info on client: %w", err)
 	}
-	return res.NodeInfo(), nil
+
+	nodeInfo := res.NodeInfo()
+
+	return &nodeInfo, nil
 }

 // networkInfo invokes sdkClient.NetworkInfo parse response status to error and return result as is.
@@ -470,7 +480,10 @@ func (c *clientWrapper) networkInfo(ctx context.Context, _ prmNetworkInfo) (*net
 	if err = c.handleError(st, err); err != nil {
 		return nil, fmt.Errorf("network info on client: %w", err)
 	}
-	return res.Info(), nil
+
+	netInfo := res.Info()
+
+	return &netInfo, nil
 }

 // objectPut writes object to NeoFS.
@@ -551,11 +564,7 @@ func (c *clientWrapper) objectPut(ctx context.Context, prm PrmObjectPut) (*oid.I
 		return nil, fmt.Errorf("client failure: %w", err)
 	}

-	var id oid.ID
-
-	if !res.ReadStoredObjectID(&id) {
-		return nil, errors.New("missing ID of the stored object")
-	}
+	id := res.StoredObjectID()

 	return &id, nil
 }
@@ -2167,11 +2176,9 @@ func (p Pool) Statistic() Statistic {
 }

 // waitForContainerPresence waits until the container is found on the NeoFS network.
-func waitForContainerPresence(ctx context.Context, cli client, cnrID *cid.ID, waitParams *WaitParams) error {
+func waitForContainerPresence(ctx context.Context, cli client, cnrID cid.ID, waitParams *WaitParams) error {
 	var prm PrmContainerGet
-	if cnrID != nil {
-		prm.SetContainerID(*cnrID)
-	}
+	prm.SetContainerID(cnrID)

 	return waitFor(ctx, waitParams, func(ctx context.Context) bool {
 		_, err := cli.containerGet(ctx, prm)
@@ -11,6 +11,7 @@ import (

 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 	apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
+	cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
 	neofsecdsa "github.com/nspcc-dev/neofs-sdk-go/crypto/ecdsa"
 	"github.com/nspcc-dev/neofs-sdk-go/object"
 	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
@@ -479,7 +480,9 @@ func TestWaitPresence(t *testing.T) {
 			cancel()
 		}()

-		err := waitForContainerPresence(ctx, mockCli, nil, &WaitParams{
+		var idCnr cid.ID
+
+		err := waitForContainerPresence(ctx, mockCli, idCnr, &WaitParams{
 			timeout:      120 * time.Second,
 			pollInterval: 5 * time.Second,
 		})
@@ -489,7 +492,8 @@

 	t.Run("context deadline exceeded", func(t *testing.T) {
 		ctx := context.Background()
-		err := waitForContainerPresence(ctx, mockCli, nil, &WaitParams{
+		var idCnr cid.ID
+		err := waitForContainerPresence(ctx, mockCli, idCnr, &WaitParams{
 			timeout:      500 * time.Millisecond,
 			pollInterval: 5 * time.Second,
 		})
@@ -499,7 +503,8 @@

 	t.Run("ok", func(t *testing.T) {
 		ctx := context.Background()
-		err := waitForContainerPresence(ctx, mockCli, nil, &WaitParams{
+		var idCnr cid.ID
+		err := waitForContainerPresence(ctx, mockCli, idCnr, &WaitParams{
 			timeout:      10 * time.Second,
 			pollInterval: 500 * time.Millisecond,
 		})