[#643] pkg: Sync method names and commentaries to them

Signed-off-by: Pavel Karpy <carpawell@nspcc.ru>
Pavel Karpy authored 2021-06-23 16:29:46 +03:00, committed by Alex Vanin
parent 25391111ad
commit 48827f42d3
33 changed files with 41 additions and 41 deletions

@@ -68,7 +68,7 @@ func (ip *DaughterTrustIteratorProvider) InitAllDaughtersIterator(
 // InitConsumersIterator returns iterator over all daughters
 // of the current node(manager) and all their consumers' local
 // trusts for ctx.Epoch() epoch and ctx.I() iteration.
 //
 // Returns ErrNoData if there is no trust data for
 // specified epoch and iteration.
 func (ip *DaughterTrustIteratorProvider) InitConsumersIterator(

@@ -21,7 +21,7 @@ type (
 // Contracts is an interface of the storage
 // of the alphabet contract addresses.
 Contracts interface {
-// Get by index must return address of the
+// GetByIndex must return address of the
 // alphabet contract by index of the glagolitic
 // letter (e.g 0 for Az, 40 for Izhitsa).
 //

@@ -74,9 +74,9 @@ type epochAuditReporter struct {
 rep audit.Reporter
 }
-// AuditProcessor manages audit tasks and fills queue for next epoch. This
-// process must not be interrupted by new audit epoch, so we limit pool size
-// for processor to one.
+// ProcessorPoolSize limits pool size for audit Processor. Processor manages
+// audit tasks and fills queue for next epoch. This process must not be interrupted
+// by new audit epoch, so we limit pool size for processor to one.
 const ProcessorPoolSize = 1
 // New creates audit processor instance.

@@ -15,9 +15,9 @@ import (
 "go.uber.org/zap"
 )
-// GovernanceProcessor manages governance sync tasks. This process must not be
-// interrupted by other sync operation, so we limit pool size for processor to
-// one.
+// ProcessorPoolSize limits pool size for governance Processor. Processor manages
+// governance sync tasks. This process must not be interrupted by other sync
+// operation, so we limit pool size for processor to one.
 const ProcessorPoolSize = 1
 type (

@@ -83,7 +83,7 @@ func WithPermissions(perm os.FileMode) Option {
 }
 }
-// WithSizeLimit returns option to specify maximum size
+// WithObjectSizeLimit returns option to specify maximum size
 // of the objects stored in Blobovnicza.
 func WithObjectSizeLimit(lim uint64) Option {
 return func(c *cfg) {

@@ -24,7 +24,7 @@ func (p *GetRangePrm) SetAddress(addr *objectSDK.Address) {
 p.addr = addr
 }
-// SetAddress sets range of the requested payload data .
+// SetRange sets range of the requested payload data .
 func (p *GetRangePrm) SetRange(rng *objectSDK.Range) {
 p.rng = rng
 }
@@ -34,7 +34,7 @@ func (p *GetRangeRes) RangeData() []byte {
 return p.rngData
 }
-// Get reads the object from Blobovnicza by address.
+// GetRange reads range of the object from Blobovnicza by address.
 //
 // Returns any error encountered that
 // did not allow to completely read the object.

@@ -6,7 +6,7 @@ type GetSmallPrm struct {
 rwBlobovniczaID
 }
-// GetBigRes groups resulting values of GetBig operation.
+// GetSmallRes groups resulting values of GetSmall operation.
 type GetSmallRes struct {
 roObject
 }

@@ -2,7 +2,7 @@ package blobstor
 import "github.com/nspcc-dev/neofs-node/pkg/local_object_storage/blobstor/fstree"
-// FSTree returns file-system tree for big object store.
+// DumpInfo returns information about blob stor.
 func (b *BlobStor) DumpInfo() fstree.Info {
 return b.fsTree.Info
 }

@@ -17,7 +17,7 @@ type RngPrm struct {
 addr *objectSDK.Address
 }
-// GetRes groups resulting values of GetRange operation.
+// RngRes groups resulting values of GetRange operation.
 type RngRes struct {
 obj *object.Object
 }

@@ -49,7 +49,7 @@ type MovableRes struct {
 addrList []*objectSDK.Address
 }
-// WithAddress sets address of the object to prevent moving into another shard.
+// AddressList returns resulting addresses of Movable operation.
 func (p *MovableRes) AddressList() []*objectSDK.Address {
 return p.addrList
 }

@@ -14,7 +14,7 @@ type ToMoveItPrm struct {
 // ToMoveItRes encapsulates results of ToMoveIt operation.
 type ToMoveItRes struct{}
-// WithAdderss sets object address that should be marked to move into another
+// WithAddress sets object address that should be marked to move into another
 // shard.
 func (p *ToMoveItPrm) WithAddress(addr *objectSDK.Address) *ToMoveItPrm {
 if p != nil {

@@ -16,7 +16,7 @@ type RngPrm struct {
 addr *objectSDK.Address
 }
-// RngPrm groups resulting values of GetRange operation.
+// RngRes groups resulting values of GetRange operation.
 type RngRes struct {
 obj *object.Object
 }

@@ -110,7 +110,7 @@ func WithMetaBaseOptions(opts ...meta.Option) Option {
 }
 }
-// WithMetaBaseOptions returns option to set internal metabase options.
+// WithWriteCacheOptions returns option to set internal write cache options.
 func WithWriteCacheOptions(opts ...writecache.Option) Option {
 return func(c *cfg) {
 c.writeCacheOpts = opts

@@ -31,7 +31,7 @@ func (w *ClientWrapper) PutAuditResult(result *auditAPI.Result) error {
 PutAuditResult(args)
 }
-// ListAuditResults returns a list of all audit result IDs inside audit contract.
+// ListAllAuditResultID returns a list of all audit result IDs inside audit contract.
 func (w *ClientWrapper) ListAllAuditResultID() ([]ResultID, error) {
 args := audit.ListResultsArgs{}

@@ -32,7 +32,7 @@ func defaultOpts() *opts {
 return new(opts)
 }
-// TryNotaryInvoke returns option to enable
+// TryNotary returns option to enable
 // notary invocation tries.
 func TryNotary() Option {
 return func(o *opts) {

@@ -41,7 +41,7 @@ func (p *PutSizeArgs) SetReporterKey(v []byte) {
 p.reporterKey = v
 }
-// Put invokes the call of put container method
+// PutSize invokes the call of put container size method
 // of NeoFS Container contract.
 func (c *Client) PutSize(args PutSizeArgs) error {
 err := c.client.Invoke(

@@ -278,7 +278,7 @@ type Estimation struct {
 Reporter []byte
 }
-// Estimation is a structure of grouped container load estimation inside Container contract.
+// Estimations is a structure of grouped container load estimation inside Container contract.
 type Estimations struct {
 ContainerID *cid.ID

@@ -31,7 +31,7 @@ func defaultOpts() *opts {
 return new(opts)
 }
-// TryNotaryInvoke returns option to enable
+// TryNotary returns option to enable
 // notary invocation tries.
 func TryNotary() Option {
 return func(o *opts) {

@@ -13,8 +13,8 @@ type ConfigArgs struct {
 key []byte
 }
-// EpochValues groups the stack parameters
-// returned by get epoch number test invoke.
+// ConfigValues groups the stack parameters
+// returned by get config test invoke.
 type ConfigValues struct {
 val interface{}
 }

@@ -69,7 +69,7 @@ func (c *Client) NetMap(_ GetNetMapArgs) (*GetNetMapValues, error) {
 return peersFromStackItems(prms, c.netMapMethod)
 }
-// NetMap performs the test invoke of get snapshot of network map
+// Snapshot performs the test invoke of get snapshot of network map
 // from NeoFS Netmap contract. Contract saves only one previous epoch,
 // so all invokes with diff > 1 return error.
 func (c *Client) Snapshot(a GetSnapshotArgs) (*GetNetMapValues, error) {

@@ -7,7 +7,7 @@ import (
 netmap2 "github.com/nspcc-dev/neofs-node/pkg/morph/client/netmap"
 )
-// Fetch returns current netmap node infos.
+// Snapshot returns current netmap node infos.
 // Consider using pkg/morph/client/netmap for this.
 func (w *Wrapper) Snapshot() (*netmap.Netmap, error) {
 res, err := w.client.Snapshot(netmap2.GetSnapshotArgs{})

@@ -20,7 +20,7 @@ type (
 id ReputationID
 }
-// GetResults groups the result of "get reputation value" and
+// GetResult groups the result of "get reputation value" and
 // "get reputation value by reputation id" test invocations.
 GetResult struct {
 reputations []reputation.GlobalTrust

@@ -23,7 +23,7 @@ func defaultOpts() *opts {
 return new(opts)
 }
-// TryNotaryInvoke returns option to enable
+// TryNotary returns option to enable
 // notary invocation tries.
 func TryNotary() Option {
 return func(o *opts) {

@@ -37,7 +37,7 @@ func defaultStaticOpts() *staticOpts {
 return new(staticOpts)
 }
-// TryNotaryInvoke returns option to enable
+// TryNotary returns option to enable
 // notary invocation tries.
 func TryNotary() StaticClientOption {
 return func(o *staticOpts) {

@@ -90,14 +90,14 @@ func WithMaxPDPSleepInterval(dur time.Duration) Option {
 }
 }
-// WithPDPWorkerPool returns option to set worker pool for PDP pairs processing.
+// WithPDPWorkerPoolGenerator returns option to set worker pool for PDP pairs processing.
 func WithPDPWorkerPoolGenerator(f func() (util.WorkerPool, error)) Option {
 return func(c *cfg) {
 c.pdpPoolGenerator = f
 }
 }
-// WithPoRWorkerPool returns option to set worker pool for PoR SG processing.
+// WithPoRWorkerPoolGenerator returns option to set worker pool for PoR SG processing.
 func WithPoRWorkerPoolGenerator(f func() (util.WorkerPool, error)) Option {
 return func(c *cfg) {
 c.porPoolGenerator = f

@@ -73,7 +73,7 @@ type Writer interface {
 io.Closer
 }
-// IteratorProvider is a group of methods provided
+// WriterProvider is a group of methods provided
 // by entity which generates keepers of
 // UsedSpaceAnnouncement values.
 type WriterProvider interface {

@@ -29,7 +29,7 @@ func WithNextService(v objectSvc.ServiceServer) Option {
 }
 }
-// WithEACLValidator returns options to set eACL validator options.
+// WithEACLValidatorOptions returns options to set eACL validator options.
 func WithEACLValidatorOptions(v ...eacl.Option) Option {
 return func(c *cfg) {
 c.eACLOpts = v

@@ -86,14 +86,14 @@ func WithHeadService(h *getsvc.Service) Option {
 }
 }
-// WithClientCache returns option to set cache of remote node clients.
+// WithSearchService returns option to set search service.
 func WithSearchService(s *searchsvc.Service) Option {
 return func(c *cfg) {
 c.searcher = (*searchSvcWrapper)(s)
 }
 }
-// WithClientOptions returns option to specify options of remote node clients.
+// WithPutService returns option to specify put service.
 func WithPutService(p *putsvc.Service) Option {
 return func(c *cfg) {
 c.placer = (*putSvcWrapper)(p)

@@ -34,7 +34,7 @@ func NewService(opts ...Option) *Service {
 }
 }
-// Get calls internal service and returns v2 object stream.
+// Search calls internal service and returns v2 object stream.
 func (s *Service) Search(req *objectV2.SearchRequest, stream objectSvc.SearchStream) error {
 p, err := s.toPrm(req, stream)
 if err != nil {

@@ -100,7 +100,7 @@ func (a *AccessIdentifiers) Parent() *objectSDK.Object {
 return nil
 }
-// WithParentID returns AccessIdentifiers with passed parent identifier.
+// WithParent returns AccessIdentifiers with passed parent identifier.
 func (a *AccessIdentifiers) WithParent(v *objectSDK.Object) *AccessIdentifiers {
 res := a
 if res == nil {

@@ -65,7 +65,7 @@ func (x *Storage) AllDaughterTrusts(epoch uint64) (*DaughterStorage, bool) {
 return s, ok
 }
-// maps IDs of daughter peers to repositories of the local trusts to their providers.
+// DaughterStorage maps IDs of daughter peers to repositories of the local trusts to their providers.
 type DaughterStorage struct {
 mtx sync.RWMutex

@@ -16,7 +16,7 @@ type ClientMessageStreamer struct {
 close util.ClientStreamCloser
 }
-// Recv calls send method of internal streamer.
+// Send calls send method of internal streamer.
 func (s *ClientMessageStreamer) Send(req interface{}) error {
 if err := s.send(req); err != nil {
 return fmt.Errorf("(%T) could not send the request: %w", s, err)

@@ -132,7 +132,7 @@ func (db *DB) Put(key locodedb.Key, rec locodedb.Record) error {
 var errRecordNotFound = errors.New("record not found")
-// Put reads the record by key from underlying BoltDB instance.
+// Get reads the record by key from underlying BoltDB instance.
 //
 // Returns an error if no record is presented by key in DB.
 //