[#1115] *: link TODOs to corresponding issues
Signed-off-by: Evgenii Stratonikov <evgeniy@nspcc.ru>
parent 692790a899
commit 050a4bb2b0
51 changed files with 49 additions and 97 deletions
@@ -93,7 +93,6 @@ func main() {
 	select {
 	case <-ctx.Done():
 	case err := <-intErr:
-		// todo: restart application instead of shutdown
 		log.Info("internal error", zap.String("msg", err.Error()))
 	}

@@ -394,7 +394,7 @@ func (s *signedEACLTable) ReadSignedData(_ []byte) ([]byte, error) {
 }

 func (s *signedEACLTable) SignedDataSize() int {
-	// TODO: add eacl.Table.Size method
+	// TODO: #1147 add eacl.Table.Size method
 	return (*eaclSDK.Table)(s).ToV2().StableSize()
 }
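The two methods in the hunk above follow a common signed-payload pattern: SignedDataSize reports the exact buffer size up front and ReadSignedData fills it. A minimal sketch of a consumer of that pattern; the names signedPayload, digest, and bytesPayload are illustrative, not part of the project's API.

package main

import (
	"crypto/sha256"
	"fmt"
)

// signedPayload mirrors the two-method contract used by signedEACLTable.
type signedPayload interface {
	ReadSignedData([]byte) ([]byte, error)
	SignedDataSize() int
}

// digest allocates exactly once, using the size reported by the payload.
func digest(p signedPayload) ([sha256.Size]byte, error) {
	buf := make([]byte, p.SignedDataSize())
	data, err := p.ReadSignedData(buf)
	if err != nil {
		return [sha256.Size]byte{}, err
	}
	return sha256.Sum256(data), nil
}

type bytesPayload []byte

func (b bytesPayload) ReadSignedData(buf []byte) ([]byte, error) { return append(buf[:0], b...), nil }
func (b bytesPayload) SignedDataSize() int                       { return len(b) }

func main() {
	sum, _ := digest(bytesPayload("example"))
	fmt.Printf("%x\n", sum)
}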
@@ -58,7 +58,7 @@ type TrustWriterProvider struct {
 func (twp *TrustWriterProvider) InitWriter(ctx reputationcommon.Context) (reputationcommon.Writer, error) {
 	eiContext, ok := ctx.(eigentrustcalc.Context)
 	if !ok {
-		// TODO: think if this can be done without such limitation
+		// TODO: #1164 think if this can be done without such limitation
 		panic(ErrIncorrectContextPanicMsg)
 	}

@@ -112,7 +112,7 @@ func (v *FormatValidator) validateSignatureKey(obj *Object) error {
 		return v.checkOwnerKey(obj.OwnerID(), obj.Signature().Key())
 	}

-	// FIXME: perform token verification
+	// FIXME: #1159 perform token verification

 	return nil
 }

@@ -269,8 +269,6 @@ func (v *FormatValidator) checkOwner(obj *Object) error {
 }

 // WithNetState returns options to set network state interface.
-//
-// FIXME: network state is a required parameter.
 func WithNetState(netState netmap.State) FormatValidatorOption {
 	return func(c *cfg) {
 		c.netState = netState
@@ -121,8 +121,6 @@ type (
 		// Set of component runners which
 		// should report start errors
 		// to the application.
-		//
-		// TODO: unify with workers.
 		runners []func(chan<- error)

 		subnetHandler

@@ -831,8 +829,6 @@ func New(ctx context.Context, log *zap.Logger, cfg *viper.Viper) (*Server, error
 		return nil, err
 	}

-	// todo: create vivid id component
-
 	// initialize epoch timers
 	server.epochTimer = newEpochTimer(&epochTimerArgs{
 		l: server.log,

@@ -191,7 +191,7 @@ func (cp *Processor) checkDeleteContainer(e *containerEvent.Delete) error {

 	if token != nil {
 		// check token context
-		// TODO: think how to avoid version casts
+		// TODO: #1147 think how to avoid version casts
 		idV2 := new(refs.ContainerID)
 		idV2.SetValue(binCID)
@@ -86,7 +86,6 @@ func (np *Processor) processWithdraw(withdraw *neofsEvent.Withdraw) {
 	}

 	// create lock account
-	// fixme: check collision there, consider reversed script hash
 	lock, err := util.Uint160DecodeBytesBE(withdraw.ID()[:util.Uint160Size])
 	if err != nil {
 		np.log.Error("can't create lock account", zap.Error(err))

@@ -74,7 +74,7 @@ func (np *Processor) checkBindCommon(e *bindCommonContext) error {

 func (np *Processor) approveBindCommon(e *bindCommonContext) {
 	// calculate wallet address
-	// TODO: implement some utilities in API Go lib to do it
+	// TODO: nspcc-dev/neofs-sdk-go#134 implement some utilities in API Go lib to do it
 	scriptHash := e.User()

 	u160, err := util.Uint160DecodeBytesBE(scriptHash)

@@ -12,7 +12,7 @@ func (rp *Processor) handlePutReputation(ev event.Event) {
 	put := ev.(reputationEvent.Put)
 	peerID := put.PeerID()

-	// FIXME: do not use `ToV2` method outside neofs-api-go library
+	// FIXME: #1147 do not use `ToV2` method outside neofs-api-go library
 	rp.log.Info("notification",
 		zap.String("type", "reputation put"),
 		zap.String("peer_id", hex.EncodeToString(peerID.ToV2().GetPublicKey())))

@@ -64,7 +64,7 @@ func (rp *Processor) checkManagers(e uint64, mng apireputation.PeerID, peer apir
 	}

 	for _, m := range mm {
-		// FIXME: do not use `ToV2` method outside neofs-api-go library
+		// FIXME: #1147 do not use `ToV2` method outside neofs-api-go library
 		if bytes.Equal(mng.ToV2().GetPublicKey(), m.PublicKey()) {
 			return nil
 		}

@@ -91,7 +91,7 @@ func (rp *Processor) approvePutReputation(e *reputationEvent.Put) {
 		err = rp.reputationWrp.Put(args)
 	}
 	if err != nil {
-		// FIXME: do not use `ToV2` method outside neofs-api-go library
+		// FIXME: #1147 do not use `ToV2` method outside neofs-api-go library
 		rp.log.Warn("can't send approval tx for reputation value",
 			zap.String("peer_id", hex.EncodeToString(id.ToV2().GetPublicKey())),
 			zap.String("error", err.Error()))
@@ -81,11 +81,11 @@ func NewIncomeSettlementContext(p *IncomeSettlementContextPrms) (*IncomeSettleme
 }

 func bankOwnerID() (*owner.ID, error) {
-	u := util.Uint160{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, // todo: define const
+	u := util.Uint160{1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
 		1, 1, 1, 1, 1, 1, 1, 1, 1, 1}

 	o := owner.NewID()

 	// TODO: nspcc-dev/neofs-sdk-go#134 use `SetScriptHash` method.
 	err := o.Parse(address.Uint160ToString(u))
 	if err != nil {
 		return nil, err
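For context on the unchanged lines above: address.Uint160ToString (from nspcc-dev/neo-go) encodes the 20-byte script hash into a NEO address string, which the owner ID is then parsed from. A hedged sketch of just the neo-go part, reusing the same placeholder bytes as the diff:

package main

import (
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/encoding/address"
	"github.com/nspcc-dev/neo-go/pkg/util"
)

func main() {
	// The same placeholder script hash as in bankOwnerID above.
	u := util.Uint160{1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
		1, 1, 1, 1, 1, 1, 1, 1, 1, 1}

	// Uint160ToString produces the base58check-encoded NEO address.
	fmt.Println(address.Uint160ToString(u))
}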
@@ -145,7 +145,6 @@ func (c *ClientCache) GetHeader(task *audit.Task, node *netmap.Node, id *oidSDK.
 	var obj *object.Object

 	if relay {
-		// todo: function sets hardcoded TTL value, but instead we can set TTL based on container length
 		obj, err = neofsapiclient.GetObjectHeaderFromContainer(cctx, cli, objAddress)
 	} else {
 		obj, err = neofsapiclient.GetRawObjectHeaderLocally(cctx, cli, objAddress)

@@ -312,5 +312,5 @@ func (s *Server) handleSubnetRemoval(e event.Event) {
 		return
 	}

-	// TODO: handle removal of the subnet in netmap candidates
+	// TODO: #1162 handle removal of the subnet in netmap candidates
 }

@@ -249,8 +249,6 @@ func (b *blobovniczas) get(prm *GetSmallPrm) (res *GetSmallRes, err error) {
 //
 // If blobocvnicza ID is specified, only this blobovnicza is processed.
 // Otherwise, all blobovniczas are processed descending weight.
-//
-// TODO:quite similar to GET, can be unified
 func (b *blobovniczas) delete(prm *DeleteSmallPrm) (res *DeleteSmallRes, err error) {
 	bPrm := new(blobovnicza.DeletePrm)
 	bPrm.SetAddress(prm.addr)

@@ -304,8 +302,6 @@ func (b *blobovniczas) delete(prm *DeleteSmallPrm) (res *DeleteSmallRes, err err
 //
 // If blobocvnicza ID is specified, only this blobovnicza is processed.
 // Otherwise, all blobovniczas are processed descending weight.
-//
-// TODO:quite similar to GET, can be unified
 func (b *blobovniczas) getRange(prm *GetRangeSmallPrm) (res *GetRangeSmallRes, err error) {
 	if prm.blobovniczaID != nil {
 		blz, err := b.openBlobovnicza(prm.blobovniczaID.String())

@@ -30,7 +30,7 @@ func (b *BlobStor) Exists(prm *ExistsPrm) (*ExistsRes, error) {
 	// check presence in shallow dir first (cheaper)
 	exists, err := b.existsBig(prm.addr)
 	if !exists {
-		// TODO: do smth if err != nil
+		// TODO: #1143 do smth if err != nil

 		// check presence in blobovnicza
 		exists, err = b.existsSmall(prm.addr)

@@ -57,6 +57,6 @@ func (b *BlobStor) existsBig(addr *addressSDK.Address) (bool, error) {

 // checks if object is presented in blobovnicza.
 func (b *BlobStor) existsSmall(_ *addressSDK.Address) (bool, error) {
-	// TODO: implement
+	// TODO: #1143 implement
 	return false, nil
 }
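The last two hunks belong to one two-tier lookup: Exists probes the cheap filesystem part first and only falls back to the (still unimplemented, #1143) blobovnicza check on a miss. A compressed sketch of the control flow, with existsBig/existsSmall reduced to function values; the error handling for the first probe is one possible answer to the remaining TODO, not the project's decision.

package main

import "fmt"

// exists models BlobStor.Exists: the shallow-dir probe runs first because it
// is cheaper; the blobovnicza tree is consulted only on a miss.
func exists(addr string,
	existsBig, existsSmall func(string) (bool, error),
) (bool, error) {
	ok, err := existsBig(addr)
	if ok {
		return true, nil
	}
	if err != nil {
		return false, err // TODO #1143 territory: today this error is dropped
	}
	return existsSmall(addr)
}

func main() {
	big := func(string) (bool, error) { return false, nil }
	small := func(string) (bool, error) { return true, nil }
	fmt.Println(exists("addr", big, small)) // true <nil>
}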
@@ -49,7 +49,7 @@ func (e *StorageEngine) put(prm *PutPrm) (*PutRes, error) {
 		defer elapsed(e.metrics.AddPutDuration)()
 	}

-	_, err := e.exists(prm.obj.Address()) // todo: make this check parallel
+	_, err := e.exists(prm.obj.Address()) // TODO: #1146 make this check parallel
 	if err != nil {
 		return nil, err
 	}
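TODO #1146 above asks to parallelize the existence check. A hedged sketch of one way to do that with golang.org/x/sync/errgroup, assuming the engine fans the check out over its shards; Shard here is a stand-in interface, not the engine's real type.

package main

import (
	"fmt"
	"sync/atomic"

	"golang.org/x/sync/errgroup"
)

type Shard interface {
	Exists(addr string) (bool, error)
}

// existsParallel queries every shard concurrently and ORs the results.
func existsParallel(shards []Shard, addr string) (bool, error) {
	var found int32
	var g errgroup.Group
	for _, sh := range shards {
		sh := sh // capture loop variable for the goroutine
		g.Go(func() error {
			ok, err := sh.Exists(addr)
			if err != nil {
				return err
			}
			if ok {
				atomic.StoreInt32(&found, 1)
			}
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		return false, err
	}
	return atomic.LoadInt32(&found) == 1, nil
}

type stub bool

func (s stub) Exists(string) (bool, error) { return bool(s), nil }

func main() {
	fmt.Println(existsParallel([]Shard{stub(false), stub(true)}, "addr"))
}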
@@ -108,7 +108,6 @@ func (e *StorageEngine) put(prm *PutPrm) (*PutRes, error) {

 		finished = true
 	}); err != nil {
-		// TODO: log errors except ErrOverload when errors of util.WorkerPool will be documented
 		close(exitCh)
 	}

@@ -128,7 +128,6 @@ func (db *DB) selectObjects(tx *bbolt.Tx, cid *cid.ID, fs object.SearchFilters)

 		addr, err := addressFromKey([]byte(a))
 		if err != nil {
-			// TODO: storage was broken, so we need to handle it
 			return nil, err
 		}

@@ -219,7 +218,6 @@ func (db *DB) selectFastFilter(
 	}
 }

-// TODO: move to DB struct
 var mBucketNaming = map[string][]func(*cid.ID) []byte{
 	v2object.TypeRegular.String():   {primaryBucketName, parentBucketName},
 	v2object.TypeTombstone.String(): {tombstoneBucketName},

@@ -577,7 +575,7 @@ func blindlyProcess(fs object.SearchFilters) bool {
 			return true
 		}

-		// TODO: check other cases
+		// TODO: #1148 check other cases
 		// e.g. (a == b) && (a != b)
 	}

@@ -586,6 +584,6 @@ func blindlyProcess(fs object.SearchFilters) bool {

 // returns true if string key is a reserved system filter key.
 func isSystemKey(key string) bool {
-	// FIXME: version-dependent approach
+	// FIXME: #1147 version-dependent approach
 	return strings.HasPrefix(key, v2object.ReservedFilterPrefix)
 }

@@ -188,7 +188,7 @@ func (c *cache) flushWorker(num int) {
 		metaOnly := false

 		// Give priority to direct put.
-		// TODO(fyrchik): do this once in N iterations depending on load
+		// TODO(fyrchik): #1150 do this once in N iterations depending on load
 		select {
 		case obj = <-priorityCh:
 			metaOnly = num%3 == 1

@@ -43,7 +43,7 @@ func (c *cache) Get(addr *addressSDK.Address) (*object.Object, error) {

 // Head returns object header from write-cache.
 func (c *cache) Head(addr *addressSDK.Address) (*object.Object, error) {
-	// TODO: easiest to implement solution is presented here, consider more efficient way, e.g.:
+	// TODO: #1149 easiest to implement solution is presented here, consider more efficient way, e.g.:
 	// - provide header as common object.Object to Put, but marked to prevent correlation with full object
 	//   (all write-cache logic will automatically spread to headers, except flushing)
 	// - cut header from in-memory objects directly and persist headers into particular bucket of DB
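The "easiest" solution TODO #1149 refers to is: serve Head by reading the full object and discarding the payload. A toy model of that trade-off (simple, but pays full-object I/O per header request); the types are illustrative, not the write-cache's.

package main

import "fmt"

type Object struct {
	Header  map[string]string
	Payload []byte
}

type getter interface {
	Get(addr string) (*Object, error)
}

// head fetches the whole object and strips the payload: correct, but it reads
// the complete payload from the cache just to throw it away.
func head(c getter, addr string) (*Object, error) {
	obj, err := c.Get(addr)
	if err != nil {
		return nil, err
	}
	return &Object{Header: obj.Header}, nil
}

type mem map[string]*Object

func (m mem) Get(addr string) (*Object, error) {
	if o, ok := m[addr]; ok {
		return o, nil
	}
	return nil, fmt.Errorf("%s: not found", addr)
}

func main() {
	c := mem{"a": {Header: map[string]string{"size": "3"}, Payload: []byte("abc")}}
	h, _ := head(c, "a")
	fmt.Println(h.Header, len(h.Payload)) // map[size:3] 0
}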
@@ -377,11 +377,11 @@ func toStackParameter(value interface{}) (sc.Parameter, error) {
 		Value: value,
 	}

-	// todo: add more types
+	// TODO: #1141 add more types
 	switch v := value.(type) {
 	case []byte:
 		result.Type = sc.ByteArrayType
-	case int64: // TODO: add other numerical types
+	case int64:
 		result.Type = sc.IntegerType
 	case [][]byte:
 		arr := make([]sc.Parameter, 0, len(v))

@@ -412,7 +412,7 @@ func toStackParameter(value interface{}) (sc.Parameter, error) {

 		return toStackParameter(arr)
 	case bool:
-		// FIXME: there are some problems with BoolType in neo-go,
+		// FIXME: #1141 there are some problems with BoolType in neo-go,
 		// so we use compatible type
 		result.Type = sc.IntegerType
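Both hunks above touch the same type switch. A self-contained model of its shape, including the bool-as-integer workaround the FIXME describes; the Parameter type imitates neo-go's sc.Parameter but is redeclared here so the sketch compiles on its own.

package main

import "fmt"

type ParamType int

const (
	ByteArrayType ParamType = iota
	IntegerType
)

type Parameter struct {
	Type  ParamType
	Value interface{}
}

func toStackParameter(value interface{}) (Parameter, error) {
	result := Parameter{Value: value}

	switch v := value.(type) {
	case []byte:
		result.Type = ByteArrayType
	case int64:
		result.Type = IntegerType
	case bool:
		// The FIXME's workaround: encode bools as 0/1 integers because of
		// BoolType issues in neo-go (#1141 in the diff above).
		result.Type = IntegerType
		if v {
			result.Value = int64(1)
		} else {
			result.Value = int64(0)
		}
	default:
		return result, fmt.Errorf("unsupported parameter %T", value)
	}

	return result, nil
}

func main() {
	fmt.Println(toStackParameter(true))     // {1 1} <nil>
	fmt.Println(toStackParameter([]byte{})) // {0 []} <nil>
}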
@@ -876,7 +876,7 @@ func (c *Client) CalculateNonceAndVUB(hash util.Uint256) (nonce uint32, vub uint
 		return 0, 0, nil
 	}

-	// TODO: cache values since some operations uses same TX as triggers
+	// TODO: #1151 cache values since some operations uses same TX as triggers
 	nonce = binary.LittleEndian.Uint32(hash.BytesLE())

 	height, err := c.client.GetTransactionHeight(hash)
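The nonce line kept in this hunk derives a deterministic value from the transaction hash, so every notary request triggered by the same TX agrees on the nonce. A stripped-down sketch; util.Uint256 and its BytesLE method are real neo-go API, the rest is illustrative.

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/nspcc-dev/neo-go/pkg/util"
)

// nonceFromTx mirrors the computation in CalculateNonceAndVUB: the first four
// little-endian bytes of the TX hash become the nonce.
func nonceFromTx(hash util.Uint256) uint32 {
	return binary.LittleEndian.Uint32(hash.BytesLE())
}

func main() {
	h, err := util.Uint256DecodeStringLE(
		"e93e82f7bdbba28225f1c2b6b19e135ecdc321e140c48fee98590a4c478a01f6")
	if err != nil {
		panic(err)
	}
	fmt.Println(nonceFromTx(h))
}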
@@ -272,7 +272,6 @@ loop:
 			continue loop
 		}

-		// TODO: consider asynchronous execution
 		for i := range l.blockHandlers {
 			l.blockHandlers[i](b)
 		}

@@ -85,7 +85,7 @@ const (
 // indicating that TLS is enabled. If multiaddress is provided
 // the argument is returned unchanged.
 func parseURI(s string) (string, bool, error) {
-	// TODO: code is copy-pasted from client.WithURIAddress function.
+	// TODO: #1151 code is copy-pasted from client.WithNetworkURIAddress function.
 	// Would be nice to share the code.
 	uri, err := url.ParseRequestURI(s)
 	if err != nil {
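For readers outside the codebase, the contract documented above is: return host:port plus a TLS flag derived from the scheme, or the argument unchanged when it is not a URI. A hedged re-implementation under those assumptions; the real function's accepted scheme set may differ.

package main

import (
	"fmt"
	"net/url"
)

func parseURI(s string) (string, bool, error) {
	uri, err := url.ParseRequestURI(s)
	if err != nil {
		return s, false, nil // not a URI: return the argument unchanged
	}

	switch uri.Scheme {
	case "grpcs": // TLS-enabled endpoint
		return uri.Host, true, nil
	case "grpc":
		return uri.Host, false, nil
	default:
		return "", false, fmt.Errorf("unsupported scheme: %q", uri.Scheme)
	}
}

func main() {
	fmt.Println(parseURI("grpcs://example.org:8082"))
}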
pkg/network/cache/client.go (vendored)
@@ -37,7 +37,7 @@ func (c *ClientCache) Get(info clientcore.NodeInfo) (clientcore.Client, error) {
 	// same host may have different connections(with tls or not),
 	// therefore, host+port pair is not unique

-	// FIXME: we should calculate map key regardless of the address order,
+	// FIXME: #1157 we should calculate map key regardless of the address order,
 	// but network.StringifyGroup is order-dependent.
 	// This works until the same mixed group is transmitted
 	// (for a network map, it seems to be true).

@@ -30,7 +30,6 @@ func (s *Server) Balance(ctx context.Context, req *accountingGRPC.BalanceRequest

 	resp, err := s.srv.Balance(ctx, balReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}
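The TODO deleted in this and the many following hunks asked how to transport errors through gRPC. The stock answer in Go is google.golang.org/grpc/status: map internal sentinel errors to status codes at the transport boundary. A hedged sketch of such a mapper, not the approach the project later adopted.

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

var errNotFound = errors.New("object not found")

// toGRPCErr maps internal errors to gRPC status codes at the API boundary.
func toGRPCErr(err error) error {
	switch {
	case err == nil:
		return nil
	case errors.Is(err, errNotFound):
		return status.Error(codes.NotFound, err.Error())
	default:
		return status.Error(codes.Internal, err.Error())
	}
}

func main() {
	st, _ := status.FromError(toGRPCErr(errNotFound))
	fmt.Println(st.Code(), st.Message()) // NotFound object not found
}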
@@ -30,7 +30,6 @@ func (s *Server) Put(ctx context.Context, req *containerGRPC.PutRequest) (*conta

 	resp, err := s.srv.Put(ctx, putReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -46,7 +45,6 @@ func (s *Server) Delete(ctx context.Context, req *containerGRPC.DeleteRequest) (

 	resp, err := s.srv.Delete(ctx, delReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -62,7 +60,6 @@ func (s *Server) Get(ctx context.Context, req *containerGRPC.GetRequest) (*conta

 	resp, err := s.srv.Get(ctx, getReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -78,7 +75,6 @@ func (s *Server) List(ctx context.Context, req *containerGRPC.ListRequest) (*con

 	resp, err := s.srv.List(ctx, listReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -94,7 +90,6 @@ func (s *Server) SetExtendedACL(ctx context.Context, req *containerGRPC.SetExten

 	resp, err := s.srv.SetExtendedACL(ctx, setEACLReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -110,7 +105,6 @@ func (s *Server) GetExtendedACL(ctx context.Context, req *containerGRPC.GetExten

 	resp, err := s.srv.GetExtendedACL(ctx, getEACLReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -126,7 +120,6 @@ func (s *Server) AnnounceUsedSpace(ctx context.Context, req *containerGRPC.Annou

 	resp, err := s.srv.AnnounceUsedSpace(ctx, announceReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -32,7 +32,6 @@ func (s *Server) LocalNodeInfo(

 	resp, err := s.srv.LocalNodeInfo(ctx, nodeInfoReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -48,7 +47,6 @@ func (s *Server) NetworkInfo(ctx context.Context, req *netmapGRPC.NetworkInfoReq

 	resp, err := s.srv.NetworkInfo(ctx, netInfoReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -23,7 +23,6 @@ func (s *Server) Get(req *objectGRPC.GetRequest, gStream objectGRPC.ObjectServic
 		return err
 	}

-	// TODO: think about how we transport errors through gRPC
 	return s.srv.Get(
 		getReq,
 		&getStreamerV2{

@@ -23,7 +23,6 @@ func (s *Server) GetRange(req *objectGRPC.GetRangeRequest, gStream objectGRPC.Ob
 		return err
 	}

-	// TODO: think about how we transport errors through gRPC
 	return s.srv.GetRange(
 		getRngReq,
 		&getRangeStreamerV2{

@@ -23,7 +23,6 @@ func (s *Server) Search(req *objectGRPC.SearchRequest, gStream objectGRPC.Object
 		return err
 	}

-	// TODO: think about how we transport errors through gRPC
 	return s.srv.Search(
 		searchReq,
 		&searchStreamerV2{

@@ -28,7 +28,6 @@ func New(c objectSvc.ServiceServer) *Server {
 func (s *Server) Put(gStream objectGRPC.ObjectService_PutServer) error {
 	stream, err := s.srv.Put(gStream.Context())
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return err
 	}

@@ -76,7 +75,6 @@ func (s *Server) Delete(ctx context.Context, req *objectGRPC.DeleteRequest) (*ob

 	resp, err := s.srv.Delete(ctx, delReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -92,7 +90,6 @@ func (s *Server) Head(ctx context.Context, req *objectGRPC.HeadRequest) (*object

 	resp, err := s.srv.Head(ctx, searchReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -108,7 +105,6 @@ func (s *Server) GetRangeHash(ctx context.Context, req *objectGRPC.GetRangeHashR

 	resp, err := s.srv.GetRangeHash(ctx, hashRngReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -29,7 +29,6 @@ func (s *Server) AnnounceLocalTrust(ctx context.Context, r *reputation2.Announce

 	resp, err := s.srv.AnnounceLocalTrust(ctx, req)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -44,7 +43,6 @@ func (s *Server) AnnounceIntermediateResult(ctx context.Context, r *reputation2.

 	resp, err := s.srv.AnnounceIntermediateResult(ctx, req)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}

@@ -30,7 +30,6 @@ func (s *Server) Create(ctx context.Context, req *sessionGRPC.CreateRequest) (*s

 	resp, err := s.srv.Create(ctx, createReq)
 	if err != nil {
-		// TODO: think about how we transport errors through gRPC
 		return nil, err
 	}
@@ -109,7 +109,7 @@ func (c *Context) splitPayload(id *oidSDK.ID) []uint64 {

 func (c *Context) collectHashes(p *gamePair) {
 	fn := func(n *netmap.Node, rngs []*object.Range, hashWriter func([]byte)) {
-		// TODO: add order randomization
+		// TODO: #1163 add order randomization
 		for i := range rngs {
 			var sleepDur time.Duration
 			if c.maxPDPSleep > 0 {

@@ -8,7 +8,7 @@ import (
 	"github.com/nspcc-dev/neofs-api-go/v2/session"
 )

-// FIXME: (temp solution) we need to pass session token from header
+// FIXME: #1159 (temp solution) we need to pass session token from header
 type ContextWithToken struct {
 	context.Context

@@ -461,7 +461,6 @@ func (b Service) findRequestInfo(

 	// find verb from token if it is present
 	verb := sourceVerbOfRequest(req, op)
-	// todo: check verb sanity, if it was generated correctly. Do we need it ?

 	info.basicACL = basicACLHelper(cnr.BasicACL())
 	info.requestRole = role

@@ -750,7 +749,7 @@ func isValidBearer(reqInfo requestInfo, st netmap.State) bool {
 	// 3. Then check if container owner signed this token.
 	tokenIssuerKey := unmarshalPublicKey(token.GetSignature().GetKey())
 	if !isOwnerFromKey(reqInfo.cnrOwner, tokenIssuerKey) {
-		// todo: in this case we can issue all owner keys from neofs.id and check once again
+		// TODO: #1156 in this case we can issue all owner keys from neofs.id and check once again
 		return false
 	}

@@ -759,7 +758,7 @@ func isValidBearer(reqInfo requestInfo, st netmap.State) bool {
 	if tokenOwnerField != nil { // see bearer token owner field description
 		requestSenderKey := unmarshalPublicKey(reqInfo.senderKey)
 		if !isOwnerFromKey(tokenOwnerField, requestSenderKey) {
-			// todo: in this case we can issue all owner keys from neofs.id and check once again
+			// TODO: #1156 in this case we can issue all owner keys from neofs.id and check once again
 			return false
 		}
 	}

@@ -40,7 +40,6 @@ type (
 	}
 )

-// fixme: update classifier constructor
 func NewSenderClassifier(l *zap.Logger, ir InnerRingFetcher, nm core.Source) SenderClassifier {
 	return SenderClassifier{
 		log: l,

@@ -64,7 +63,7 @@ func (c SenderClassifier) Classify(

 	ownerKeyInBytes := ownerKey.Bytes()

-	// todo: get owner from neofs.id if present
+	// TODO: #1156 get owner from neofs.id if present

 	// if request owner is the same as container owner, return RoleUser
 	if ownerID.Equal(cnr.OwnerID()) {

@@ -204,7 +203,7 @@ func ownerFromToken(token *session.SessionToken) (*owner.ID, *keys.PublicKey, er
 	tokenOwner := owner.NewIDFromV2(token.GetBody().GetOwnerID())

 	if !isOwnerFromKey(tokenOwner, tokenIssuerKey) {
-		// todo: in this case we can issue all owner keys from neofs.id and check once again
+		// TODO: #1156 in this case we can issue all owner keys from neofs.id and check once again
 		return nil, nil, fmt.Errorf("%w: invalid session token owner", ErrMalformedRequest)
 	}

@@ -36,7 +36,7 @@ func (exec *execCtx) assemble() {
 			exec.overtakePayloadDirectly(children, nil, true)
 		}
 	} else {
-		// TODO: choose one-by-one restoring algorithm according to size
+		// TODO: #1155 choose one-by-one restoring algorithm according to size
 		// * if size > MAX => go right-to-left with HEAD and back with GET
 		// * else go right-to-left with GET and compose in single object before writing

@@ -47,7 +47,7 @@ func (exec *execCtx) assemble() {
 		}
 	} else if prev != nil {
 		if ok := exec.writeCollectedHeader(); ok {
-			// TODO: choose one-by-one restoring algorithm according to size
+			// TODO: #1155 choose one-by-one restoring algorithm according to size
 			// * if size > MAX => go right-to-left with HEAD and back with GET
 			// * else go right-to-left with GET and compose in single object before writing

@@ -76,7 +76,7 @@ func (exec *execCtx) processCurrentEpoch() bool {
 		default:
 		}

-		// TODO: consider parallel execution
-		// TODO: consider optimization: if status == SPLIT we can continue until
+		// TODO: #1142 consider parallel execution
+		// TODO: #1142 consider optimization: if status == SPLIT we can continue until
 		// we reach the best result - split info with linking object ID.
 		var info client.NodeInfo

@@ -28,10 +28,11 @@ func (s *Service) GetRangeHash(ctx context.Context, prm RangeHashPrm) (*RangeHas
 	for _, rng := range prm.rngs {
 		h := prm.hashGen()

-		// TODO: calculating of homomorphic hash (TZ) for "big" ranges can be optimized
-		// by "smaller" range hash requests spawn and response concatenation.
-		// NOTE: for non-homomorphic hashes (SHA256) this won't work with split-range.
+		// For big ranges we could fetch range-hashes from different nodes and concatenate them locally.
+		// However,
+		// 1. Potential gains are insignificant when operating in the Internet given typical latencies and losses.
+		// 2. Parallel solution is more complex in terms of code.
+		// 3. TZ-hash is likely to be disabled in private installations.
 		rngPrm := RangePrm{
 			commonPrm: prm.commonPrm,
 		}
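Background for the comment rewritten in the last hunk: a Tillich-Zémor (TZ) hash of a concatenation can be computed from the hashes of the parts, which is exactly what would make "fetch sub-range hashes and combine" possible. A sketch of that property, assuming tz.Sum and tz.Concat with the signatures published in nspcc-dev/tzhash.

package main

import (
	"bytes"
	"fmt"

	"github.com/nspcc-dev/tzhash/tz"
)

func main() {
	left, right := []byte("left part of payload"), []byte("right part")

	// Hash the parts independently, as different nodes could.
	hl, hr := tz.Sum(left), tz.Sum(right)

	// Combine the part hashes: TZ is homomorphic over concatenation.
	combined, err := tz.Concat([][]byte{hl[:], hr[:]})
	if err != nil {
		panic(err)
	}

	// Hash of the whole payload, computed directly.
	whole := tz.Sum(append(append([]byte{}, left...), right...))

	fmt.Println(bytes.Equal(combined, whole[:])) // true
}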
@@ -62,7 +62,7 @@ func (s *Service) toPrm(req *objectV2.GetRequest, stream objectSvc.GetObjectStre
 	// compose meta header of the local server
 	metaHdr := new(session.RequestMetaHeader)
 	metaHdr.SetTTL(meta.GetTTL() - 1)
-	// TODO: think how to set the other fields
+	// TODO: #1165 think how to set the other fields
 	metaHdr.SetOrigin(meta)

 	req.SetMetaHeader(metaHdr)

@@ -187,7 +187,7 @@ func (s *Service) toRangePrm(req *objectV2.GetRangeRequest, stream objectSvc.Get
 	// compose meta header of the local server
 	metaHdr := new(session.RequestMetaHeader)
 	metaHdr.SetTTL(meta.GetTTL() - 1)
-	// TODO: think how to set the other fields
+	// TODO: #1165 think how to set the other fields
 	metaHdr.SetOrigin(meta)

 	req.SetMetaHeader(metaHdr)

@@ -347,7 +347,7 @@ func (s *Service) toHeadPrm(ctx context.Context, req *objectV2.HeadRequest, resp
 	// compose meta header of the local server
 	metaHdr := new(session.RequestMetaHeader)
 	metaHdr.SetTTL(meta.GetTTL() - 1)
-	// TODO: think how to set the other fields
+	// TODO: #1165 think how to set the other fields
 	metaHdr.SetOrigin(meta)

 	req.SetMetaHeader(metaHdr)

@@ -135,7 +135,7 @@ func GetObject(prm GetObjectPrm) (res GetObjectRes, err error) {
 		err = apistatus.ErrFromStatus(res.cliRes.Status())
 	}

-	// FIXME: object.ErrAlreadyRemoved never returns
+	// FIXME: #1158 object.ErrAlreadyRemoved never returns

 	return
 }

@@ -186,7 +186,7 @@ func HeadObject(prm HeadObjectPrm) (res HeadObjectRes, err error) {
 		err = apistatus.ErrFromStatus(res.cliRes.Status())
 	}

-	// FIXME: object.ErrAlreadyRemoved never returns
+	// FIXME: #1158 object.ErrAlreadyRemoved never returns

 	return
 }

@@ -244,7 +244,7 @@ func PayloadRange(prm PayloadRangePrm) (res PayloadRangeRes, err error) {
 		err = apistatus.ErrFromStatus(res.cliRes.Status())
 	}

-	// FIXME: object.ErrAlreadyRemoved never returns
+	// FIXME: #1158 object.ErrAlreadyRemoved never returns

 	return
 }

@@ -76,7 +76,7 @@ func (exec *execCtx) processCurrentEpoch() bool {
 		default:
 		}

-		// TODO: consider parallel execution
+		// TODO: #1142 consider parallel execution
 		var info client.NodeInfo

 		client.NodeInfoFromNetmapElement(&info, addrs[i])

@@ -53,7 +53,7 @@ func (s *Service) toPrm(req *objectV2.SearchRequest, stream objectSvc.SearchStre
 	// compose meta header of the local server
 	metaHdr := new(session.RequestMetaHeader)
 	metaHdr.SetTTL(meta.GetTTL() - 1)
-	// TODO: think how to set the other fields
+	// TODO: #1165 think how to set the other fields
 	metaHdr.SetOrigin(meta)

 	req.SetMetaHeader(metaHdr)

@@ -10,7 +10,6 @@ import (
 )

 var (
-	// todo(alexvanin): should be a part of status API
 	errNoSessionToken      = errors.New("session token does not exist")
 	errSessionTokenExpired = errors.New("session token has been expired")
 )

@@ -54,7 +54,6 @@ func (p *localPlacement) BuildPlacement(addr *addressSDK.Address, policy *netmap

 			err := addr.FromIterator(vs[i][j])
 			if err != nil {
-				// TODO: log error
 				continue
 			}

@@ -88,7 +87,6 @@ func (p *remotePlacement) BuildPlacement(addr *addressSDK.Address, policy *netma

 			err := addr.FromIterator(vs[i][j])
 			if err != nil {
-				// TODO: log error
 				continue
 			}

@@ -162,7 +162,6 @@ func (t *Traverser) Next() []Node {
 	for i := 0; i < count; i++ {
 		err := nodes[i].addresses.FromIterator(t.vectors[0][i])
 		if err != nil {
-			// TODO: log error
 			return nil
 		}

@@ -45,8 +45,6 @@ const tzChecksumSize = 64
 // of the writing object and writes generated objects to targets from initializer.
 //
 // Objects w/ payload size less or equal than max size remain untouched.
-//
-// TODO: describe behavior in details.
 func NewPayloadSizeLimiter(maxSize uint64, targetInit TargetInitializer) ObjectTarget {
 	return &payloadSizeLimiter{
 		maxSize: maxSize,

@@ -93,8 +93,6 @@ func (p *Policer) processNodes(ctx context.Context, addr *addressSDK.Address, no
 		cancel()

 		if err != nil {
-			// FIXME: this is a temporary solution to resolve 404 response from remote node
-			// We need to distinguish problem nodes from nodes without an object.
 			if strings.Contains(err.Error(), headsvc.ErrNotFound.Error()) {
 				continue
 			} else {
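The FIXME deleted in the last hunk concerned the string matching kept below it (strings.Contains on the error text). A hedged sketch of the sturdier idiom: a sentinel error compared with errors.Is, which survives wrapping across layers; the names are illustrative.

package main

import (
	"errors"
	"fmt"
)

var errObjectNotFound = errors.New("object not found")

// headRemote models a remote HEAD that wraps the sentinel on a miss.
func headRemote(found bool) error {
	if !found {
		return fmt.Errorf("remote HEAD failed: %w", errObjectNotFound)
	}
	return nil
}

func main() {
	err := headRemote(false)
	// errors.Is unwraps the chain, so matching is robust against wrapping,
	// unlike comparing error message substrings.
	fmt.Println(errors.Is(err, errObjectNotFound)) // true
}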
@@ -45,11 +45,11 @@ func NewService(opts ...Option) *Service {
 func setMeta(resp util.ResponseMessage, cfg *cfg) {
 	meta := new(session.ResponseMetaHeader)
 	meta.SetVersion(cfg.version)
-	meta.SetTTL(1) // FIXME: TTL must be calculated
+	meta.SetTTL(1) // FIXME: #1160 TTL must be calculated
 	meta.SetEpoch(cfg.state.CurrentEpoch())

 	if origin := resp.GetMetaHeader(); origin != nil {
-		// FIXME: what if origin is set by local server?
+		// FIXME: #1160 what if origin is set by local server?
 		meta.SetOrigin(origin)
 	}
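FIXME #1160 is about the hard-coded meta.SetTTL(1). The hop-count scheme the request side already uses in earlier hunks (metaHdr.SetTTL(meta.GetTTL() - 1)) looks roughly like this toy model; it illustrates the mechanism, not the planned fix.

package main

import (
	"errors"
	"fmt"
)

// forward models one hop: copy the request with TTL-1, refuse at zero.
func forward(ttl uint32) (uint32, error) {
	if ttl <= 1 {
		return 0, errors.New("TTL expired: not forwarding")
	}
	return ttl - 1, nil
}

func main() {
	ttl := uint32(2) // a typical client-side TTL in NeoFS requests
	for {
		next, err := forward(ttl)
		if err != nil {
			fmt.Println(err)
			return
		}
		fmt.Println("forwarded with TTL", next)
		ttl = next
	}
}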
@@ -114,8 +114,6 @@ func (db *DB) Put(key locodedb.Key, rec locodedb.Record) error {
 		return fmt.Errorf("could not create country bucket: %w", err)
 	}

-	// TODO: write country name once in Country bucket
-
 	locationKey, err := locationBucketKey(key.LocationCode())
 	if err != nil {
 		return err