[#574] Check if the container has been really removed from neo-go #624
5 changed files with 34 additions and 1 deletion
@@ -31,6 +31,10 @@ func (c cnrSource) Get(id cid.ID) (*container.Container, error) {

```go
	return c.src.Get(id)
}

func (c cnrSource) DeletionInfo(cid cid.ID) (*container.DelInfo, error) {
	return c.cli.DeletionInfo(cid)
}

func (c cnrSource) List() ([]cid.ID, error) {
	return c.cli.ContainersOf(nil)
}
```
@@ -70,6 +70,7 @@ const (

```go
	TreeContainerTreesHaveBeenSynced         = "container trees have been synced"
	TreeCouldNotQueryTreesForSynchronization = "could not query trees for synchronization"
	TreeRemovingRedundantTrees               = "removing redundant trees..."
	TreeCouldNotCheckIfContainerExisted      = "could not check if the container ever existed"
	TreeCouldNotRemoveRedundantTree          = "could not remove redundant tree"
	TreeCouldNotCalculateContainerNodes      = "could not calculate container nodes"
	TreeFailedToApplyReplicatedOperation     = "failed to apply replicated operation"
```
@@ -14,6 +14,9 @@ import (

```go
type ContainerSource interface {
	container.Source

	DeletionInfo(cid.ID) (*container.DelInfo, error)

	// List must return list of all the containers in the FrostFS network
	// at the moment of a call and any error that does not allow fetching
	// container information.
```

fyrchik marked this conversation as resolved (outdated)

fyrchik commented:
`at the moment of a call` is somewhat obvious (and, probably, doesn't say what you wanted to). How about getting rid of it?

aarifullin commented:
This was a comment for a comment :) Removed.
@@ -53,6 +53,10 @@ func (s dummyContainerSource) Get(id cid.ID) (*containercore.Container, error) {

```go
	return cnt, nil
}

func (s dummyContainerSource) DeletionInfo(id cid.ID) (*containercore.DelInfo, error) {
	return &containercore.DelInfo{}, nil
}

type dummyEACLSource map[string]*containercore.EACL

func (s dummyEACLSource) GetEACL(id cid.ID) (*containercore.EACL, error) {
```
@@ -18,6 +18,7 @@ import (

```go
	metrics "git.frostfs.info/TrueCloudLab/frostfs-observability/metrics/grpc"
	tracing "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	tracing_grpc "git.frostfs.info/TrueCloudLab/frostfs-observability/tracing/grpc"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
	"github.com/panjf2000/ants/v2"
```
@@ -440,6 +441,18 @@ func (s *Service) syncContainers(ctx context.Context, cnrs []cid.ID) {

```go
	wg.Wait()
}

func (s *Service) containerEverExisted(cid cid.ID) (bool, error) {
	_, err := s.cnrSource.DeletionInfo(cid)
	if err == nil {
		return true, nil
	}
	var errContainerNotFound *apistatus.ContainerNotFound
```
fyrchik marked this conversation as resolved (outdated)

fyrchik commented:
I am not sure `Is` will work now as expected (compared to `As`). Do we use `Is` anywhere for status errors?

aarifullin commented:
Sorry, some [ideas](https://github.com/nspcc-dev/neofs-node/issues/2465#issuecomment-1661642640) got mixed up in my mind. Fixed to `errors.As`.
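For context on the `Is` versus `As` point: `errors.Is` matches a specific sentinel value (or relies on a custom `Is` method), while `errors.As` matches by concrete type anywhere in the wrap chain, which is what a typed status error such as `*apistatus.ContainerNotFound` needs when every call returns a fresh value. Below is a minimal, self-contained sketch with a hypothetical `notFoundError` type standing in for the SDK status error; it illustrates the standard-library behaviour, not the SDK's own error types.

```go
package main

import (
	"errors"
	"fmt"
)

// notFoundError is a hypothetical typed status error. It stands in for
// something like *apistatus.ContainerNotFound: a concrete type, not a
// shared package-level sentinel.
type notFoundError struct{ detail string }

func (e *notFoundError) Error() string { return "container not found: " + e.detail }

// deletionInfo simulates a call that wraps a fresh typed error value.
func deletionInfo() error {
	return fmt.Errorf("rpc failed: %w", &notFoundError{detail: "no deletion record"})
}

func main() {
	err := deletionInfo()

	// errors.Is compares against a specific value; two distinct
	// *notFoundError pointers are never equal, so this reports false.
	fmt.Println("Is:", errors.Is(err, &notFoundError{}))

	// errors.As walks the wrap chain and matches by type, filling the
	// target pointer, which is the pattern containerEverExisted uses.
	var nf *notFoundError
	fmt.Println("As:", errors.As(err, &nf))
}
```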
```go
	if errors.As(err, &errContainerNotFound) {
		return false, nil
	}
	return false, err
}

func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID]struct{}) {
	ctx, span := tracing.StartSpanFromContext(ctx, "TreeService.removeContainers")
	defer span.End()
```
@@ -452,8 +465,16 @@ func (s *Service) removeContainers(ctx context.Context, newContainers map[cid.ID]struct{}) {

```go
		if _, ok := newContainers[cnr]; ok {
			continue
		}

		existed, err := s.containerEverExisted(cnr)
```
aarifullin commented:
@fyrchik
I am not sure this can be incorrectly ignored (the container won't be appended to the `removed` list, and thus sync will try to sync the container again?). Anyway, the netmap cannot be updated while we cannot get info about the container.

fyrchik commented:
How is netmap update connected to all this? It is right not to remove the container in case an "unknown" error happens.

aarifullin commented:
> How is netmap update connected to all this?

Sorry, nevermind.

```golang
newMap, cnrsToSync := s.containersToSync(cnrs)
s.syncContainers(ctx, cnrsToSync)
s.removeContainers(ctx, newMap)
```

I mistook `newMap` for `netmap` :)
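To spell the agreed behaviour out (it also shows up in the diff just below): local tree state is dropped only when the sidechain confirms a deletion record; a not-found status and any other error both leave the container in place, so a transient RPC failure merely postpones cleanup to the next sync run. Here is a standalone sketch of that decision, using a hypothetical sentinel error instead of the SDK's typed status (the real code matches `*apistatus.ContainerNotFound` with `errors.As`).

```go
package main

import (
	"errors"
	"fmt"
)

// errNeverExisted is a hypothetical stand-in for a not-found status such
// as apistatus.ContainerNotFound (matched with errors.As in the real code).
var errNeverExisted = errors.New("container not found")

// shouldRemove mirrors the decision in removeContainers: drop local tree
// state only when the deletion record is confirmed by the sidechain.
func shouldRemove(deletionInfo func() error) bool {
	switch err := deletionInfo(); {
	case err == nil:
		// A deletion record exists: the container existed and was removed.
		return true
	case errors.Is(err, errNeverExisted):
		// No deletion record: do not touch local state.
		return false
	default:
		// Unknown error: keep the state and let the next sync retry
		// (the real code also logs TreeCouldNotCheckIfContainerExisted).
		return false
	}
}

func main() {
	cases := map[string]func() error{
		"deleted":       func() error { return nil },
		"never existed": func() error { return errNeverExisted },
		"rpc failure":   func() error { return errors.New("connection reset") },
	}
	for name, fn := range cases {
		fmt.Printf("%-14s remove=%v\n", name, shouldRemove(fn))
	}
}
```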
```go
		if err != nil {
			s.log.Error(logs.TreeCouldNotCheckIfContainerExisted,
				zap.Stringer("cid", cnr),
				zap.Error(err))
		} else if existed {
			removed = append(removed, cnr)
		}
	}

	for i := range removed {
		delete(s.cnrMap, removed[i])
	}
```