[#1448] container/ape: Ignore an error when getting a role
All checks were successful
DCO action / DCO (pull_request) Successful in 1m38s
Tests and linters / Run gofumpt (pull_request) Successful in 2m4s
Vulncheck / Vulncheck (pull_request) Successful in 3m49s
Tests and linters / gopls check (pull_request) Successful in 4m2s
Pre-commit hooks / Pre-commit (pull_request) Successful in 4m17s
Tests and linters / Lint (pull_request) Successful in 4m24s
Build / Build Components (pull_request) Successful in 4m39s
Tests and linters / Staticcheck (pull_request) Successful in 7m3s
Tests and linters / Tests (pull_request) Successful in 9m33s
Tests and linters / Tests with -race (pull_request) Successful in 9m34s
When getting a role in the APE checker for the container services, an error may be returned if network maps of the previous two epochs don't have enough nodes to fulfil a container placement policy. It's a logical error, so we should ignore it.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
This commit is contained in:
parent 433aab12bb
commit e70c2cedbe

1 changed file with 10 additions and 14 deletions
@@ -536,11 +536,7 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor
 	if err != nil {
 		return false, err
 	}
-
-	in, err := isContainerNode(nm, pk, binCnrID, cont)
-	if err != nil {
-		return false, err
-	} else if in {
+	if isContainerNode(nm, pk, binCnrID, cont) {
 		return true, nil
 	}
 
@@ -550,25 +546,25 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor
 	if err != nil {
 		return false, err
 	}
 
-	return isContainerNode(nm, pk, binCnrID, cont)
+	return isContainerNode(nm, pk, binCnrID, cont), nil
 }
 
-func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) (bool, error) {
-	cnrVectors, err := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
-	if err != nil {
-		return false, err
-	}
-
+func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
+	// An error may be returned if network maps of the previous two epochs
+	// don't have enough nodes to fulfil a container placement policy.
+	// It's a logical error, so we should ignore it.
+	// See https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1448.
+	cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
 	for i := range cnrVectors {
 		for j := range cnrVectors[i] {
 			if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
-				return true, nil
+				return true
 			}
 		}
 	}
 
-	return false, nil
+	return false
 }
 
 func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
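For reference, here is a self-contained sketch of the helper as it reads after this change, assembled from the added lines above. The package clause and import paths are assumptions (the diff does not show the file header); only the function body and the netmapSDK/containercore aliases come from the diff itself.

package container // assumption: the diff does not show the actual package name

import (
	"bytes"

	containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container" // assumed import path
	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"               // assumed import path
)

// isContainerNode reports whether pk belongs to one of the container's
// placement vectors in the given network map. It no longer returns an error:
// ContainerNodes may fail when the netmaps of the previous two epochs don't
// have enough nodes to fulfil the placement policy, and that logical error
// is deliberately ignored (issue 1448).
func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
	cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
	for i := range cnrVectors {
		for j := range cnrVectors[i] {
			if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
				return true
			}
		}
	}
	return false
}

Dropping the error also simplifies the call sites in isContainerKey: a key whose container cannot be placed by the policy is treated the same as a key that simply isn't in the container's node set.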