[#1448] container/ape: Ignore an error when getting a role
All checks were successful
DCO action / DCO (pull_request) Successful in 1m24s
Tests and linters / Run gofumpt (pull_request) Successful in 2m9s
Vulncheck / Vulncheck (pull_request) Successful in 2m29s
Tests and linters / Tests (pull_request) Successful in 3m32s
Pre-commit hooks / Pre-commit (pull_request) Successful in 3m45s
Build / Build Components (pull_request) Successful in 4m8s
Tests and linters / gopls check (pull_request) Successful in 4m15s
Tests and linters / Staticcheck (pull_request) Successful in 5m6s
Tests and linters / Lint (pull_request) Successful in 7m9s
Tests and linters / Tests with -race (pull_request) Successful in 9m49s
When getting a role in the APE checker for the container services, an error may be returned if network maps of the previous two epochs don't have enough nodes to fulfil a container placement policy. It's a logical error, so we should ignore it.

Signed-off-by: Aleksey Savchuk <a.savchuk@yadro.com>
This commit is contained in:
parent 81f4cdbb91
commit 2fa82acd1c
1 changed file with 10 additions and 14 deletions
@@ -536,11 +536,7 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor
 	if err != nil {
 		return false, err
 	}
 
-	in, err := isContainerNode(nm, pk, binCnrID, cont)
-	if err != nil {
-		return false, err
-	} else if in {
+	if isContainerNode(nm, pk, binCnrID, cont) {
 		return true, nil
 	}
 
@@ -550,25 +546,25 @@ func (ac *apeChecker) isContainerKey(pk []byte, cnrID cid.ID, cont *containercor
 	if err != nil {
 		return false, err
 	}
 
-	return isContainerNode(nm, pk, binCnrID, cont)
+	return isContainerNode(nm, pk, binCnrID, cont), nil
 }
 
-func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) (bool, error) {
-	cnrVectors, err := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
-	if err != nil {
-		return false, err
-	}
+func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
+	// An error may be returned if network maps of the previous two epochs
+	// don't have enough nodes to fulfil a container placement policy.
+	// It's a logical error, so we should ignore it.
+	// See https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1448.
+	cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)
 
 	for i := range cnrVectors {
 		for j := range cnrVectors[i] {
 			if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
-				return true, nil
+				return true
 			}
 		}
 	}
 
-	return false, nil
+	return false
 }
 
 func (ac *apeChecker) namespaceByOwner(owner *refs.OwnerID) (string, error) {
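
For readability, here is the new shape of the helper consolidated from the added lines of the diff above. This is a minimal sketch, not the full file: the package name and import paths are assumptions based on the repository layout, while the function body is taken directly from the diff.

package container

import (
	"bytes"

	// Assumed import paths for the container core and netmap SDK packages.
	containercore "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/core/container"
	netmapSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/netmap"
)

// isContainerNode reports whether pk is the public key of a node selected
// for the container by its placement policy. It no longer returns an error.
func isContainerNode(nm *netmapSDK.NetMap, pk, binCnrID []byte, cont *containercore.Container) bool {
	// An error may be returned if network maps of the previous two epochs
	// don't have enough nodes to fulfil a container placement policy.
	// It's a logical error, so we should ignore it.
	// See https://git.frostfs.info/TrueCloudLab/frostfs-node/issues/1448.
	cnrVectors, _ := nm.ContainerNodes(cont.Value.PlacementPolicy(), binCnrID)

	for i := range cnrVectors {
		for j := range cnrVectors[i] {
			if bytes.Equal(cnrVectors[i][j].PublicKey(), pk) {
				return true
			}
		}
	}

	return false
}

Both call sites in isContainerKey simplify accordingly: the current-epoch check becomes "if isContainerNode(nm, pk, binCnrID, cont) { return true, nil }", and the previous-epoch check becomes "return isContainerNode(nm, pk, binCnrID, cont), nil".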