[#222] auditsvc: Refactor PoR audit
All checks were successful
ci/woodpecker/push/pre-commit Pipeline was successful
Resolve funlen linter for Context.checkStorageGroupPoR method.

Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
parent e8d340287f
commit e2f13d03d7
1 changed file with 27 additions and 16 deletions
@@ -36,7 +36,6 @@ func (c *Context) executePoR(ctx context.Context) {
 	c.report.SetPoRCounters(c.porRequests.Load(), c.porRetries.Load())
 }
 
-// nolint: funlen
 func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg storagegroupSDK.StorageGroup) {
 	members := sg.Members()
 	c.updateSGInfo(sgID, members)
@@ -55,22 +54,11 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor
 	homomorphicHashingEnabled := !containerSDK.IsHomomorphicHashingDisabled(c.task.ContainerStructure())
 
 	for i := range members {
-		objectPlacement, err := c.buildPlacement(members[i])
-		if err != nil {
-			c.log.Info("can't build placement for storage group member",
-				zap.Stringer("sg", sgID),
-				zap.String("member_id", members[i].String()),
-			)
-
+		flat, ok := c.getShuffledNodes(members[i], sgID)
+		if !ok {
 			continue
 		}
 
-		flat := placement.FlattenNodes(objectPlacement)
-
-		rand.Shuffle(len(flat), func(i, j int) {
-			flat[i], flat[j] = flat[j], flat[i]
-		})
-
 		getHeaderPrm.OID = members[i]
 
 		for j := range flat {
@@ -126,8 +114,12 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor
 	cs, _ := sg.ValidationDataHash()
 	tzCheck := !homomorphicHashingEnabled || bytes.Equal(tzHash, cs.Value())
 
+	c.writeCheckReport(sizeCheck, tzCheck, sgID, sg, totalSize)
+}
+
+func (c *Context) writeCheckReport(sizeCheck, tzCheck bool, sgID oid.ID, sg storagegroupSDK.StorageGroup, totalSize uint64) {
 	if sizeCheck && tzCheck {
-		c.report.PassedPoR(sgID) // write report
+		c.report.PassedPoR(sgID)
 	} else {
 		if !sizeCheck {
 			c.log.Debug("storage group size check failed",
@@ -139,6 +131,25 @@ func (c *Context) checkStorageGroupPoR(ctx context.Context, sgID oid.ID, sg stor
 			c.log.Debug("storage group tz hash check failed")
 		}
 
-		c.report.FailedPoR(sgID) // write report
+		c.report.FailedPoR(sgID)
 	}
 }
+
+func (c *Context) getShuffledNodes(member oid.ID, sgID oid.ID) ([]netmap.NodeInfo, bool) {
+	objectPlacement, err := c.buildPlacement(member)
+	if err != nil {
+		c.log.Info("can't build placement for storage group member",
+			zap.Stringer("sg", sgID),
+			zap.String("member_id", member.String()),
+		)
+
+		return nil, false
+	}
+
+	flat := placement.FlattenNodes(objectPlacement)
+
+	rand.Shuffle(len(flat), func(i, j int) {
+		flat[i], flat[j] = flat[j], flat[i]
+	})
+
+	return flat, true
+}
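For context, funlen is the golangci-lint check that flags overly long functions; this commit satisfies it by extracting the placement-shuffling step into Context.getShuffledNodes and the report-writing step into Context.writeCheckReport instead of keeping the // nolint: funlen suppression. Below is a minimal, self-contained sketch of that extraction pattern; the node type, buildPlacement stub, and free-standing getShuffledNodes function are illustrative placeholders, not the real auditsvc or SDK API.

// Sketch of the extracted-helper pattern from this commit. All identifiers
// below (node, buildPlacement, getShuffledNodes) are placeholders.
package main

import (
	"errors"
	"fmt"
	"math/rand"
)

type node struct{ addr string }

// buildPlacement stands in for the real placement builder, which can fail
// for a given object ID.
func buildPlacement(objectID string) ([][]node, error) {
	if objectID == "" {
		return nil, errors.New("empty object ID")
	}
	// Two placement vectors, as a replicated object might have.
	return [][]node{
		{{addr: "node-a"}, {addr: "node-b"}},
		{{addr: "node-c"}},
	}, nil
}

// getShuffledNodes mirrors the helper introduced by the commit: build the
// placement, flatten the vectors, shuffle the candidates, and report failure
// with an ok flag so the caller's loop body stays short.
func getShuffledNodes(objectID string) ([]node, bool) {
	vectors, err := buildPlacement(objectID)
	if err != nil {
		// In the real code this failure is logged with the storage group
		// and member IDs before returning.
		return nil, false
	}

	var flat []node
	for _, v := range vectors {
		flat = append(flat, v...)
	}

	rand.Shuffle(len(flat), func(i, j int) {
		flat[i], flat[j] = flat[j], flat[i]
	})

	return flat, true
}

func main() {
	// The calling loop collapses to a short "get candidates or skip" step.
	for _, member := range []string{"object-1", ""} {
		flat, ok := getShuffledNodes(member)
		if !ok {
			continue
		}
		fmt.Println(member, "->", flat)
	}
}

Returning a (nodes, ok) pair rather than an error keeps the logging decision inside the helper, which is what lets checkStorageGroupPoR shrink enough for funlen to pass without the nolint directive.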