forked from TrueCloudLab/frostfs-node
[#1369] audit: Upgrade SDK package
Signed-off-by: Leonard Lyubich <leonard@nspcc.ru>
parent 875f0e79a2
commit 7a57d7b076
9 changed files with 61 additions and 61 deletions
go.mod (1 addition, 1 deletion)
@@ -19,7 +19,7 @@ require (
 	github.com/nspcc-dev/neo-go/pkg/interop v0.0.0-20220321144137-d5a9af5860af // indirect
 	github.com/nspcc-dev/neofs-api-go/v2 v2.12.1
 	github.com/nspcc-dev/neofs-contract v0.14.2
-	github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.3.0.20220316141620-a55ffa47966a
+	github.com/nspcc-dev/neofs-sdk-go v1.0.0-rc.3.0.20220412151250-3e75660802ae
 	github.com/nspcc-dev/tzhash v1.5.2
 	github.com/panjf2000/ants/v2 v2.4.0
 	github.com/paulmach/orb v0.2.2
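The only substantive change above is the neofs-sdk-go pseudo-version, which moves from a 2022-03-16 snapshot (a55ffa47966a) to a 2022-04-12 one (3e75660802ae). A pin like this is normally produced by the Go toolchain rather than edited by hand; a sketch, assuming the target commit is reachable from the module's default branch:

    go get github.com/nspcc-dev/neofs-sdk-go@3e75660802ae
    go mod tidy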
go.sum (BIN)
Binary file not shown.
@@ -174,7 +174,7 @@ func (ap *Processor) StartAuditHandler() event.Handler {
 
 func (r *epochAuditReporter) WriteReport(rep *audit.Report) error {
 	res := rep.Result()
-	res.SetAuditEpoch(r.epoch)
+	res.ForEpoch(r.epoch)
 
 	return r.rep.WriteReport(rep)
 }
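This hunk sets the pattern for the whole commit: the SDK's audit.Result mutators were renamed (SetAuditEpoch becomes ForEpoch here; later hunks show SetContainerID→ForContainer, SetPublicKey→SetAuditorKey, SetComplete(true)→Complete(), and the Submit*/Iterate* families). A minimal before/after sketch using only methods that appear somewhere in this diff; the concrete values are placeholders:

    // Sketch only: every method below is taken verbatim from hunks in this commit.
    var res audit.Result   // the upgraded SDK type is usable as a zero value
    res.ForEpoch(42)       // was: res.SetAuditEpoch(42)
    res.SetAuditorKey(pub) // was: res.SetPublicKey(pub); pub is a []byte placeholder
    res.Complete()         // was: res.SetComplete(true)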
@@ -13,6 +13,7 @@ import (
 	"github.com/nspcc-dev/neofs-sdk-go/audit"
 	cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
 	addressSDK "github.com/nspcc-dev/neofs-sdk-go/object/address"
+	oid "github.com/nspcc-dev/neofs-sdk-go/object/id"
 	"github.com/nspcc-dev/neofs-sdk-go/owner"
 	"go.uber.org/zap"
 )
@@ -108,7 +109,7 @@ func (c *Calculator) Calculate(p *CalculatePrm) {
 func (c *Calculator) processResult(ctx *singleResultCtx) {
 	ctx.log = ctx.log.With(
 		zap.Stringer("cid", ctx.containerID()),
-		zap.Uint64("audit epoch", ctx.auditResult.AuditEpoch()),
+		zap.Uint64("audit epoch", ctx.auditResult.Epoch()),
 	)
 
 	ctx.log.Debug("reading information about the container")
@@ -147,7 +148,7 @@ func (c *Calculator) processResult(ctx *singleResultCtx) {
 func (c *Calculator) readContainerInfo(ctx *singleResultCtx) bool {
 	var err error
 
-	ctx.cnrInfo, err = c.prm.ContainerStorage.ContainerInfo(ctx.auditResult.ContainerID())
+	ctx.cnrInfo, err = c.prm.ContainerStorage.ContainerInfo(ctx.auditResult.Container())
 	if err != nil {
 		ctx.log.Error("could not get container info",
 			zap.String("error", err.Error()),
@@ -178,21 +179,26 @@ func (c *Calculator) buildPlacement(ctx *singleResultCtx) bool {
 func (c *Calculator) collectPassNodes(ctx *singleResultCtx) bool {
 	ctx.passNodes = make(map[string]common.NodeInfo)
 
-loop:
 	for _, cnrNode := range ctx.cnrNodes {
-		for _, passNode := range ctx.auditResult.PassNodes() {
+		// TODO(@cthulhu-rider): neofs-sdk-go#241 use dedicated method
+		ctx.auditResult.IteratePassedStorageNodes(func(passNode []byte) bool {
 			if !bytes.Equal(cnrNode.PublicKey(), passNode) {
-				continue
+				return true
 			}
 
-			for _, failNode := range ctx.auditResult.FailNodes() {
-				if bytes.Equal(cnrNode.PublicKey(), failNode) {
-					continue loop
-				}
-			}
+			failed := false
+
+			ctx.auditResult.IterateFailedStorageNodes(func(failNode []byte) bool {
+				failed = bytes.Equal(cnrNode.PublicKey(), failNode)
+				return !failed
+			})
+
+			if !failed {
+				ctx.passNodes[hex.EncodeToString(passNode)] = cnrNode
+			}
 
-			ctx.passNodes[hex.EncodeToString(passNode)] = cnrNode
-		}
+			return false
+		})
 	}
 
 	empty := len(ctx.passNodes) == 0
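The old code ranged over the PassNodes()/FailNodes() slices; the upgraded SDK replaces those getters with visitor-style iterators that take a func([]byte) bool and stop as soon as the callback returns false (hence the disappearing loop label). A stripped-down sketch of the membership test the new code performs; the wrapper function is ours, not the SDK's:

    // nodeFailed reports whether key occurs among the result's failed storage
    // nodes. IterateFailedStorageNodes and bytes.Equal are used exactly as in
    // the hunk above; the helper itself is illustrative.
    func nodeFailed(res *audit.Result, key []byte) bool {
    	failed := false
    	res.IterateFailedStorageNodes(func(node []byte) bool {
    		failed = bytes.Equal(node, key)
    		return !failed // returning false stops the iteration early
    	})
    	return failed
    }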
@@ -204,32 +210,34 @@ loop:
 }
 
 func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {
-	passedSG := ctx.auditResult.PassSG()
-
-	if len(passedSG) == 0 {
-		ctx.log.Debug("empty list of passed SG")
-		return false
-	}
-
 	sumPassSGSize := uint64(0)
+	fail := false
 
 	addr := addressSDK.NewAddress()
 	addr.SetContainerID(ctx.containerID())
 
-	passSG := ctx.auditResult.PassSG()
-	for i := range passSG {
-		addr.SetObjectID(&passSG[i])
+	ctx.auditResult.IteratePassedStorageGroups(func(id oid.ID) bool {
+		addr.SetObjectID(&id)
 
 		sgInfo, err := c.prm.SGStorage.SGInfo(addr)
 		if err != nil {
 			ctx.log.Error("could not get SG info",
-				zap.String("id", passSG[i].String()),
+				zap.String("id", id.String()),
 				zap.String("error", err.Error()),
 			)
 
+			fail = true
+
 			return false // we also can continue and calculate at least some part
 		}
 
 		sumPassSGSize += sgInfo.Size()
-	}
+
+		return true
+	})
+
+	if fail {
+		return false
+	}
 
 	if sumPassSGSize == 0 {
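A bool-returning callback cannot propagate the error from SGInfo directly, so the new code captures it via the fail flag in the enclosing scope and checks it once the iteration ends. The idiom in isolation; process() is a hypothetical stand-in for the per-group work:

    // Surfacing an error out of a func(...) bool iterator (sketch).
    var firstErr error
    res.IteratePassedStorageGroups(func(id oid.ID) bool {
    	if err := process(id); err != nil { // process() is hypothetical
    		firstErr = err
    		return false // abort iteration on the first failure
    	}
    	return true
    })
    if firstErr != nil {
    	return firstErr // or handle it in place
    }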
@@ -279,11 +287,11 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
 	}
 
 	// add txs to pay inner ring node for audit result
-	auditIR, err := ownerFromKey(ctx.auditResult.PublicKey())
+	auditIR, err := ownerFromKey(ctx.auditResult.AuditorKey())
 	if err != nil {
 		ctx.log.Error("could not parse public key of the inner ring node",
 			zap.String("error", err.Error()),
-			zap.String("key", hex.EncodeToString(ctx.auditResult.PublicKey())),
+			zap.String("key", hex.EncodeToString(ctx.auditResult.AuditorKey())),
 		)
 
 		return false
@@ -300,7 +308,7 @@ func (c *Calculator) fillTransferTable(ctx *singleResultCtx) bool {
 
 func (c *singleResultCtx) containerID() *cid.ID {
 	if c.cid == nil {
-		c.cid = c.auditResult.ContainerID()
+		c.cid = c.auditResult.Container()
 	}
 
 	return c.cid
@@ -308,7 +316,7 @@ func (c *singleResultCtx) containerID() *cid.ID {
 
 func (c *singleResultCtx) auditEpoch() uint64 {
 	if c.eAudit == 0 {
-		c.eAudit = c.auditResult.AuditEpoch()
+		c.eAudit = c.auditResult.Epoch()
 	}
 
 	return c.eAudit
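Both cached getters memoize with a zero-value sentinel: nil for the container ID, 0 for the epoch. An actual epoch of 0 is therefore re-fetched on every call, which is harmless here but worth noting. If the sentinel ever became a problem, sync.Once is the usual alternative; a purely illustrative sketch, not part of this commit:

    // Memoization without a zero-value sentinel (illustrative only).
    type cachedEpoch struct {
    	once  sync.Once
    	epoch uint64
    }

    func (c *cachedEpoch) get(fetch func() uint64) uint64 {
    	c.once.Do(func() { c.epoch = fetch() })
    	return c.epoch
    }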
@@ -149,7 +149,7 @@ func (s *Server) VoteForSidechainValidator(prm governance.VoteValidatorPrm) error {
 // and sends it to Audit contract.
 func (s *Server) WriteReport(r *audit.Report) error {
 	res := r.Result()
-	res.SetPublicKey(s.pubKey)
+	res.SetAuditorKey(s.pubKey)
 
 	prm := auditClient.PutPrm{}
 	prm.SetResult(res)
@@ -25,10 +25,10 @@ func (c *Client) GetAuditResult(id ResultID) (*auditAPI.Result, error) {
 		return nil, fmt.Errorf("could not get byte array from stack item (%s): %w", getResultMethod, err)
 	}
 
-	auditRes := auditAPI.NewResult()
+	var auditRes auditAPI.Result
 	if err := auditRes.Unmarshal(value); err != nil {
 		return nil, fmt.Errorf("could not unmarshal audit result structure: %w", err)
 	}
 
-	return auditRes, nil
+	return &auditRes, nil
 }
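The SDK dropped its NewResult() constructor, so the client now declares a zero-value Result and unmarshals into it. Returning &auditRes is safe: Go's escape analysis heap-allocates the local once its address leaves the function. The same decode pattern condensed into a standalone helper; the helper name is ours:

    // decodeResult restates the hunk above as a self-contained sketch.
    func decodeResult(data []byte) (*auditAPI.Result, error) {
    	var res auditAPI.Result
    	if err := res.Unmarshal(data); err != nil {
    		return nil, fmt.Errorf("could not unmarshal audit result structure: %w", err)
    	}
    	return &res, nil
    }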
@@ -30,17 +30,12 @@ func (p *PutPrm) SetResult(result *auditAPI.Result) {
 //
 // Returns encountered error that caused the saving to interrupt.
 func (c *Client) PutAuditResult(p PutPrm) error {
-	rawResult, err := p.result.Marshal()
-	if err != nil {
-		return fmt.Errorf("could not marshal audit result: %w", err)
-	}
-
 	prm := client.InvokePrm{}
 	prm.SetMethod(putResultMethod)
-	prm.SetArgs(rawResult)
+	prm.SetArgs(p.result.Marshal())
 	prm.InvokePrmOptional = p.InvokePrmOptional
 
-	err = c.client.Invoke(prm)
+	err := c.client.Invoke(prm)
 	if err != nil {
 		return fmt.Errorf("could not invoke method (%s): %w", putResultMethod, err)
 	}
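In the upgraded SDK, Result.Marshal() returns the encoded bytes without an error, which is what lets the five-line marshal-and-check block be deleted, the call be inlined into SetArgs, and err be redeclared at the Invoke call. A round-trip sketch built only from calls visible in this diff; the epoch is a placeholder:

    var res auditAPI.Result
    res.ForEpoch(42) // placeholder value

    data := res.Marshal() // infallible in the new API

    var restored auditAPI.Result
    if err := restored.Unmarshal(data); err != nil {
    	// Unmarshal still validates its input and can fail.
    }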
@@ -34,13 +34,13 @@ func TestAuditResults(t *testing.T) {
 
 	id := cidtest.ID()
 
-	auditRes := auditAPI.NewResult()
-	auditRes.SetAuditEpoch(epoch)
-	auditRes.SetPublicKey(key.PublicKey().Bytes())
-	auditRes.SetContainerID(id)
+	var auditRes auditAPI.Result
+	auditRes.ForEpoch(epoch)
+	auditRes.SetAuditorKey(key.PublicKey().Bytes())
+	auditRes.ForContainer(*id)
 
 	prm := PutPrm{}
-	prm.SetResult(auditRes)
+	prm.SetResult(&auditRes)
 
 	require.NoError(t, auditClientWrapper.PutAuditResult(prm))
@@ -11,7 +11,7 @@ import (
 // Report tracks the progress of auditing container data.
 type Report struct {
 	mu  sync.RWMutex
-	res *audit.Result
+	res audit.Result
 }
 
 // Reporter is an interface of the entity that records
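Storing audit.Result by value removes a separate allocation and the possibility of a nil res. Note the interplay with Result() further down: it returns &r.res, so every caller shares one underlying value, and mutations through the returned pointer write straight into the Report; the RWMutex only guards the accessor calls themselves. A two-line sketch of that aliasing, with a placeholder value:

    res := rep.Result() // *audit.Result pointing into rep's own field
    res.ForEpoch(42)    // placeholder; visible to every other user of rep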
@@ -22,13 +22,10 @@ type Reporter interface {
 
 // NewReport creates and returns blank Report instance.
 func NewReport(cid *cid.ID) *Report {
-	rep := &Report{
-		res: audit.NewResult(),
-	}
+	var rep Report
+	rep.res.ForContainer(*cid)
 
-	rep.res.SetContainerID(cid)
-
-	return rep
+	return &rep
 }
 
 // Result forms the structure of the data audit result.
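The constructor follows the same zero-value convention as the client code above: declare, mutate, return the address. Caller-side usage is unchanged; a sketch with placeholder inputs (cnr is some *cid.ID, sgID some oidSDK.ID):

    rep := NewReport(cnr) // cnr: placeholder container ID
    rep.PassedPoR(&sgID)  // sgID: placeholder storage-group ID
    rep.Complete()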
@@ -36,7 +33,7 @@ func (r *Report) Result() *audit.Result {
 	r.mu.RLock()
 	defer r.mu.RUnlock()
 
-	return r.res
+	return &r.res
 }
 
 // Complete completes audit report.
@@ -44,7 +41,7 @@ func (r *Report) Complete() {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 
-	r.res.SetComplete(true)
+	r.res.Complete()
 }
 
 // PassedPoR updates list of passed storage groups.
@@ -52,7 +49,7 @@ func (r *Report) PassedPoR(sg *oidSDK.ID) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 
-	r.res.SetPassSG(append(r.res.PassSG(), *sg))
+	r.res.SubmitPassedStorageGroup(*sg)
 }
 
 // FailedPoR updates list of failed storage groups.
@@ -60,7 +57,7 @@ func (r *Report) FailedPoR(sg *oidSDK.ID) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 
-	r.res.SetFailSG(append(r.res.FailSG(), *sg))
+	r.res.SubmitFailedStorageGroup(*sg)
 }
 
 // SetPlacementCounters sets counters of compliance with placement.
@@ -68,9 +65,9 @@ func (r *Report) SetPlacementCounters(hit, miss, fail uint32) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 
-	r.res.SetHit(hit)
-	r.res.SetMiss(miss)
-	r.res.SetFail(fail)
+	r.res.SetHits(hit)
+	r.res.SetMisses(miss)
+	r.res.SetFailures(fail)
 }
 
 // SetPDPResults sets lists of nodes according to their PDP results.
@@ -78,8 +75,8 @@ func (r *Report) SetPDPResults(passed, failed [][]byte) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 
-	r.res.SetPassNodes(passed)
-	r.res.SetFailNodes(failed)
+	r.res.SubmitPassedStorageNodes(passed)
+	r.res.SubmitFailedStorageNodes(failed)
 }
 
 // SetPoRCounters sets amounts of head requests and retries at PoR audit stage.
@@ -87,6 +84,6 @@ func (r *Report) SetPoRCounters(requests, retries uint32) {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 
-	r.res.SetRequests(requests)
-	r.res.SetRetries(retries)
+	r.res.SetRequestsPoR(requests)
+	r.res.SetRetriesPoR(retries)
 }
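Taken together, the report-side renames complete the mapping started in the first hunk. As a closing sketch, the life of a result under the upgraded SDK, using only operations that appear in this commit; every value is a placeholder:

    var res audit.Result
    res.ForContainer(*cnr)               // was SetContainerID
    res.ForEpoch(epoch)                  // was SetAuditEpoch
    res.SetAuditorKey(pub)               // was SetPublicKey
    res.SubmitPassedStorageGroup(sg)     // was SetPassSG(append(...))
    res.SubmitPassedStorageNodes(passed) // was SetPassNodes
    res.SubmitFailedStorageNodes(failed) // was SetFailNodes
    res.SetHits(hits)                    // was SetHit
    res.SetRequestsPoR(reqs)             // was SetRequests
    res.Complete()                       // was SetComplete(true)

    data := res.Marshal() // infallible; ready for the Audit contract client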