forked from TrueCloudLab/frostfs-node

[#1238] Adopt neofs-ir for non-pointer slices in SDK

Signed-off-by: Alex Vanin <alexey@nspcc.ru>

parent 7f3195b197
commit 9fad29dfe0

9 changed files with 28 additions and 27 deletions
@@ -67,7 +67,7 @@ func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
 			continue
 		}

-		a := netmap.NewNodeAttribute()
+		var a netmap.NodeAttribute
 		a.SetKey(attrKey)
 		a.SetValue(attrVal)

@@ -86,8 +86,8 @@ func (v *Validator) VerifyAndUpdate(n *netmap.NodeInfo) error {
 	return nil
 }

-func uniqueAttributes(as []*netmap.NodeAttribute) map[string]*netmap.NodeAttribute {
-	mAttr := make(map[string]*netmap.NodeAttribute, len(as))
+func uniqueAttributes(as []netmap.NodeAttribute) map[string]netmap.NodeAttribute {
+	mAttr := make(map[string]netmap.NodeAttribute, len(as))

 	for _, attr := range as {
 		mAttr[attr.Key()] = attr
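The pattern in the two hunks above: a value declaration (`var a netmap.NodeAttribute`) replaces the pointer constructor, and the attribute map stores values rather than pointers. A minimal self-contained sketch of the same shape, with a hypothetical `Attribute` type standing in for the SDK's:

```go
package main

import "fmt"

// Attribute is a hypothetical stand-in for the SDK's netmap.NodeAttribute.
type Attribute struct {
	key, value string
}

func (a *Attribute) SetKey(k string)   { a.key = k }
func (a *Attribute) SetValue(v string) { a.value = v }
func (a Attribute) Key() string        { return a.key }

// uniqueAttributes keeps the last attribute seen per key; with a value
// slice the map stores its own copies of the elements.
func uniqueAttributes(as []Attribute) map[string]Attribute {
	mAttr := make(map[string]Attribute, len(as))
	for _, attr := range as {
		mAttr[attr.Key()] = attr
	}
	return mAttr
}

func main() {
	var a Attribute // value declaration replaces the old constructor call
	a.SetKey("Location")
	a.SetValue("Europe")

	fmt.Println(uniqueAttributes([]Attribute{a}))
}
```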
@@ -35,7 +35,7 @@ func (x db) Get(lc *locodestd.LOCODE) (locode.Record, error) {
 }

 func addAttrKV(n *netmap.NodeInfo, key, val string) {
-	a := netmap.NewNodeAttribute()
+	var a netmap.NodeAttribute

 	a.SetKey(key)
 	a.SetValue(val)
@@ -216,13 +216,14 @@ func (c *Calculator) sumSGSizes(ctx *singleResultCtx) bool {
 	addr := addressSDK.NewAddress()
 	addr.SetContainerID(ctx.containerID())

-	for _, sgID := range ctx.auditResult.PassSG() {
-		addr.SetObjectID(sgID)
+	passSG := ctx.auditResult.PassSG()
+	for i := range passSG {
+		addr.SetObjectID(&passSG[i])

 		sgInfo, err := c.prm.SGStorage.SGInfo(addr)
 		if err != nil {
 			ctx.log.Error("could not get SG info",
-				zap.Stringer("id", sgID),
+				zap.Stringer("id", &passSG[i]), // stringer defined on pointer
 			)

 			return false // we also can continue and calculate at least some part
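The loop above is rewritten from ranging over values to ranging over indices because the call sites need a pointer: indexing gives a stable address into the slice, while the address of a range variable would (before Go 1.22) point at one reused copy. A sketch with hypothetical types:

```go
package main

import "fmt"

// ID is a hypothetical stand-in for an SDK identifier type whose
// String method has a pointer receiver.
type ID struct{ n int }

func (id *ID) String() string { return fmt.Sprintf("id-%d", id.n) }

func main() {
	passSG := []ID{{1}, {2}, {3}}

	// Indexing yields the address of the slice element itself.
	// Before Go 1.22, &sgID in `for _, sgID := range passSG` would
	// have been the address of a single reused loop variable.
	for i := range passSG {
		fmt.Println(&passSG[i]) // fmt uses the pointer-receiver Stringer
	}
}
```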
@@ -160,7 +160,7 @@ func (s settlementDeps) ContainerNodes(e uint64, cid *cid.ID) ([]common.NodeInfo

 	for i := range ns {
 		res = append(res, &nodeInfoWrapper{
-			ni: ns[i],
+			ni: &ns[i],
 		})
 	}

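Same idea here: the wrapper keeps a pointer, so it must be `&ns[i]`, the address of the element itself, giving each wrapper a distinct node. A sketch (hypothetical types):

```go
package main

import "fmt"

// Node and nodeInfoWrapper are hypothetical stand-ins for the
// netmap node type and the settlement wrapper.
type Node struct{ name string }

type nodeInfoWrapper struct{ ni *Node }

func main() {
	ns := []Node{{"a"}, {"b"}}

	res := make([]*nodeInfoWrapper, 0, len(ns))
	for i := range ns {
		// &ns[i] points at the i-th element of the backing array,
		// so every wrapper references a distinct node.
		res = append(res, &nodeInfoWrapper{ni: &ns[i]})
	}

	fmt.Println(res[0].ni.name, res[1].ni.name) // a b
}
```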
@@ -26,7 +26,7 @@ type Context struct {
 	report *audit.Report

 	sgMembersMtx   sync.RWMutex
-	sgMembersCache map[int][]*oidSDK.ID
+	sgMembersCache map[int][]oidSDK.ID

 	placementMtx   sync.Mutex
 	placementCache map[string][]netmap.Nodes
@@ -158,7 +158,7 @@ func (c *Context) containerID() *cid.ID {
 func (c *Context) init() {
	c.report = audit.NewReport(c.containerID())

-	c.sgMembersCache = make(map[int][]*oidSDK.ID)
+	c.sgMembersCache = make(map[int][]oidSDK.ID)

 	c.placementCache = make(map[string][]netmap.Nodes)

@@ -264,7 +264,7 @@ func (c *Context) updateHeadResponses(hdr *object.Object) {
 	}
 }

-func (c *Context) updateSGInfo(ind int, members []*oidSDK.ID) {
+func (c *Context) updateSGInfo(ind int, members []oidSDK.ID) {
 	c.sgMembersMtx.Lock()
 	defer c.sgMembersMtx.Unlock()

@@ -50,7 +50,7 @@ func (c *Context) processObjectPlacement(id *oidSDK.ID, nodes netmap.Nodes, repl

 	for i := 0; ok < replicas && i < len(nodes); i++ {
 		// try to get object header from node
-		hdr, err := c.cnrCom.GetHeader(c.task, nodes[i], id, false)
+		hdr, err := c.cnrCom.GetHeader(c.task, &nodes[i], id, false)
 		if err != nil {
 			c.log.Debug("could not get object header from candidate",
 				zap.Stringer("id", id),
@@ -95,9 +95,9 @@ func (c *Context) processObjectPlacement(id *oidSDK.ID, nodes netmap.Nodes, repl

 	if unpairedCandidate1 >= 0 {
 		if unpairedCandidate2 >= 0 {
-			c.composePair(id, nodes[unpairedCandidate1], nodes[unpairedCandidate2])
+			c.composePair(id, &nodes[unpairedCandidate1], &nodes[unpairedCandidate2])
 		} else if pairedCandidate >= 0 {
-			c.composePair(id, nodes[unpairedCandidate1], nodes[pairedCandidate])
+			c.composePair(id, &nodes[unpairedCandidate1], &nodes[pairedCandidate])
 		}
 	}
 }
@@ -143,7 +143,7 @@ func (c *Context) iterateSGMembersPlacementRand(f func(*oidSDK.ID, int, netmap.N
 }

 func (c *Context) iterateSGMembersRand(f func(*oidSDK.ID) bool) {
-	c.iterateSGInfo(func(members []*oidSDK.ID) bool {
+	c.iterateSGInfo(func(members []oidSDK.ID) bool {
 		ln := len(members)

 		processed := make(map[uint64]struct{}, ln-1)
@@ -152,7 +152,7 @@ func (c *Context) iterateSGMembersRand(f func(*oidSDK.ID) bool) {
 		ind := nextRandUint64(uint64(ln), processed)
 		processed[ind] = struct{}{}

-		if f(members[ind]) {
+		if f(&members[ind]) {
 			return true
 		}
 	}
@@ -161,7 +161,7 @@ func (c *Context) iterateSGMembersRand(f func(*oidSDK.ID) bool) {
 	})
 }

-func (c *Context) iterateSGInfo(f func([]*oidSDK.ID) bool) {
+func (c *Context) iterateSGInfo(f func([]oidSDK.ID) bool) {
 	c.sgMembersMtx.RLock()
 	defer c.sgMembersMtx.RUnlock()

@@ -169,8 +169,8 @@ func (c *Context) iterateSGInfo(f func([]oidSDK.ID) bool) {
 	// but list of storage groups is already expected
 	// to be shuffled since it is a Search response
 	// with unpredictable order
-	for _, members := range c.sgMembersCache {
-		if f(members) {
+	for i := range c.sgMembersCache {
+		if f(c.sgMembersCache[i]) {
 			return
 		}
 	}
@@ -56,11 +56,11 @@ func (c *Context) checkStorageGroupPoR(ind int, sg *oidSDK.ID) {
 	)

 	for i := range members {
-		objectPlacement, err := c.buildPlacement(members[i])
+		objectPlacement, err := c.buildPlacement(&members[i])
 		if err != nil {
 			c.log.Info("can't build placement for storage group member",
 				zap.Stringer("sg", sg),
-				zap.Stringer("member_id", members[i]),
+				zap.Stringer("member_id", &members[i]), // stringer defined on pointer
 			)

 			continue
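The recurring `// stringer defined on pointer` comments: `String()` is declared on the pointer receiver, so only `*T` satisfies `fmt.Stringer` and call sites must pass `&members[i]`. A minimal demonstration with a hypothetical type:

```go
package main

import "fmt"

// Member is a hypothetical value type; its String method has a
// pointer receiver, mirroring the SDK types in this diff.
type Member struct{ id int }

func (m *Member) String() string { return fmt.Sprintf("member-%d", m.id) }

func main() {
	members := []Member{{7}}

	var s fmt.Stringer = &members[0] // ok: *Member implements Stringer
	fmt.Println(s)                   // member-7

	// var bad fmt.Stringer = members[0] // compile error: Member's
	// method set does not include the pointer-receiver String method.
}
```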
@@ -78,11 +78,11 @@ func (c *Context) checkStorageGroupPoR(ind int, sg *oidSDK.ID) {
 			accRetries++
 		}

-		hdr, err := c.cnrCom.GetHeader(c.task, flat[j], members[i], true)
+		hdr, err := c.cnrCom.GetHeader(c.task, &flat[j], &members[i], true)
 		if err != nil {
 			c.log.Debug("can't head object",
 				zap.String("remote_node", hex.EncodeToString(flat[j].PublicKey())),
-				zap.Stringer("oid", members[i]))
+				zap.Stringer("oid", &members[i])) // stringer defined on pointer

 			continue
 		}
@@ -99,7 +99,7 @@ func (c *Context) checkStorageGroupPoR(ind int, sg *oidSDK.ID) {
 		})
 		if err != nil {
 			c.log.Debug("can't concatenate tz hash",
-				zap.Stringer("oid", members[i]),
+				zap.Stringer("oid", &members[i]), // stringer defined on pointer
 				zap.String("error", err.Error()))

 			break
@@ -52,7 +52,7 @@ func (r *Report) PassedPoR(sg *oidSDK.ID) {
 	r.mu.Lock()
 	defer r.mu.Unlock()

-	r.res.SetPassSG(append(r.res.PassSG(), sg))
+	r.res.SetPassSG(append(r.res.PassSG(), *sg))
 }

 // FailedPoR updates list of failed storage groups.
@@ -60,7 +60,7 @@ func (r *Report) FailedPoR(sg *oidSDK.ID) {
 	r.mu.Lock()
 	defer r.mu.Unlock()

-	r.res.SetFailSG(append(r.res.FailSG(), sg))
+	r.res.SetFailSG(append(r.res.FailSG(), *sg))
 }

 // SetPlacementCounters sets counters of compliance with placement.
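With the pass/fail lists now value slices, the report appends `*sg`, a copy of the caller's value. A sketch of why that decouples the stored slice from the caller (hypothetical types):

```go
package main

import "fmt"

// ID and result are hypothetical stand-ins for the SDK's object ID
// and audit result types.
type ID struct{ n int }

type result struct{ passSG []ID }

func (r *result) PassSG() []ID     { return r.passSG }
func (r *result) SetPassSG(s []ID) { r.passSG = s }

func main() {
	r := new(result)

	sg := &ID{42}
	// Dereferencing appends a copy of the value, so later mutation
	// through the caller's pointer does not reach the stored slice.
	r.SetPassSG(append(r.PassSG(), *sg))
	sg.n = 0

	fmt.Println(r.PassSG()) // [{42}] — the stored copy is unaffected
}
```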
@@ -64,7 +64,7 @@ func (mb *managerBuilder) BuildManagers(epoch uint64, p reputation.PeerID) ([]Se
 	}

 	// make a copy to keep order consistency of the origin netmap after sorting
-	nodes := make([]*apiNetmap.Node, len(nm.Nodes))
+	nodes := make([]apiNetmap.Node, len(nm.Nodes))

 	copy(nodes, nm.Nodes)

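The final hunk keeps the copy-before-sort idiom, now with value semantics: `copy` duplicates the node structs themselves, not just pointers to them. A sketch (hypothetical `Node`):

```go
package main

import (
	"fmt"
	"sort"
)

// Node is a hypothetical stand-in for apiNetmap.Node.
type Node struct{ weight int }

func main() {
	origin := []Node{{3}, {1}, {2}}

	// Each element is copied by value; with []*Node, mutating a node
	// through a copied pointer would still reach the origin's node.
	nodes := make([]Node, len(origin))
	copy(nodes, origin)

	sort.Slice(nodes, func(i, j int) bool { return nodes[i].weight < nodes[j].weight })

	fmt.Println(origin) // [{3} {1} {2}] — order preserved
	fmt.Println(nodes)  // [{1} {2} {3}]
}
```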