forked from TrueCloudLab/frostfs-node
[#6] services/object: Remove mutex from traversal struct

It is now used from a single thread, so the mutex is no longer needed.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>
parent b3dd9bcb89
commit 1d3d0b7cc1

1 changed file with 6 additions and 13 deletions
@@ -45,9 +45,6 @@ type traversal struct {
 	// need of additional broadcast after the object is saved
 	extraBroadcastEnabled bool
 
-	// mtx protects mExclude map.
-	mtx sync.RWMutex
-
 	// container nodes which was processed during the primary object placement
 	mExclude map[string]struct{}
 }
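For readability, this is how the struct reads after the hunk above, assembled from its context lines (a sketch of the result, not a verbatim copy of the file; fields the diff does not show are elided):

type traversal struct {
	// ... fields not shown in the diff ...

	// need of additional broadcast after the object is saved
	extraBroadcastEnabled bool

	// container nodes which was processed during the primary object placement
	mExclude map[string]struct{}
}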
@@ -73,21 +70,17 @@ func (x *traversal) submitProcessed(n placement.Node) {
 	if x.extraBroadcastEnabled {
 		key := string(n.PublicKey())
 
-		x.mtx.Lock()
 		if x.mExclude == nil {
 			x.mExclude = make(map[string]struct{}, 1)
 		}
 
 		x.mExclude[key] = struct{}{}
-		x.mtx.Unlock()
 	}
 }
 
 // checks if specified node was processed during the primary object placement.
 func (x *traversal) processed(n placement.Node) bool {
-	x.mtx.RLock()
 	_, ok := x.mExclude[string(n.PublicKey())]
-	x.mtx.RUnlock()
 
 	return ok
 }
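Assembled the same way, the two methods read roughly as follows once the lock calls are dropped (again a sketch built from the context lines of the hunk; the leading comment on submitProcessed is added here for description only):

// submitProcessed records a node that was handled during the primary
// placement so that the extra broadcast can skip it.
func (x *traversal) submitProcessed(n placement.Node) {
	if x.extraBroadcastEnabled {
		key := string(n.PublicKey())

		if x.mExclude == nil {
			x.mExclude = make(map[string]struct{}, 1)
		}

		x.mExclude[key] = struct{}{}
	}
}

// checks if specified node was processed during the primary object placement.
func (x *traversal) processed(n placement.Node) bool {
	_, ok := x.mExclude[string(n.PublicKey())]
	return ok
}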
@@ -198,12 +191,6 @@ loop:
 
 			err := f(nodeDesc{local: isLocal, info: addr})
 
-			// mark the container node as processed in order to exclude it
-			// in subsequent container broadcast. Note that we don't
-			// process this node during broadcast if primary placement
-			// on it failed.
-			t.traversal.submitProcessed(addr)
-
 			if err != nil {
 				resErr.Store(err)
 				svcutil.LogServiceError(t.log, "PUT", addr.Addresses(), err)
@@ -218,6 +205,12 @@ loop:
 
 				break loop
 			}
+
+			// mark the container node as processed in order to exclude it
+			// in subsequent container broadcast. Note that we don't
+			// process this node during broadcast if primary placement
+			// on it failed.
+			t.traversal.submitProcessed(addrs[i])
 		}
 
 		wg.Wait()
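The reason the mutex can go away is visible in the last two hunks: the submitProcessed call is removed from the spot right after f(nodeDesc{...}) (old line 198 area), which the surrounding wg.Wait() and the old RWMutex suggest could run concurrently per node, and is added in the body of the dispatching loop (old line 218 / new line 205 area), before the final wg.Wait(). After the change, only one goroutine writes mExclude. Below is a minimal, hypothetical sketch of that pattern; it is not frostfs-node code, and every name in it is made up for illustration:

// Illustration of the single-writer pattern behind this commit: workers do
// the per-node work concurrently, while the dispatching goroutine alone
// records which nodes were handed off, so the map needs no mutex.
package main

import (
	"fmt"
	"sync"
)

func main() {
	nodes := []string{"node-a", "node-b", "node-c"}

	// written by the dispatching goroutine only
	processed := make(map[string]struct{}, len(nodes))
	var wg sync.WaitGroup

	for _, n := range nodes {
		n := n
		wg.Add(1)
		go func() {
			defer wg.Done()
			// per-node work runs concurrently, but it no longer
			// touches the processed map
			fmt.Println("placing object on", n)
		}()

		// the dispatcher records the node right after handing the
		// work off, keeping the map single-threaded
		processed[n] = struct{}{}
	}

	wg.Wait()
	fmt.Println("processed nodes:", len(processed))
}

If the workers ever needed to update the set again (for example, only on success), the mutex, or a sync.Map, would have to come back.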