forked from TrueCloudLab/frostfs-node
[#796] cli: Fix object nodes command
Tombstone objects must be present on all container nodes.
Signed-off-by: Dmitrii Stepanov <d.stepanov@yadro.com>
(cherry picked from commit 78cfb6ae)
parent 0dc7013844
commit 3131c3e3d5
1 changed file with 10 additions and 9 deletions
cmd/frostfs-cli/modules/object/nodes.go
@@ -31,10 +31,10 @@ const (
 )
 
 type objectNodesInfo struct {
-	containerID      cid.ID
-	objectID         oid.ID
-	relatedObjectIDs []oid.ID
-	isLock           bool
+	containerID       cid.ID
+	objectID          oid.ID
+	relatedObjectIDs  []oid.ID
+	isLockOrTombstone bool
 }
 
 type boolError struct {
@@ -101,9 +101,9 @@ func getObjectInfo(cmd *cobra.Command, cnrID cid.ID, objID oid.ID, cli *client.C
 	res, err := internalclient.HeadObject(cmd.Context(), prmHead)
 	if err == nil {
 		return &objectNodesInfo{
-			containerID: cnrID,
-			objectID:    objID,
-			isLock:      res.Header().Type() == objectSDK.TypeLock,
+			containerID:       cnrID,
+			objectID:          objID,
+			isLockOrTombstone: res.Header().Type() == objectSDK.TypeLock || res.Header().Type() == objectSDK.TypeTombstone,
 		}
 	}
 
@@ -191,7 +191,7 @@ func getRequiredPlacement(cmd *cobra.Command, objInfo *objectNodesInfo, placemen
 		numOfReplicas := placementPolicy.ReplicaNumberByIndex(repIdx)
 		var nodeIdx uint32
 		for _, n := range rep {
-			if !objInfo.isLock && nodeIdx == numOfReplicas { //lock object should be on all container nodes
+			if !objInfo.isLockOrTombstone && nodeIdx == numOfReplicas { // lock and tombstone objects should be on all container nodes
 				break
 			}
 			nodes[n.Hash()] = n
@@ -213,7 +213,8 @@ func getRequiredPlacement(cmd *cobra.Command, objInfo *objectNodesInfo, placemen
 }
 
 func getActualPlacement(cmd *cobra.Command, netmap *netmapSDK.NetMap, requiredPlacement map[uint64]netmapSDK.NodeInfo,
-	pk *ecdsa.PrivateKey, objInfo *objectNodesInfo) map[uint64]boolError {
+	pk *ecdsa.PrivateKey, objInfo *objectNodesInfo,
+) map[uint64]boolError {
 	result := make(map[uint64]boolError)
 	resultMtx := &sync.Mutex{}
 
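For context, here is a minimal, self-contained sketch of the placement rule this commit adjusts: lock and tombstone objects must land on every container node, while regular objects only need as many nodes as the policy's replica count. The nodeInfo type and the requiredNodes helper below are simplified stand-ins invented for illustration; they are not the frostfs-node or frostfs-sdk-go API.

package main

import "fmt"

// nodeInfo is a simplified stand-in for netmapSDK.NodeInfo (illustration only).
type nodeInfo struct {
	id uint64
}

// requiredNodes mirrors the loop changed in getRequiredPlacement: a regular
// object stops once numOfReplicas nodes are collected, while a lock or
// tombstone object takes every node of the replica vector.
func requiredNodes(rep []nodeInfo, numOfReplicas uint32, isLockOrTombstone bool) map[uint64]nodeInfo {
	nodes := make(map[uint64]nodeInfo)
	var nodeIdx uint32
	for _, n := range rep {
		if !isLockOrTombstone && nodeIdx == numOfReplicas {
			break
		}
		nodes[n.id] = n
		nodeIdx++
	}
	return nodes
}

func main() {
	rep := []nodeInfo{{1}, {2}, {3}, {4}}
	fmt.Println(len(requiredNodes(rep, 2, false))) // 2: a regular object satisfies REP 2
	fmt.Println(len(requiredNodes(rep, 2, true)))  // 4: a tombstone (or lock) goes to all container nodes
}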