Fix Search for EC-splitted objects #1144
7 changed files with 67 additions and 17 deletions
go.mod

@@ -7,7 +7,7 @@ require (
 	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240530152826-2f6d3209e1d3
 	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240409111539-e7a05a49ff45
 	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65
-	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240531121442-717a7d00ef21
+	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240531132048-ebd8fcd1685f
 	git.frostfs.info/TrueCloudLab/hrw v1.2.1
 	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240513163744-1f6f4163d40d
 	git.frostfs.info/TrueCloudLab/tzhash v1.8.0
go.sum

@@ -8,8 +8,8 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV
 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65 h1:PaZ8GpnUoXxUoNsc1qp36bT2u7FU+neU4Jn9cl8AWqI=
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20231101111734-b3ad3335ff65/go.mod h1:6aAX80dvJ3r5fjN9CzzPglRptoiPgIC9KFGGsUA+1Hw=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240531121442-717a7d00ef21 h1:ugo9k9s0+51BCMhD4mqHD+qD5P4BOTfDxcEMqTsM9+A=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240531121442-717a7d00ef21/go.mod h1:dwBHqBoseOpU4EiIPSGxhNeQx2QOEao/1r8h26syswI=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240531132048-ebd8fcd1685f h1:vBLC1OSGMSn7lRJv/p1of0veifuBdZdztVrF9Vn+UFk=
+git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240531132048-ebd8fcd1685f/go.mod h1:4AObM67VUqkXQJlODTFThFnuMGEuK8h9DrAXHDZqvCU=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
 git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240513163744-1f6f4163d40d h1:hHk8FWbWgEnwm2I045CaBIrZBjy/o81CehIVOySA/pQ=
@@ -262,7 +262,7 @@ func unknownMatcherBucket(_ *bbolt.Bucket, _ string, _ string, _ func([]byte, []
 // in boltDB. Useful for getting filter values from unique and list indexes.
 func bucketKeyHelper(hdr string, val string) []byte {
 	switch hdr {
-	case v2object.FilterHeaderParent:
+	case v2object.FilterHeaderParent, v2object.FilterHeaderECParent:
 		v, err := base58.Decode(val)
 		if err != nil {
 			return nil
@@ -257,17 +257,19 @@ func putUniqueIndexes(
 		}
 	}
-	err = putUniqueIndexItem(tx, namedBucketItem{
-		name: rootBucketName(cnr, bucketName),
-		key:  objKey,
-		val:  splitInfo,
-	})
-	if err != nil {
-		return err
-	}
-
+	isObjKeySet := true
 	if ecHead := obj.ECHeader(); ecHead != nil {
-		err = putECInfo(tx, cnr, objKey, ecHead)
-		if err != nil {
+		if err = putECInfo(tx, cnr, objKey, ecHead); err != nil {
 			return err
 		}
+		objKey, isObjKeySet = objectKeyByECHeader(ecHead)
 	}
+	if isObjKeySet {
+		err = putUniqueIndexItem(tx, namedBucketItem{
+			name: rootBucketName(cnr, bucketName),
+			key:  objKey,
+			val:  splitInfo,
+		})
+		if err != nil {
+			return err
+		}
+	}
@@ -277,6 +279,23 @@ func putUniqueIndexes(
 	return nil
 }
 
+// objectKeyByECHeader returns objectKey for an object that has EC Header.
+// If object's parent is in Split, then parent's non-nil Split parent ID is set to object key.
+// If object's parent is not in Split, then its ID is set to object key.
+// Otherwise, such object keys should be ignored -- they are not put to the root bucket.
+func objectKeyByECHeader(ech *objectSDK.ECHeader) (objKey []byte, isSet bool) {
+	if ech.ParentSplitID() != nil {
+		if parentSplitParentID := ech.ParentSplitParentID(); parentSplitParentID != nil {
+			isSet = true
+			objKey = objectKey(*parentSplitParentID, make([]byte, objectKeySize))
+		}
+		return
+	}
+	isSet = true
+	objKey = objectKey(ech.Parent(), make([]byte, objectKeySize))
+	return
+}
+
 type updateIndexItemFunc = func(tx *bbolt.Tx, item namedBucketItem) error
 
 func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFunc) error {
@@ -323,6 +342,17 @@ func updateListIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
 		}
 	}
 
+	if ech := obj.ECHeader(); ech != nil {
+		err := f(tx, namedBucketItem{
+			name: ecParentToChunksBucketName(cnr, bucketName),
+			key:  objectKey(ech.Parent(), make([]byte, objectKeySize)),
+			val:  objKey,
+		})
+		if err != nil {
+			return err
+		}
+	}
+
 	return nil
 }
 
@@ -331,8 +361,6 @@ func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
 	cnr, _ := obj.ContainerID()
 	objKey := objectKey(id, make([]byte, objectKeySize))
 
-	attrs := obj.Attributes()
-
 	key := make([]byte, bucketKeySize)
 	err := f(tx, namedBucketItem{
 		name: ownerBucketName(cnr, key),
@@ -343,6 +371,14 @@ func updateFKBTIndexes(tx *bbolt.Tx, obj *objectSDK.Object, f updateIndexItemFun
 		return err
 	}
 
+	var attrs []objectSDK.Attribute
+	if obj.ECHeader() != nil {
+		attrs = obj.ECHeader().ParentAttributes()
+		objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize))
+	} else {
+		attrs = obj.Attributes()
+	}
+
 	// user specified attributes
 	for i := range attrs {
 		key = attributeBucketName(cnr, attrs[i].Key(), key)

fyrchik commented on the added line `objKey = objectKey(obj.ECHeader().Parent(), make([]byte, objectKeySize))`:

    We can probably reuse the slice in the second argument, no?
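For context, a minimal sketch of what the reviewer's suggestion could look like, assuming objectKey only encodes the ID into the buffer it is given and that the objKey slice allocated at the top of updateFKBTIndexes is free to be overwritten at this point. This illustrates the review comment; it is not code from the PR.

```go
	var attrs []objectSDK.Attribute
	if obj.ECHeader() != nil {
		attrs = obj.ECHeader().ParentAttributes()
		// Reuse the objectKeySize buffer already held by objKey instead of
		// allocating a second one with make([]byte, objectKeySize).
		objKey = objectKey(obj.ECHeader().Parent(), objKey)
	} else {
		attrs = obj.Attributes()
	}
```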
@@ -211,6 +211,9 @@ func (db *DB) selectFastFilter(
 	case v2object.FilterHeaderSplitID:
 		bucketName := splitBucketName(cnr, bucketName)
 		db.selectFromList(tx, bucketName, f, to, fNum)
+	case v2object.FilterHeaderECParent:
+		bucketName := ecParentToChunksBucketName(cnr, bucketName)
+		db.selectFromList(tx, bucketName, f, to, fNum)
 	case v2object.FilterPropertyRoot:
 		selectAllFromBucket(tx, rootBucketName(cnr, bucketName), to, fNum)
 	case v2object.FilterPropertyPhy:
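The new case is reached when a Search request filters on the EC parent header. A hedged client-side sketch of building such a filter follows; it assumes the SDK's SearchFilters.AddFilter helper and MatchStringEqual match type, and passes the parent object ID in base58, which is what bucketKeyHelper decodes above. This is an illustration, not part of the PR.

```go
package main

import (
	"fmt"

	v2object "git.frostfs.info/TrueCloudLab/frostfs-api-go/v2/object"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
)

// ecChunkFilters builds a search filter matching all EC chunks whose
// EC header points at the given parent object.
func ecChunkFilters(parent oid.ID) objectSDK.SearchFilters {
	fs := objectSDK.SearchFilters{}
	fs.AddFilter(v2object.FilterHeaderECParent, parent.EncodeToString(), objectSDK.MatchStringEqual)
	return fs
}

func main() {
	var parent oid.ID // in a real search this is the ID of the EC parent object
	fmt.Println(ecChunkFilters(parent))
}
```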
@@ -124,6 +124,12 @@ const (
 	// Key: container ID + type
 	// Value: Object id
 	ecInfoPrefix
+
+	// ecParentToChunksPrefix is used to store a relation between EC parent ID and chunks,
+	// but unlike for ecInfoPrefix the list of chunk IDs is encoded with encodeList.
+	// Key: EC parent ID
+	// Value: list of EC chunk IDs
+	ecParentToChunksPrefix
 )
 
 const (

@@ -200,6 +206,11 @@ func ecInfoBucketName(cnr cid.ID, key []byte) []byte {
 	return bucketName(cnr, ecInfoPrefix, key)
 }
 
+// ecParentToChunksBucketName returns <CID>_ecParentToChunks.
+func ecParentToChunksBucketName(cnr cid.ID, key []byte) []byte {
+	return bucketName(cnr, ecParentToChunksPrefix, key)
+}
+
 // addressKey returns key for K-V tables when key is a whole address.
 func addressKey(addr oid.Address, key []byte) []byte {
 	addr.Container().Encode(key)
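To make the layout concrete, here is a minimal read-side sketch written as if it lived inside the metabase package (so the unexported helpers above are visible). It assumes decodeList is the counterpart of the encodeList mentioned in the prefix comment; the helper name ecChunksOfParent is hypothetical and not part of the PR.

```go
// ecChunksOfParent returns the raw object keys of all EC chunks indexed
// under the given EC parent ID within a bbolt read transaction.
func ecChunksOfParent(tx *bbolt.Tx, cnr cid.ID, parent oid.ID) ([][]byte, error) {
	bkt := tx.Bucket(ecParentToChunksBucketName(cnr, make([]byte, bucketKeySize)))
	if bkt == nil {
		return nil, nil // nothing is indexed for this container yet
	}
	raw := bkt.Get(objectKey(parent, make([]byte, objectKeySize)))
	if raw == nil {
		return nil, nil // this parent has no chunks on the node
	}
	// the value is a list of chunk object keys encoded with encodeList
	return decodeList(raw)
}
```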
@@ -114,7 +114,7 @@ func TestECChunkHasInvalidPlacement(t *testing.T) {
 	chunkObject.SetID(chunkAddress.Object())
 	chunkObject.SetPayload([]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9})
 	chunkObject.SetPayloadSize(uint64(10))
-	chunkObject.SetECHeader(objectSDK.NewECHeader(parentID, nil, nil, 1, 3, []byte{}, 0))
+	chunkObject.SetECHeader(objectSDK.NewECHeader(objectSDK.ECParentInfo{ID: parentID}, 0, 3, []byte{}, 0))
 
 	var policy netmapSDK.PlacementPolicy
 	require.NoError(t, policy.DecodeString("EC 2.1"))
Review comment: Am I right that this bucket will be removed automatically here?

Reply: Yes, it should be.