forked from TrueCloudLab/frostfs-testcases

Update range tests

Signed-off-by: Andrey Berezin <a.berezin@yadro.com>

parent 29a8c1e252
commit dc6e521f59

7 changed files with 166 additions and 68 deletions
@@ -20,6 +20,10 @@ LOCK_NON_REGULAR_OBJECT = "code = 2051"
 LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required"
 LOCK_OBJECT_REMOVAL = "lock object removal"
 LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}"
+INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length"
+INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
+INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
+INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"
 
 
 def error_matches_status(error: Exception, status_pattern: str) -> bool:
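These templates are matched with str.format before use: the negative-range tests later in this commit substitute the CLI-style "offset:length" string for the `{range}` placeholder. A minimal sketch of that substitution (illustrative, not part of the commit):

    INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length"

    # The tests build the specifier as f"{offset}:{length}" and fill the placeholder.
    range_cut = "10:0"
    expected_error = INVALID_RANGE_ZERO_LENGTH.format(range=range_cut)
    assert expected_error == "invalid '10:0' range: zero length"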
@@ -106,7 +106,7 @@ def prepare_objects(k6_instance: K6):
 @allure.title("Prepare K6 instances and objects")
 def prepare_k6_instances(
     load_nodes: list, login: str, pkey: str, load_params: LoadParams, prepare: bool = True
-) -> list:
+) -> list[K6]:
     k6_load_objects = []
     for load_node in load_nodes:
         ssh_client = SSHShell(host=load_node, login=login, private_key_path=pkey)
@@ -37,7 +37,6 @@ from neofs_testlib.reporter import AllureHandler, get_reporter
 from neofs_testlib.shell import LocalShell, Shell
 from neofs_testlib.utils.wallet import init_wallet
 from payment_neogo import deposit_gas, transfer_gas
-from pytest import FixtureRequest
 from python_keywords.neofs_verbs import get_netmap_netinfo
 from python_keywords.node_management import storage_node_healthcheck
 
@@ -172,7 +171,7 @@ def run_health_check(collect_logs, cluster: Cluster):
 
 
 @pytest.fixture(scope="session")
-def background_grpc_load(client_shell):
+def background_grpc_load(client_shell: Shell, hosting: Hosting):
     registry_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.bolt")
     prepare_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.json")
     allure.dynamic.title(
@@ -5,8 +5,15 @@ import sys
 import allure
 import pytest
 from cluster import Cluster
+from complex_object_actions import get_complex_object_split_ranges
 from file_helper import generate_file, get_file_content, get_file_hash
-from grpc_responses import OUT_OF_RANGE
+from grpc_responses import (
+    INVALID_LENGTH_SPECIFIER,
+    INVALID_OFFSET_SPECIFIER,
+    INVALID_RANGE_OVERFLOW,
+    INVALID_RANGE_ZERO_LENGTH,
+    OUT_OF_RANGE,
+)
 from neofs_testlib.shell import Shell
 from pytest import FixtureRequest
 from python_keywords.container import create_container
@@ -43,36 +50,41 @@ RANGE_MAX_LEN = 500
 STATIC_RANGES = {}
 
 
-def generate_ranges(file_size: int, max_object_size: int) -> list[(int, int)]:
-    file_range_step = file_size / RANGES_COUNT
+def generate_ranges(
+    storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, cluster: Cluster
+) -> list[tuple[int, int]]:
+    file_range_step = storage_object.size / RANGES_COUNT
 
     file_ranges = []
     file_ranges_to_test = []
 
     for i in range(0, RANGES_COUNT):
-        file_ranges.append((int(file_range_step * i), int(file_range_step * (i + 1))))
+        file_ranges.append((int(file_range_step * i), int(file_range_step)))
 
     # For simple object we can read all file ranges without too much time for testing
-    if file_size < max_object_size:
+    if storage_object.size < max_object_size:
         file_ranges_to_test.extend(file_ranges)
     # For complex object we need to fetch multiple child objects from different nodes.
     else:
         assert (
-            file_size >= RANGE_MAX_LEN + max_object_size
-        ), f"Complex object size should be at least {max_object_size + RANGE_MAX_LEN}. Current: {file_size}"
-        file_ranges_to_test.append((RANGE_MAX_LEN, RANGE_MAX_LEN + max_object_size))
+            storage_object.size >= RANGE_MAX_LEN + max_object_size
+        ), f"Complex object size should be at least {max_object_size + RANGE_MAX_LEN}. Current: {storage_object.size}"
+        file_ranges_to_test.append((RANGE_MAX_LEN, max_object_size - RANGE_MAX_LEN))
+        file_ranges_to_test.extend(get_complex_object_split_ranges(storage_object, shell, cluster))
 
     # Special cases to read some bytes from start and some bytes from end of object
     file_ranges_to_test.append((0, RANGE_MIN_LEN))
-    file_ranges_to_test.append((file_size - RANGE_MIN_LEN, file_size))
+    file_ranges_to_test.append((storage_object.size - RANGE_MIN_LEN, RANGE_MIN_LEN))
 
-    for start, end in file_ranges:
+    for offset, length in file_ranges:
         range_length = random.randint(RANGE_MIN_LEN, RANGE_MAX_LEN)
-        range_start = random.randint(start, end)
+        range_start = random.randint(offset, offset + length)
 
-        file_ranges_to_test.append((range_start, min(range_start + range_length, file_size)))
+        file_ranges_to_test.append(
+            (range_start, min(range_length, storage_object.size - range_start))
+        )
 
-    file_ranges_to_test.extend(STATIC_RANGES.get(file_size, []))
+    file_ranges_to_test.extend(STATIC_RANGES.get(storage_object.size, []))
 
     return file_ranges_to_test
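The rewrite above also switches the tuple convention from (start, end) to (offset, length). A standalone sketch of the new slicing, assuming RANGES_COUNT = 4 and a 100-byte object (both values are illustrative; the real constants live in the test module):

    RANGES_COUNT = 4  # illustrative; the module defines its own value
    file_size = 100  # stands in for storage_object.size

    file_range_step = file_size / RANGES_COUNT
    file_ranges = [
        (int(file_range_step * i), int(file_range_step)) for i in range(RANGES_COUNT)
    ]

    # Each tuple is now (offset, length) rather than (start, end):
    assert file_ranges == [(0, 25), (25, 25), (50, 25), (75, 25)]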
@@ -330,7 +342,7 @@ class TestObjectApi(ClusterTestBase):
         self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], max_object_size
     ):
         """
-        Validate get_range_hash for object by common gRPC API
+        Validate get_range_hash for object by native gRPC API
         """
         allure.dynamic.title(
             f"Validate native get_range_hash object API for {request.node.callspec.id}"
@@ -341,11 +353,12 @@ class TestObjectApi(ClusterTestBase):
         oids = [storage_object.oid for storage_object in storage_objects[:2]]
         file_path = storage_objects[0].file_path
 
-        file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size)
+        file_ranges_to_test = generate_ranges(
+            storage_objects[0], max_object_size, self.shell, self.cluster
+        )
         logging.info(f"Ranges used in test {file_ranges_to_test}")
 
-        for range_start, range_end in file_ranges_to_test:
-            range_len = range_end - range_start
+        for range_start, range_len in file_ranges_to_test:
             range_cut = f"{range_start}:{range_len}"
             with allure.step(f"Get range hash ({range_cut})"):
                 for oid in oids:
@@ -368,7 +381,7 @@ class TestObjectApi(ClusterTestBase):
         self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], max_object_size
     ):
         """
-        Validate get_range for object by common gRPC API
+        Validate get_range for object by native gRPC API
        """
         allure.dynamic.title(f"Validate native get_range object API for {request.node.callspec.id}")
 
@@ -377,11 +390,12 @@ class TestObjectApi(ClusterTestBase):
         oids = [storage_object.oid for storage_object in storage_objects[:2]]
         file_path = storage_objects[0].file_path
 
-        file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size)
+        file_ranges_to_test = generate_ranges(
+            storage_objects[0], max_object_size, self.shell, self.cluster
+        )
         logging.info(f"Ranges used in test {file_ranges_to_test}")
 
-        for range_start, range_end in file_ranges_to_test:
-            range_len = range_end - range_start
+        for range_start, range_len in file_ranges_to_test:
             range_cut = f"{range_start}:{range_len}"
             with allure.step(f"Get range ({range_cut})"):
                 for oid in oids:
@@ -409,7 +423,7 @@ class TestObjectApi(ClusterTestBase):
         storage_objects: list[StorageObjectInfo],
     ):
         """
-        Validate get_range negative for object by common gRPC API
+        Validate get_range negative for object by native gRPC API
         """
         allure.dynamic.title(
             f"Validate native get_range negative object API for {request.node.callspec.id}"
@@ -424,20 +438,30 @@ class TestObjectApi(ClusterTestBase):
             RANGE_MIN_LEN < file_size
         ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
 
-        file_ranges_to_test = [
+        file_ranges_to_test: list[tuple[int, int, str]] = [
             # Offset is bigger than the file size, the length is small.
-            (file_size + 1, RANGE_MIN_LEN),
+            (file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
             # Offset is ok, but offset+length is too big.
-            (file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2),
+            (file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
             # Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid".
-            (RANGE_MIN_LEN, sys.maxsize * 2 + 1),
+            (RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
+            # Length is zero
+            (10, 0, INVALID_RANGE_ZERO_LENGTH),
+            # Negative values
+            (-1, 1, INVALID_OFFSET_SPECIFIER),
+            (10, -5, INVALID_LENGTH_SPECIFIER),
         ]
 
-        for range_start, range_len in file_ranges_to_test:
+        for range_start, range_len, expected_error in file_ranges_to_test:
             range_cut = f"{range_start}:{range_len}"
+            expected_error = (
+                expected_error.format(range=range_cut)
+                if "{range}" in expected_error
+                else expected_error
+            )
             with allure.step(f"Get range ({range_cut})"):
                 for oid in oids:
-                    with pytest.raises(Exception, match=OUT_OF_RANGE):
+                    with pytest.raises(Exception, match=expected_error):
                         get_range(
                             wallet,
                             cid,
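A note on the overflow case this table keeps: on 64-bit CPython, sys.maxsize is 2**63 - 1, so sys.maxsize * 2 + 1 is exactly MaxUint64, and offset + length wraps around uint64 on the server side:

    import sys

    # 2 * (2**63 - 1) + 1 == 2**64 - 1, i.e. MaxUint64
    assert sys.maxsize * 2 + 1 == 2**64 - 1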
@@ -454,7 +478,7 @@ class TestObjectApi(ClusterTestBase):
         storage_objects: list[StorageObjectInfo],
     ):
         """
-        Validate get_range_hash negative for object by common gRPC API
+        Validate get_range_hash negative for object by native gRPC API
         """
         allure.dynamic.title(
             f"Validate native get_range_hash negative object API for {request.node.callspec.id}"
@@ -469,20 +493,30 @@ class TestObjectApi(ClusterTestBase):
             RANGE_MIN_LEN < file_size
         ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
 
-        file_ranges_to_test = [
+        file_ranges_to_test: list[tuple[int, int, str]] = [
             # Offset is bigger than the file size, the length is small.
-            (file_size + 1, RANGE_MIN_LEN),
+            (file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
             # Offset is ok, but offset+length is too big.
-            (file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2),
+            (file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
             # Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid".
-            (RANGE_MIN_LEN, sys.maxsize * 2 + 1),
+            (RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
+            # Length is zero
+            (10, 0, INVALID_RANGE_ZERO_LENGTH),
+            # Negative values
+            (-1, 1, INVALID_OFFSET_SPECIFIER),
+            (10, -5, INVALID_LENGTH_SPECIFIER),
         ]
 
-        for range_start, range_len in file_ranges_to_test:
+        for range_start, range_len, expected_error in file_ranges_to_test:
             range_cut = f"{range_start}:{range_len}"
-            with allure.step(f"Get range ({range_cut})"):
+            expected_error = (
+                expected_error.format(range=range_cut)
+                if "{range}" in expected_error
+                else expected_error
+            )
+            with allure.step(f"Get range hash ({range_cut})"):
                 for oid in oids:
-                    with pytest.raises(Exception, match=OUT_OF_RANGE):
+                    with pytest.raises(Exception, match=expected_error):
                         get_range_hash(
                             wallet,
                             cid,
@@ -6,7 +6,7 @@ import pytest
 from cluster import Cluster
 from cluster_test_base import ClusterTestBase
 from common import STORAGE_GC_TIME
-from complex_object_actions import get_link_object
+from complex_object_actions import get_link_object, get_storage_object_chunks
 from epoch import ensure_fresh_epoch, get_epoch, tick_epoch
 from grpc_responses import (
     LIFETIME_REQUIRED,
@@ -145,29 +145,6 @@ class TestObjectLockWithGrpc(ClusterTestBase):
 
         return storage_object
 
-    def get_storage_object_chunks(self, storage_object: StorageObjectInfo) -> list[str]:
-        with allure.step(f"Get complex object chunks (f{storage_object.oid})"):
-            split_object_id = get_link_object(
-                storage_object.wallet_file_path,
-                storage_object.cid,
-                storage_object.oid,
-                self.shell,
-                self.cluster.storage_nodes,
-                is_direct=False,
-            )
-            head = head_object(
-                storage_object.wallet_file_path,
-                storage_object.cid,
-                split_object_id,
-                self.shell,
-                self.cluster.default_rpc_endpoint,
-            )
-
-        chunks_object_ids = []
-        if "split" in head["header"] and "children" in head["header"]["split"]:
-            chunks_object_ids = head["header"]["split"]["children"]
-        return chunks_object_ids
-
     @allure.title("Locked object should be protected from deletion")
     @pytest.mark.parametrize(
         "locked_storage_object",
@@ -555,7 +532,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
         Complex object chunks should also be protected from deletion
         """
 
-        chunk_object_ids = self.get_storage_object_chunks(locked_storage_object)
+        chunk_object_ids = get_storage_object_chunks(
+            locked_storage_object, self.shell, self.cluster
+        )
         for chunk_object_id in chunk_object_ids:
             with allure.step(f"Try to delete chunk object {chunk_object_id}"):
                 with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
@@ -608,7 +587,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
     def test_chunks_of_locked_complex_object_can_be_dropped(
         self, new_locked_storage_object: StorageObjectInfo
     ):
-        chunk_objects = self.get_storage_object_chunks(new_locked_storage_object)
+        chunk_objects = get_storage_object_chunks(
+            new_locked_storage_object, self.shell, self.cluster
+        )
 
         for chunk_object_id in chunk_objects:
             with allure.step(f"Drop chunk object with id {chunk_object_id} from nodes"):
@@ -11,17 +11,97 @@
 """
 
 import logging
-from typing import Optional
+from typing import Optional, Tuple
 
 import allure
 import neofs_verbs
-from cluster import StorageNode
+from cluster import Cluster, StorageNode
 from common import WALLET_CONFIG
 from neofs_testlib.shell import Shell
+from neofs_verbs import head_object
+from storage_object import StorageObjectInfo
 
 logger = logging.getLogger("NeoLogger")
 
 
+def get_storage_object_chunks(
+    storage_object: StorageObjectInfo, shell: Shell, cluster: Cluster
+) -> list[str]:
+    """
+    Get complex object split objects ids (no linker object)
+
+    Args:
+        storage_object: storage object to get chunks for
+        shell: client shell to do cmd requests
+        cluster: cluster object under test
+
+    Returns:
+        list of object ids of complex object chunks
+    """
+
+    with allure.step(f"Get complex object chunks ({storage_object.oid})"):
+        split_object_id = get_link_object(
+            storage_object.wallet_file_path,
+            storage_object.cid,
+            storage_object.oid,
+            shell,
+            cluster.storage_nodes,
+            is_direct=False,
+        )
+        head = head_object(
+            storage_object.wallet_file_path,
+            storage_object.cid,
+            split_object_id,
+            shell,
+            cluster.default_rpc_endpoint,
+        )
+
+    chunks_object_ids = []
+    if "split" in head["header"] and "children" in head["header"]["split"]:
+        chunks_object_ids = head["header"]["split"]["children"]
+
+    return chunks_object_ids
+
+
+def get_complex_object_split_ranges(
+    storage_object: StorageObjectInfo, shell: Shell, cluster: Cluster
+) -> list[Tuple[int, int]]:
+    """
+    Get a list of split range tuples (offset, length) of a complex object.
+    For example, if the object size is 100 and the max object size in the system is 30,
+    the returned list should be
+    [(0, 30), (30, 30), (60, 30), (90, 10)]
+
+    Args:
+        storage_object: storage object to get split ranges for
+        shell: client shell to do cmd requests
+        cluster: cluster object under test
+
+    Returns:
+        list of (offset, length) tuples of complex object chunks
+    """
+
+    ranges: list = []
+    offset = 0
+    chunks_ids = get_storage_object_chunks(storage_object, shell, cluster)
+    for chunk_id in chunks_ids:
+        head = head_object(
+            storage_object.wallet_file_path,
+            storage_object.cid,
+            chunk_id,
+            shell,
+            cluster.default_rpc_endpoint,
+        )
+
+        length = int(head["header"]["payloadLength"])
+        ranges.append((offset, length))
+
+        offset = offset + length
+
+    return ranges
+
+
 @allure.step("Get Link Object")
 def get_link_object(
     wallet: str,
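The offset accumulation in get_complex_object_split_ranges can be checked against the docstring example in isolation. A sketch assuming chunk payload lengths of [30, 30, 30, 10] for a 100-byte object (the real lengths come from head_object's payloadLength field):

    chunk_lengths = [30, 30, 30, 10]  # assumed payloadLength values of the chunks

    ranges = []
    offset = 0
    for length in chunk_lengths:
        ranges.append((offset, length))
        offset += length

    assert ranges == [(0, 30), (30, 30), (60, 30), (90, 10)]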
@@ -5,4 +5,4 @@ pushd $DEVENV_PATH > /dev/null
 export `make env`
 popd > /dev/null
 
-export PYTHONPATH=${PYTHONPATH}:${VIRTUAL_ENV}/../robot/resources/lib/:${VIRTUAL_ENV}/../robot/resources/lib/python_keywords:${VIRTUAL_ENV}/../robot/resources/lib/robot:${VIRTUAL_ENV}/../robot/variables:${VIRTUAL_ENV}/../pytest_tests/helpers:${VIRTUAL_ENV}/../pytest_tests/steps
+export PYTHONPATH=${PYTHONPATH}:${VIRTUAL_ENV}/../robot/resources/lib/:${VIRTUAL_ENV}/../robot/resources/lib/python_keywords:${VIRTUAL_ENV}/../robot/resources/lib/robot:${VIRTUAL_ENV}/../robot/variables:${VIRTUAL_ENV}/../pytest_tests/helpers:${VIRTUAL_ENV}/../pytest_tests/steps:${VIRTUAL_ENV}/../pytest_tests/resources