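"""Nightly gRPC API tests for the object Patch operation.

Covers patching of payload ranges and attributes for simple, complex (multi-part)
and EC-placed objects, patching via container and non-container nodes, and negative
scenarios (invalid ranges, deleted/tombstone/lock/link objects, parts and chunks).
"""
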
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.constants import PlacementRule
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.storage.grpc_operations.interfaces import GrpcClientWrapper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash

from ...helpers.container_creation import create_container_with_ape
from ...helpers.container_request import APE_EVERYONE_ALLOW_ALL, ContainerRequest

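# All sizes are in bytes.
# OPERATIONS_TO_REDUCE is both the size of the test object and the number of 1-byte patches
# applied in the iterative reduction test; DEFAULT_FILE_SIZE is the payload size used by most
# tests; EMPTY_FILE_SIZE is a zero-byte payload used to cut ranges out of an object.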
OPERATIONS_TO_REDUCE = 100
DEFAULT_FILE_SIZE = 10
EMPTY_FILE_SIZE = 0


@pytest.mark.nightly
@pytest.mark.grpc_api
class TestObjectApiPatch(ClusterTestBase):
    @allure.title("[Class] Create container with all operations allowed")
    @pytest.fixture(scope="class")
    def container(self, placement_policy: PlacementPolicy, frostfs_cli: FrostfsCli, default_wallet: WalletInfo) -> str:
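        """Class-scoped container created with an APE rule that allows all operations to everyone."""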
        container_request = ContainerRequest(placement_policy.value, APE_EVERYONE_ALLOW_ALL)
        return create_container_with_ape(
            container_request,
            frostfs_cli,
            default_wallet,
            self.shell,
            self.cluster,
            self.cluster.default_rpc_endpoint,
        )

    @pytest.fixture(scope="function")
    def original_object(self, grpc_client: GrpcClientWrapper, container: str, object_size: ObjectSize) -> str:
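        """Put an object of the parametrized size into the container and return its ID."""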
        with reporter.step("Generate test object"):
            file = generate_file(object_size.value)

        with reporter.step("Put object"):
            return grpc_client.object.put(file, container, self.cluster.default_rpc_endpoint)

    @pytest.fixture(scope="function", params=[pytest.param(DEFAULT_FILE_SIZE, id="default_file_size")])
    def sized_object(self, grpc_client: GrpcClientWrapper, container: str, request: pytest.FixtureRequest) -> str:
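        """Put an object of an explicit size (DEFAULT_FILE_SIZE unless overridden via indirect parametrization) and return its ID."""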
        size = request.param
        with reporter.step(f"Generate test object of {size} bytes"):
            file = generate_file(size)

        with reporter.step(f"Put object of {size} bytes"):
            return grpc_client.object.put(file, container, self.cluster.default_rpc_endpoint)

    @pytest.fixture(scope="class")
    def container_nodes(self, container: str, grpc_client: GrpcClientWrapper) -> list[ClusterNode]:
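        """Cluster nodes that hold the container according to its placement policy."""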
        return grpc_client.container.nodes(self.cluster.default_rpc_endpoint, container, self.cluster)

    @pytest.fixture(scope="class")
    def non_container_nodes(self, container_nodes: list[ClusterNode]) -> list[ClusterNode]:
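        """Cluster nodes that are not part of the container placement."""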
        return list(set(self.cluster.cluster_nodes) - set(container_nodes))

    def _get_bytes_relative_to_object(self, value: int | str, object_size: int | None = None, part_size: int | None = None) -> int:
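        """
        Resolve a byte value given either as an int or as a simple arithmetic expression
        over the literals "object" and "part" (e.g. "object-500", "part*2"), which are
        substituted with the actual object and part sizes before the expression is evaluated.
        """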
        if isinstance(value, int):
            return value

        if "part" not in value and "object" not in value:
            return int(value)

        if object_size is not None:
            value = value.replace("object", str(object_size))

        if part_size is not None:
            value = value.replace("part", str(part_size))

        return int(eval(value))

    def _get_range_relative_to_object(
        self, rng: str, object_size: int | None = None, part_size: int | None = None, int_values: bool = False
    ) -> str | tuple[int, int]:
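        """
        Split an "offset:length" range string and resolve each side via _get_bytes_relative_to_object.
        Returns the normalized range string, or an (offset, length) tuple when int_values is True.
        """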
        offset, length = rng.split(":")
        offset = self._get_bytes_relative_to_object(offset, object_size, part_size)
        length = self._get_bytes_relative_to_object(length, object_size, part_size)
        return (offset, length) if int_values else f"{offset}:{length}"

    def _sorted_ranges(self, ranges: list[str], payloads: list[str], sizes: list[int]) -> tuple[list, list, list]:
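        """Sort the parallel ranges/payloads/sizes lists by the left border of each range."""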
        sorted_ranges = []
        sorted_payloads = []
        sorted_sizes = []

        # Sort by the left border of the range
        for r, p, s in sorted(zip(ranges, payloads, sizes), key=lambda t: int(t[0].split(":")[0])):
            sorted_ranges.append(r)
            sorted_payloads.append(p)
            sorted_sizes.append(s)

        return sorted_ranges, sorted_payloads, sorted_sizes

    def _verify_range_with_patch(
        self,
        grpc_client: GrpcClientWrapper,
        cid: str,
        original_oid: str,
        patched_oid: str,
        computed_offset: int,
        offset: int,
        length: int | None = None,
        payload_path: str | None = None,
        payload_size: int | None = None,
        original_size: int | None = None,
        expected_size: int | None = None,
        endpoint: str | None = None,
    ) -> int:
        """
        Verify the hash of the unmodified part preceding the patch and the hash of the patched part itself.

        Args:
            `computed_offset` - Iteratively calculated offset within the patched object.
            `offset` - Offset of the current patch in the original object.
            `length` - Length of the current patch range in the original object.
            `original_size` - Size of the original object.
            `expected_size` - Expected size of the patched object after applying the previous patches.

        Returns:
            `int` - Offset within the patched object after applying the current patch.
        """

        if not endpoint:
            endpoint = self.cluster.default_rpc_endpoint

        # Verify part of object that is between current and next range
        if offset > computed_offset and payload_size is not None:
            with reporter.step(f"Check that range [{computed_offset}:{offset - computed_offset}] has not changed"):
                with reporter.step("Original object range hash"):
                    expected_hash = grpc_client.object.hash(
                        endpoint, cid, original_oid, range=f"{computed_offset}:{offset - computed_offset}"
                    )

                with reporter.step("Patched object range hash"):
                    patched_hash = grpc_client.object.hash(
                        endpoint, cid, patched_oid, range=f"{computed_offset}:{offset - computed_offset}"
                    )

                assert expected_hash == patched_hash, "Hash of patched object does not match original"

        # Verify end of object if it is not covered by patch
        if payload_path is None and payload_size is None:
            with reporter.step("Check that range following patch has not changed"):
                with reporter.step("Original object range hash"):
                    expected_hash = grpc_client.object.hash(endpoint, cid, original_oid, range=f"{offset}:{original_size - offset}")

                with reporter.step("Patched object range hash"):
                    patched_hash = grpc_client.object.hash(
                        endpoint, cid, patched_oid, range=f"{computed_offset}:{expected_size - computed_offset}"
                    )

                assert expected_hash == patched_hash, "Hash of patched object does not match original"

            return expected_size - computed_offset

        # After verifying the unmodified part, move the pointer to the patched part
        computed_offset = max(computed_offset, offset)

        # If part of the object is deleted, the pointer does not change
        if payload_size == 0:
            return computed_offset

        # Verify patch
        with reporter.step(f"Check that patch for range [{computed_offset}:{length}] has been applied"):
            with reporter.step("Payload hash"):
                expected_hash = get_file_hash(payload_path)

            with reporter.step("Patched range hash"):
                patched_hash = grpc_client.object.hash(endpoint, cid, patched_oid, range=f"{computed_offset}:{payload_size}")

            assert expected_hash == patched_hash, "Hash of patched object does not match payload"

        return computed_offset + payload_size

def _verify_object_payload(
|
|
self,
|
|
grpc_client: GrpcClientWrapper,
|
|
cid: str,
|
|
object_size: int,
|
|
original_oid: str,
|
|
patched_oid: str,
|
|
ranges: list[str],
|
|
payloads: list[str],
|
|
sizes: list[int],
|
|
endpoint: str = None,
|
|
):
|
|
"""
|
|
Method for verifying payload hashes of patched object with expected one.
|
|
|
|
Range of unchanged part and range of patched part are iteratively verified,
|
|
and at end remaining unchanged part is verified, if there is one.
|
|
|
|
Example: `some-object-data-123-354-999`
|
|
|
|
Patched: `some_patch_data-123-111-999`
|
|
|
|
Parts for verification:
|
|
- First iteration:
|
|
- Unchanged part [0:4]: `some`
|
|
- Patch part [4:7]: `_patch_`
|
|
- Second iteration:
|
|
- Unchanged part [11:9]: `data-123-`
|
|
- Patch part [20:3]: `111`
|
|
- Remaining part:
|
|
- Unchanged part [23:4]: `-999`
|
|
"""
|
|
|
|
computed_offset = 0
|
|
expected_size = object_size
|
|
ranges, payloads, sizes = self._sorted_ranges(ranges, payloads, sizes)
|
|
|
|
for i, _ in enumerate(ranges):
|
|
offset, length = self._get_range_relative_to_object(ranges[i], int_values=True)
|
|
expected_size += sizes[i] - length
|
|
computed_offset = self._verify_range_with_patch(
|
|
grpc_client,
|
|
cid,
|
|
original_oid,
|
|
patched_oid,
|
|
computed_offset,
|
|
offset,
|
|
length,
|
|
payloads[i],
|
|
sizes[i],
|
|
endpoint=endpoint,
|
|
)
|
|
|
|
if computed_offset < expected_size:
|
|
self._verify_range_with_patch(
|
|
grpc_client,
|
|
cid,
|
|
original_oid,
|
|
patched_oid,
|
|
computed_offset,
|
|
offset + length,
|
|
original_size=object_size,
|
|
expected_size=expected_size,
|
|
endpoint=endpoint,
|
|
)
|
|
|
|
    @allure.title("Patch simple object payload (range={offset}:{length}, payload_size={payload_size}, policy={placement_policy})")
    @pytest.mark.parametrize("object_size", ["simple"], indirect=True)
    @pytest.mark.parametrize(
        "offset, length, payload_size",
        [
            # String "object" denotes size of object.
            # Do not change size
            [0, 10, 10],
            [200, 123, 123],
            ["object-500", 500, 500],
            [0, "object", "object"],
            # Increase size
            [0, 0, 100],
            [500, 0, 50],
            ["object", 0, 30],
            [0, "object", "object+100"],
            ["object-100", 100, 200],
            # Decrease size
            [0, 50, 0],
            [200, 20, 0],
            [300, 100, 50],
            [0, "object", "object-100"],
            # TODO: Empty payload is temporarily not supported for EC policy
            # (0, "object", 0),
        ],
    )
    def test_patch_simple_object(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        original_object: str,
        object_size: ObjectSize,
        offset: int | str,
        length: int | str,
        payload_size: int | str,
    ):
        relative_offset = self._get_bytes_relative_to_object(offset, object_size.value)
        relative_length = self._get_bytes_relative_to_object(length, object_size.value)
        relative_size = self._get_bytes_relative_to_object(payload_size, object_size.value)
        expected_size = object_size.value + relative_size - relative_length
        patch_range = f"{relative_offset}:{relative_length}"

        with reporter.step("Generate payload object"):
            patch_payload = generate_file(relative_size)

        with reporter.step("Patch simple object"):
            patched_oid = grpc_client.object.patch(
                container,
                original_object,
                self.cluster.default_rpc_endpoint,
                ranges=[patch_range],
                payloads=[patch_payload],
                timeout="200s",
            )
            assert patched_oid != original_object, "Patched object's OID must be different from original one"

        with reporter.step("Head patched object and check that length is equal to the expected one"):
            patched_info: dict = grpc_client.object.head(container, patched_oid, self.cluster.default_rpc_endpoint)
            patched_size = int(patched_info["header"]["payloadLength"])
            assert patched_size == expected_size, f"Size of object does not match expected size: {patched_size}"

        with reporter.step("Check range hashes to ensure object was modified correctly"):
            self._verify_object_payload(
                grpc_client,
                container,
                object_size.value,
                original_object,
                patched_oid,
                [patch_range],
                [patch_payload],
                [relative_size],
            )

    @allure.title("Patch complex object payload (range={offset}:{length}, payload_size={payload_size}, policy={placement_policy})")
    @pytest.mark.parametrize("object_size", ["complex"], indirect=True)
    @pytest.mark.parametrize(
        "offset, length, payload_size",
        [
            # Strings "object" and "part" denote size of object and its part, respectively.
            ["part", 100, 50],
            ["object-part", "part", "part"],
            [0, "part", "part+100"],
            ["part*2", "part", "part-200"],
            ["part-1", "part", "part-100"],
            ["part+1", "part-1", "part+100"],
            # TODO: Empty payload is temporarily not supported for EC policy
            # ("part", "part", 0),
        ],
    )
    def test_patch_complex_object(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        original_object: str,
        object_size: ObjectSize,
        max_object_size: int,
        offset: int | str,
        length: int | str,
        payload_size: int | str,
    ):
        relative_offset = self._get_bytes_relative_to_object(offset, object_size.value, max_object_size)
        relative_length = self._get_bytes_relative_to_object(length, object_size.value, max_object_size)
        relative_size = self._get_bytes_relative_to_object(payload_size, object_size.value, max_object_size)
        expected_size = object_size.value + relative_size - relative_length
        patch_range = f"{relative_offset}:{relative_length}"

        with reporter.step("Generate payload object"):
            patch_payload = generate_file(relative_size)

        with reporter.step("Patch complex object"):
            patched_oid = grpc_client.object.patch(
                container,
                original_object,
                self.cluster.default_rpc_endpoint,
                ranges=[patch_range],
                payloads=[patch_payload],
                timeout="200s",
            )
            assert patched_oid != original_object, "Patched object's OID must be different from original one"

        with reporter.step("Head patched object and check that length is equal to the expected one"):
            patched_info: dict = grpc_client.object.head(container, patched_oid, self.cluster.default_rpc_endpoint)
            patched_size = int(patched_info["header"]["payloadLength"])
            assert patched_size == expected_size, f"Size of object does not match expected size: {patched_size}"

        with reporter.step("Check range hashes to ensure object was modified correctly"):
            self._verify_object_payload(
                grpc_client,
                container,
                object_size.value,
                original_object,
                patched_oid,
                [patch_range],
                [patch_payload],
                [relative_size],
            )

    @allure.title("Patch simple object with complex one and vice versa (policy={placement_policy})")
    def test_replace_simple_object_with_complex_one_and_rollback(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        simple_object_size: ObjectSize,
        complex_object_size: ObjectSize,
    ):
        with reporter.step(f"Generate {simple_object_size} and {complex_object_size} objects"):
            simple_file = generate_file(simple_object_size.value)
            complex_file = generate_file(complex_object_size.value)

        with reporter.step("Put simple object"):
            simple_oid = grpc_client.object.put(simple_file, container, self.cluster.default_rpc_endpoint)

        with reporter.step("Completely replace simple object with complex one"):
            patched_oid = grpc_client.object.patch(
                container,
                simple_oid,
                self.cluster.default_rpc_endpoint,
                ranges=[f"0:{simple_object_size.value}"],
                payloads=[complex_file.path],
                timeout="200s",
            )
            assert simple_oid != patched_oid, "Patched object's OID must be different from simple one"

        with reporter.step("Get patched object and make sure it is identical to complex one"):
            patched_file = grpc_client.object.get(container, patched_oid, self.cluster.default_rpc_endpoint)
            assert get_file_hash(patched_file) == get_file_hash(complex_file), "Patched object is not identical to complex one"

        complex_oid = patched_oid

        with reporter.step("Completely replace complex object with simple one"):
            patched_oid = grpc_client.object.patch(
                container,
                complex_oid,
                self.cluster.default_rpc_endpoint,
                ranges=[f"0:{complex_object_size.value}"],
                payloads=[simple_file.path],
                timeout="200s",
            )
            assert patched_oid != complex_oid, "Patched object's OID must be different from complex one"

        with reporter.step("Get patched object and make sure it is identical to simple one"):
            patched_file = grpc_client.object.get(container, patched_oid, self.cluster.default_rpc_endpoint)
            assert get_file_hash(patched_file) == get_file_hash(simple_file), "Patched object is not identical to simple one"

    # TODO: Empty payload is temporarily not supported for EC policy
    @allure.title("Reduce object payload to zero length with iterative patching")
    @pytest.mark.parametrize("placement_policy", ["rep"], indirect=True)
    @pytest.mark.parametrize("sized_object", [OPERATIONS_TO_REDUCE], indirect=True)
    def test_iterative_reduction_object_payload_by_patching_rep(self, grpc_client: GrpcClientWrapper, container: str, sized_object: str):
        oid = sized_object
        previous_oids = {oid}
        with reporter.step("Generate empty payload object"):
            patch_payload = generate_file(EMPTY_FILE_SIZE)

        with reporter.step(f"Iteratively patch {OPERATIONS_TO_REDUCE} times by 1 byte"):
            for i in range(OPERATIONS_TO_REDUCE):
                with reporter.step(f"Patch {i + 1}"):
                    oid = grpc_client.object.patch(
                        container,
                        oid,
                        self.cluster.default_rpc_endpoint,
                        ranges=["0:1"],
                        payloads=[patch_payload],
                        timeout="200s",
                    )
                    assert oid not in previous_oids, f"New OID expected, previous ones: {previous_oids}"
                    previous_oids.add(oid)

        with reporter.step("Head object and make sure its size is 0"):
            patched_info: dict = grpc_client.object.head(container, oid, self.cluster.default_rpc_endpoint)
            payload_length = int(patched_info["header"]["payloadLength"])
            assert payload_length == 0, f"Expected file size 0 bytes, received: {payload_length}"

    @allure.title(
        "[NEGATIVE] Non-zero length patch cannot be applied to end of object (object_size={object_size}, policy={placement_policy})"
    )
    def test_patch_end_of_object_with_non_zero_length(
        self, grpc_client: GrpcClientWrapper, container: str, original_object: str, object_size: ObjectSize
    ):
        patch_range = f"{object_size.value}:1"
        with reporter.step("Generate payload object"):
            patch_payload = generate_file(DEFAULT_FILE_SIZE)

        with reporter.step("Try to patch object and wait for an exception"):
            with pytest.raises(Exception, match="out of range"):
                grpc_client.object.patch(
                    container,
                    original_object,
                    self.cluster.default_rpc_endpoint,
                    ranges=[patch_range],
                    payloads=[patch_payload],
                    timeout="200s",
                )

    @allure.title(
        "[NEGATIVE] Patch with out of range offset cannot be applied "
        "(offset={offset}, object_size={object_size}, policy={placement_policy})"
    )
    @pytest.mark.parametrize("offset", [-1, "object+1"])
    def test_patch_with_out_of_range_offset(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        original_object: str,
        object_size: ObjectSize,
        offset: int | str,
    ):
        offset = self._get_bytes_relative_to_object(offset, object_size.value)
        patch_range = f"{offset}:5"
        with reporter.step("Generate payload object"):
            patch_payload = generate_file(DEFAULT_FILE_SIZE)

        with reporter.step("Try to patch object and wait for an exception"):
            # Offset '-1' produces an "invalid syntax" error
            with pytest.raises(Exception, match="patch offset exceeds object size|invalid syntax"):
                grpc_client.object.patch(
                    container,
                    original_object,
                    self.cluster.default_rpc_endpoint,
                    ranges=[patch_range],
                    payloads=[patch_payload],
                    timeout="200s",
                )

    @allure.title(
        "Patch object with multiple ranges and payloads "
        "(ranges={ranges}, payload_sizes={payload_sizes}, object_size={object_size}, policy={placement_policy})"
    )
    @pytest.mark.parametrize(
        "ranges, payload_sizes",
        [
            # String "object" denotes size of object.
            [["0:0", "0:0"], [5, 10]],
            [["0:10", "10:20", "30:100", "130:0"], [10, 50, 100, 30]],
            [["100:100", "0:1", "500:200"], [100, 1, 400]],
            [["0:object-1", "object-1:1"], ["object-1", 1]],
        ],
        ids=["insert_insert", "order_ranges", "disorder_ranges", "replace_object"],
    )
    def test_patch_object_with_multiple_ranges_and_payloads(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        original_object: str,
        object_size: ObjectSize,
        ranges: list[str],
        payload_sizes: list[int | str],
    ):
        patch_ranges = [self._get_range_relative_to_object(rng, object_size.value) for rng in ranges]
        patch_sizes = [self._get_bytes_relative_to_object(size, object_size.value) for size in payload_sizes]
        with reporter.step("Generate multiple test objects"):
            patch_payloads = [generate_file(size) for size in patch_sizes]

        expected_size = object_size.value
        for i, _ in enumerate(patch_ranges):
            _, length = self._get_range_relative_to_object(patch_ranges[i], object_size.value, int_values=True)
            expected_size += patch_sizes[i] - length

        with reporter.step("Patch object with multiple ranges and payloads"):
            patched_oid = grpc_client.object.patch(
                container,
                original_object,
                self.cluster.default_rpc_endpoint,
                patch_ranges,
                patch_payloads,
                timeout="200s",
            )
            assert patched_oid != original_object, "Patched object's OID must be different from original one"

        with reporter.step("Head patched object and make sure it changes size"):
            patched_info: dict = grpc_client.object.head(container, patched_oid, self.cluster.default_rpc_endpoint)
            patched_size = int(patched_info["header"]["payloadLength"])
            assert patched_size == expected_size, f"Size of object does not match expected size: {patched_size}"

        with reporter.step("Check range hashes to ensure object was modified correctly"):
            self._verify_object_payload(
                grpc_client,
                container,
                object_size.value,
                original_object,
                patched_oid,
                ranges=patch_ranges,
                payloads=patch_payloads,
                sizes=patch_sizes,
            )

    @allure.title("[NEGATIVE] Patch cannot be applied with range collisions (ranges={ranges}, policy={placement_policy})")
    @pytest.mark.parametrize(
        "ranges",
        # String "object" denotes size of object.
        [["0:1", "1:2", "2:3"], ["0:100", "10:50"], ["0:object", "object-1:1"]],
        ids=["left_overlap", "full_overlap", "object_overlap"],
    )
    def test_patch_object_with_multiple_range_collisions(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        sized_object: str,
        ranges: list[str],
    ):
        patch_ranges = [self._get_range_relative_to_object(rng, DEFAULT_FILE_SIZE) for rng in ranges]
        with reporter.step("Generate multiple payload objects"):
            payload_file = generate_file(DEFAULT_FILE_SIZE)
            patch_payloads = [payload_file for _ in ranges]

        with reporter.step("Try to patch object with invalid range and catch exception"):
            with pytest.raises(Exception, match="invalid patch offset order"):
                grpc_client.object.patch(
                    container,
                    sized_object,
                    self.cluster.default_rpc_endpoint,
                    patch_ranges,
                    patch_payloads,
                    timeout="200s",
                )

    @allure.title(
        "[NEGATIVE] Patch cannot be applied if ranges and payloads do not match "
        "(ranges={ranges}, payloads={payload_count}, policy={placement_policy})"
    )
    @pytest.mark.parametrize(
        "ranges, payload_count",
        [(["0:1", "5:5", "20:11"], 1), (["22:1"], 5), ([], 3), (["50:10", "90:20"], 0)],
        ids=["more_ranges", "more_payloads", "no_ranges", "no_payloads"],
    )
    def test_patch_with_ranges_do_not_match_payloads(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        sized_object: str,
        ranges: list[str],
        payload_count: int,
    ):
        with reporter.step("Generate multiple payload objects"):
            payload_file = generate_file(DEFAULT_FILE_SIZE)
            patch_payloads = [payload_file for _ in range(payload_count)]

        with reporter.step("Try to patch object with mismatched ranges and payloads and catch exception"):
            with pytest.raises(Exception, match="number of ranges and payloads are not equal"):
                grpc_client.object.patch(container, sized_object, self.cluster.default_rpc_endpoint, ranges, patch_payloads, timeout="200s")

    @allure.title("[NEGATIVE] Patch cannot be applied with non-existent payload (policy={placement_policy})")
    def test_patch_with_non_existent_payload(self, grpc_client: GrpcClientWrapper, container: str, sized_object: str):
        with reporter.step("Try to patch object with non-existent payload and catch exception"):
            with pytest.raises(Exception, match="no such file or directory"):
                grpc_client.object.patch(
                    container,
                    sized_object,
                    self.cluster.default_rpc_endpoint,
                    ranges=["20:300"],
                    payloads=["non_existent_file_path"],
                    timeout="200s",
                )

    @allure.title("[NEGATIVE] Patch cannot be applied to deleted object (object_size={object_size}, policy={placement_policy})")
    def test_patch_deleted_object(self, grpc_client: GrpcClientWrapper, container: str, original_object: str):
        patch_range = "0:10"
        with reporter.step("Generate payload object"):
            patch_payload = generate_file(DEFAULT_FILE_SIZE)

        with reporter.step("Delete object"):
            grpc_client.object.delete(container, original_object, self.cluster.default_rpc_endpoint)

        with reporter.step("Try to patch deleted object and catch exception"):
            with pytest.raises(Exception, match="object already removed"):
                grpc_client.object.patch(
                    container,
                    original_object,
                    self.cluster.default_rpc_endpoint,
                    ranges=[patch_range],
                    payloads=[patch_payload],
                    timeout="200s",
                )

    @allure.title("[NEGATIVE] Patch cannot be applied to tombstone object (object_size={object_size}, policy={placement_policy})")
    def test_patch_tombstone_object(self, grpc_client: GrpcClientWrapper, container: str, original_object: str):
        patch_range = "0:10"
        with reporter.step("Generate payload object"):
            patch_payload = generate_file(DEFAULT_FILE_SIZE)

        with reporter.step("Delete object"):
            tombstone_oid = grpc_client.object.delete(container, original_object, self.cluster.default_rpc_endpoint)

        with reporter.step("Try to patch tombstone object and catch exception"):
            with pytest.raises(Exception, match="non-regular object can't be patched"):
                grpc_client.object.patch(
                    container,
                    tombstone_oid,
                    self.cluster.default_rpc_endpoint,
                    ranges=[patch_range],
                    payloads=[patch_payload],
                    timeout="200s",
                )

    @allure.title("[NEGATIVE] Patch cannot be applied to locked object (object_size={object_size}, policy={placement_policy})")
    def test_patch_locked_object(self, grpc_client: GrpcClientWrapper, container: str, original_object: str):
        patch_range = "0:10"
        with reporter.step("Generate payload object"):
            patch_payload = generate_file(DEFAULT_FILE_SIZE)

        current_epoch = self.get_epoch()

        with reporter.step("Lock object"):
            locked_oid = grpc_client.object.lock(
                container,
                original_object,
                self.cluster.default_rpc_endpoint,
                expire_at=current_epoch + 1,
            )

        with reporter.step("Try to patch locked object and catch exception"):
            with pytest.raises(Exception, match="non-regular object can't be patched"):
                grpc_client.object.patch(
                    container,
                    locked_oid,
                    self.cluster.default_rpc_endpoint,
                    ranges=[patch_range],
                    payloads=[patch_payload],
                    timeout="200s",
                )

    @allure.title("[NEGATIVE] Patch cannot be applied to linked object (policy={placement_policy})")
    @pytest.mark.parametrize("object_size", ["complex"], indirect=True)
    def test_patch_link_object(self, grpc_client: GrpcClientWrapper, container: str, original_object: str):
        patch_range = "0:10"
        with reporter.step("Generate payload object"):
            patch_payload = generate_file(DEFAULT_FILE_SIZE)

        with reporter.step("Get link of complex object"):
            object_info: dict = grpc_client.object.head(
                container,
                original_object,
                self.cluster.default_rpc_endpoint,
                is_raw=True,
            )
            link_oid = object_info["link"]

        with reporter.step("Try to patch link object and catch exception"):
            with pytest.raises(Exception, match="linking object can't be patched"):
                grpc_client.object.patch(
                    container,
                    link_oid,
                    self.cluster.default_rpc_endpoint,
                    ranges=[patch_range],
                    payloads=[patch_payload],
                    timeout="200s",
                )

    @allure.title("[NEGATIVE] Patch cannot be applied to part of complex object (policy={placement_policy})")
    @pytest.mark.parametrize("placement_policy", ["rep"], indirect=True)
    @pytest.mark.parametrize("object_size", ["complex"], indirect=True)
    def test_patch_part_of_complex_object_rep(self, grpc_client: GrpcClientWrapper, container: str, original_object: str):
        with reporter.step("Get parts of complex object"):
            parts = grpc_client.object.parts(container, original_object, self.cluster.cluster_nodes[0])
            assert parts, f"Expected list of OIDs of object parts: {parts}"

        part_oid = parts[0]

        with reporter.step("Try to patch part of complex object and catch exception"):
            with pytest.raises(Exception, match="complex object parts can't be patched"):
                grpc_client.object.patch(
                    container,
                    part_oid,
                    self.cluster.default_rpc_endpoint,
                    new_attrs="some_key=some_value",
                    timeout="200s",
                )

    @allure.title("[NEGATIVE] Patch cannot be applied to EC chunk (object_size={object_size}, policy={placement_policy})")
    @pytest.mark.parametrize("placement_policy", ["ec"], indirect=True)
    def test_patch_ec_chunk(self, grpc_client: GrpcClientWrapper, container: str, original_object: str):
        with reporter.step("Get chunks of object"):
            chunks = grpc_client.object.chunks.get_all(self.cluster.default_rpc_endpoint, container, original_object)
            assert chunks, f"Expected object chunks, but none were found: {chunks}"

        with reporter.step("Try to patch chunk of object and catch exception"):
            with pytest.raises(Exception, match="complex object parts can't be patched"):
                grpc_client.object.patch(
                    container,
                    chunks[0].object_id,
                    self.cluster.default_rpc_endpoint,
                    new_attrs="some_key=some_value",
                    timeout="200s",
                )

    @allure.title("Patch object attributes (ranges={ranges}, new_attrs={new_attrs}, replace={replace}, policy={placement_policy})")
    @pytest.mark.parametrize("replace", [True, False])
    @pytest.mark.parametrize("new_attrs", [{"key_1": "val_1"}, {"key_1": "20", "key_2": "false", "FileName": "new-object"}])
    @pytest.mark.parametrize("ranges", [[], ["0:10", "20:50"]])
    def test_patch_object_attributes(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        simple_object_size: ObjectSize,
        new_attrs: dict,
        replace: bool,
        ranges: list[str],
    ):
        with reporter.step("Generate simple object"):
            simple_file = generate_file(simple_object_size.value)

        with reporter.step("Generate multiple payload objects"):
            payload_file = generate_file(DEFAULT_FILE_SIZE)
            patch_payloads = [payload_file for _ in ranges]

        patch_attrs = ",".join(f"{k}={v}" for k, v in new_attrs.items())

        with reporter.step("Put simple object" + (" with attributes" if replace else "")):
            original_oid = grpc_client.object.put(
                simple_file,
                container,
                self.cluster.default_rpc_endpoint,
                attributes={"key_1": "1", "key_2": "true", "key_3": "val_3"} if replace else None,
            )

        with reporter.step("Get simple object attributes"):
            original_info: dict = grpc_client.object.head(container, original_oid, self.cluster.default_rpc_endpoint)
            original_attrs: dict = original_info["header"]["attributes"]

        expected_attrs = {}
        if not replace:
            expected_attrs.update(original_attrs)
        expected_attrs.update(new_attrs)

        with reporter.step("Patch simple object attributes" + (" with replace" if replace else "")):
            patched_oid = grpc_client.object.patch(
                container,
                original_oid,
                self.cluster.default_rpc_endpoint,
                ranges=ranges,
                payloads=patch_payloads,
                new_attrs=patch_attrs,
                replace_attrs=replace,
                timeout="200s",
            )
            assert patched_oid != original_oid, "Patched object's OID must be different from original one"

        with reporter.step("Get patched object attributes and make sure they are as expected"):
            patched_info: dict = grpc_client.object.head(container, patched_oid, self.cluster.default_rpc_endpoint)
            patched_attrs: dict = patched_info["header"]["attributes"]
            assert (
                patched_attrs == expected_attrs
            ), f"Attributes of patched object do not match expected ones\nPatched attrs: {patched_attrs}\nExpected attrs: {expected_attrs}"

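    # The two tests below route requests through container and non-container nodes.
    # The rep1select2 / ec1.1select2 policies place the container on two nodes only,
    # so the remaining cluster nodes serve as the non-container nodes.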
    @allure.title("Patch an object via container nodes (object_size={object_size}, policy={placement_policy})")
    @pytest.mark.parametrize(
        "placement_policy",
        [
            PlacementPolicy("rep1select2", PlacementRule.REP_1_FOR_2_NODES_PLACEMENT_RULE),
            PlacementPolicy("ec1.1select2", PlacementRule.EC_1_1_FOR_2_NODES_PLACEMENT_RULE),
        ],
        ids=["rep1select2", "ec1.1select2"],
        indirect=True,
    )
    def test_patch_via_container_node(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        container_nodes: list[ClusterNode],
        object_size: ObjectSize,
    ):
        with reporter.step("Generate test object"):
            original_file = generate_file(object_size.value)

        with reporter.step("Generate payload object"):
            patch_payload = generate_file(DEFAULT_FILE_SIZE)

        patch_range = "0:50"

        with reporter.step("Put object via container node"):
            original_oid = grpc_client.object.put(
                original_file,
                container,
                container_nodes[0].storage_node.get_rpc_endpoint(),
            )

        with reporter.step("Patch object payload via container node"):
            payload_patched_oid = grpc_client.object.patch(
                container,
                original_oid,
                container_nodes[1].storage_node.get_rpc_endpoint(),
                ranges=[patch_range],
                payloads=[patch_payload],
                timeout="200s",
            )
            assert payload_patched_oid != original_oid, "Patched object's OID must be different from original one"

        with reporter.step("Head patched object via container node and make sure it changes size"):
            patched_info: dict = grpc_client.object.head(
                container,
                payload_patched_oid,
                container_nodes[0].storage_node.get_rpc_endpoint(),
            )

            expected_size = object_size.value + DEFAULT_FILE_SIZE - int(patch_range.split(":")[1])
            patched_size = int(patched_info["header"]["payloadLength"])

            assert patched_size == expected_size, f"Size of object does not match expected size: {patched_size}"

        with reporter.step("Check range hashes to ensure object was modified correctly"):
            self._verify_object_payload(
                grpc_client,
                container,
                object_size.value,
                original_oid,
                payload_patched_oid,
                [patch_range],
                [patch_payload],
                [DEFAULT_FILE_SIZE],
                endpoint=container_nodes[1].storage_node.get_rpc_endpoint(),
            )

        replace = True
        new_attrs = {"FileName": "new-object-name"}
        patch_attrs = ",".join(f"{k}={v}" for k, v in new_attrs.items())

        with reporter.step("Get original object attributes via container node"):
            original_info: dict = grpc_client.object.head(
                container,
                original_oid,
                container_nodes[0].storage_node.get_rpc_endpoint(),
            )
            original_attrs: dict = original_info["header"]["attributes"]

        expected_attrs = {}
        if not replace:
            expected_attrs.update(original_attrs)
        expected_attrs.update(new_attrs)

        with reporter.step("Patch previously patched object attributes via container node"):
            attrs_patched_oid = grpc_client.object.patch(
                container,
                payload_patched_oid,
                container_nodes[1].storage_node.get_rpc_endpoint(),
                new_attrs=patch_attrs,
                replace_attrs=replace,
                timeout="200s",
            )
            assert attrs_patched_oid != payload_patched_oid, "Patched object's OID must be different from previously patched one"

        with reporter.step("Get patched object attributes and make sure they are as expected"):
            patched_info: dict = grpc_client.object.head(container, attrs_patched_oid, container_nodes[0].storage_node.get_rpc_endpoint())
            patched_attrs: dict = patched_info["header"]["attributes"]
            assert (
                patched_attrs == expected_attrs
            ), f"Attributes of patched object do not match expected ones\nPatched attrs: {patched_attrs}\nExpected attrs: {expected_attrs}"

    @allure.title("Patch an object via non container node (object_size={object_size}, policy={placement_policy})")
    @pytest.mark.parametrize(
        "placement_policy",
        [
            PlacementPolicy("rep1select2", PlacementRule.REP_1_FOR_2_NODES_PLACEMENT_RULE),
            PlacementPolicy("ec1.1select2", PlacementRule.EC_1_1_FOR_2_NODES_PLACEMENT_RULE),
        ],
        ids=["rep1select2", "ec1.1select2"],
        indirect=True,
    )
    def test_patch_via_non_container_node(
        self,
        grpc_client: GrpcClientWrapper,
        container: str,
        non_container_nodes: list[ClusterNode],
        object_size: ObjectSize,
    ):
        with reporter.step("Generate test object"):
            original_file = generate_file(object_size.value)

        with reporter.step("Generate payload object"):
            patch_payload = generate_file(DEFAULT_FILE_SIZE)

        patch_range = "0:50"

        with reporter.step("Put object via non container node"):
            original_oid = grpc_client.object.put(
                original_file,
                container,
                non_container_nodes[0].storage_node.get_rpc_endpoint(),
                timeout="300s",
            )

        with reporter.step("Patch object payload via non container node"):
            payload_patched_oid = grpc_client.object.patch(
                container,
                original_oid,
                non_container_nodes[1].storage_node.get_rpc_endpoint(),
                ranges=[patch_range],
                payloads=[patch_payload],
                timeout="300s",
            )
            assert payload_patched_oid != original_oid, "Patched object's OID must be different from original one"

        with reporter.step("Head patched object via non container node and make sure it changes size"):
            patched_info: dict = grpc_client.object.head(
                container,
                payload_patched_oid,
                non_container_nodes[0].storage_node.get_rpc_endpoint(),
            )

            expected_size = object_size.value + DEFAULT_FILE_SIZE - int(patch_range.split(":")[1])
            patched_size = int(patched_info["header"]["payloadLength"])

            assert patched_size == expected_size, f"Size of object does not match expected size: {patched_size}"

        with reporter.step("Check range hashes to ensure object was modified correctly"):
            self._verify_object_payload(
                grpc_client,
                container,
                object_size.value,
                original_oid,
                payload_patched_oid,
                [patch_range],
                [patch_payload],
                [DEFAULT_FILE_SIZE],
                endpoint=non_container_nodes[1].storage_node.get_rpc_endpoint(),
            )

        replace = True
        new_attrs = {"FileName": "new-object-name"}
        patch_attrs = ",".join(f"{k}={v}" for k, v in new_attrs.items())

        with reporter.step("Get original object attributes via non container node"):
            original_info: dict = grpc_client.object.head(
                container,
                original_oid,
                non_container_nodes[0].storage_node.get_rpc_endpoint(),
            )
            original_attrs: dict = original_info["header"]["attributes"]

        expected_attrs = {}
        if not replace:
            expected_attrs.update(original_attrs)
        expected_attrs.update(new_attrs)

        with reporter.step("Patch previously patched object attributes via non container node"):
            attrs_patched_oid = grpc_client.object.patch(
                container,
                payload_patched_oid,
                non_container_nodes[1].storage_node.get_rpc_endpoint(),
                new_attrs=patch_attrs,
                replace_attrs=replace,
                timeout="300s",
            )
            assert attrs_patched_oid != payload_patched_oid, "Patched object's OID must be different from previously patched one"

        with reporter.step("Get patched object attributes and make sure they are as expected"):
            patched_info: dict = grpc_client.object.head(
                container, attrs_patched_oid, non_container_nodes[0].storage_node.get_rpc_endpoint()
            )
            patched_attrs: dict = patched_info["header"]["attributes"]
            assert (
                patched_attrs == expected_attrs
            ), f"Attributes of patched object do not match expected ones\nPatched attrs: {patched_attrs}\nExpected attrs: {expected_attrs}"