# frostfs-testcases/pytest_tests/testsuites/object/test_object_api.py
import logging
import random
import sys
import allure
import pytest
from frostfs_testlib import reporter
from frostfs_testlib.resources.error_patterns import (
    INVALID_LENGTH_SPECIFIER,
    INVALID_OFFSET_SPECIFIER,
    INVALID_RANGE_OVERFLOW,
    INVALID_RANGE_ZERO_LENGTH,
    OBJECT_ALREADY_REMOVED,
    OUT_OF_RANGE,
)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import (
    get_object_from_random_node,
    get_range,
    get_range_hash,
    head_object,
    put_object_to_random_node,
    search_object,
)
from frostfs_testlib.steps.complex_object_actions import get_complex_object_split_ranges
from frostfs_testlib.steps.storage_object import delete_object, delete_objects
from frostfs_testlib.steps.storage_policy import get_complex_object_copies, get_simple_object_copies
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.policy import PlacementPolicy
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_content, get_file_hash

logger = logging.getLogger("NeoLogger")

CLEANUP_TIMEOUT = 10
COMMON_ATTRIBUTE = {"common_key": "common_value"}
# Will upload an object for each attribute set
OBJECT_ATTRIBUTES = [
    None,
    {"key1": 1, "key2": "abc", "common_key": "common_value"},
    {"key1": 2, "common_key": "common_value"},
]

# Config for Range tests
RANGES_COUNT = 4  # by quarters
RANGE_MIN_LEN = 10
RANGE_MAX_LEN = 500
# Static ranges found with issues, keyed by object size: {size: [(offset, length), ...]}
STATIC_RANGES = {}


def generate_ranges(
    storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, cluster: Cluster
) -> list[tuple[int, int]]:
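    """Compose a list of (offset, length) ranges to read from the object.

    Covers quarter-aligned random ranges, edge ranges at the start and end of
    the payload and, for complex objects, ranges crossing child-object split
    boundaries, plus any STATIC_RANGES registered for this object size.
    """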
    file_range_step = storage_object.size / RANGES_COUNT
    file_ranges = []
    file_ranges_to_test = []
    for i in range(0, RANGES_COUNT):
        file_ranges.append((int(file_range_step * i), int(file_range_step)))

    # For simple object we can read all file ranges without too much time for testing
    if storage_object.size < max_object_size:
        file_ranges_to_test.extend(file_ranges)
    # For complex object we need to fetch multiple child objects from different nodes
    else:
        assert (
            storage_object.size >= RANGE_MAX_LEN + max_object_size
        ), f"Complex object size should be at least {max_object_size + RANGE_MAX_LEN}. Current: {storage_object.size}"
        file_ranges_to_test.append((RANGE_MAX_LEN, max_object_size - RANGE_MAX_LEN))
        file_ranges_to_test.extend(get_complex_object_split_ranges(storage_object, shell, cluster))

    # Special cases to read some bytes from start and some bytes from end of object
    file_ranges_to_test.append((0, RANGE_MIN_LEN))
    file_ranges_to_test.append((storage_object.size - RANGE_MIN_LEN, RANGE_MIN_LEN))

    for offset, length in file_ranges:
        range_length = random.randint(RANGE_MIN_LEN, RANGE_MAX_LEN)
        range_start = random.randint(offset, offset + length)
        file_ranges_to_test.append((range_start, min(range_length, storage_object.size - range_start)))

    file_ranges_to_test.extend(STATIC_RANGES.get(storage_object.size, []))
    return file_ranges_to_test


@pytest.fixture(
    # Module scope to upload/delete each file set only once
scope="module"
)
def storage_objects(
    default_wallet: WalletInfo,
    client_shell: Shell,
    cluster: Cluster,
    object_size: ObjectSize,
    placement_policy: PlacementPolicy,
) -> list[StorageObjectInfo]:
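    """Upload the same file once per attribute set into a fresh container and yield the resulting objects."""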
    wallet = default_wallet
    # Separate containers for complex/simple objects to avoid side-effects
    cid = create_container(
        wallet, shell=client_shell, rule=placement_policy.value, endpoint=cluster.default_rpc_endpoint
    )
    file_path = generate_file(object_size.value)
    file_hash = get_file_hash(file_path)

    storage_objects = []
    with reporter.step("Put objects"):
        # We need to upload objects multiple times with different attributes
        for attributes in OBJECT_ATTRIBUTES:
            storage_object_id = put_object_to_random_node(
                wallet=wallet,
                path=file_path,
                cid=cid,
                shell=client_shell,
                cluster=cluster,
                attributes=attributes,
            )
            storage_object = StorageObjectInfo(cid, storage_object_id)
            storage_object.size = object_size.value
            storage_object.wallet = wallet
            storage_object.file_path = file_path
            storage_object.file_hash = file_hash
            storage_object.attributes = attributes
            storage_objects.append(storage_object)

    yield storage_objects

    # Teardown after all tests done with current param
    delete_objects(storage_objects, client_shell, cluster)


@pytest.fixture()
def expected_object_copies(placement_policy: PlacementPolicy) -> int:
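    # Assumption based on this suite's parametrization: the "rep" policy keeps
    # 2 object copies, and any other parametrized policy is expected to keep 4.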
    if placement_policy.name == "rep":
        return 2
    return 4


@pytest.mark.sanity
@pytest.mark.grpc_api
class TestObjectApi(ClusterTestBase):
    @allure.title("Storage policy by native API (obj_size={object_size}, policy={placement_policy})")
    def test_object_storage_policies(
        self,
        storage_objects: list[StorageObjectInfo],
        simple_object_size: ObjectSize,
        expected_object_copies: int,
    ):
        """
        Validate object storage policy
        """
        with reporter.step("Validate storage policy for objects"):
            for storage_object in storage_objects:
                if storage_object.size == simple_object_size.value:
                    copies = get_simple_object_copies(
                        storage_object.wallet,
                        storage_object.cid,
                        storage_object.oid,
                        shell=self.shell,
                        nodes=self.cluster.storage_nodes,
                    )
                else:
                    copies = get_complex_object_copies(
                        storage_object.wallet,
                        storage_object.cid,
                        storage_object.oid,
                        shell=self.shell,
                        nodes=self.cluster.storage_nodes,
                    )
                assert copies == expected_object_copies, f"Expected {expected_object_copies} copies, got {copies}"

    @allure.title("Get object by native API (obj_size={object_size}, policy={placement_policy})")
    def test_get_object_api(self, storage_objects: list[StorageObjectInfo]):
        """
        Validate get object native API
        """
        with reporter.step("Get objects and compare hashes"):
            for storage_object in storage_objects:
                file_path = get_object_from_random_node(
                    storage_object.wallet,
                    storage_object.cid,
                    storage_object.oid,
                    self.shell,
                    cluster=self.cluster,
                )
                file_hash = get_file_hash(file_path)
                assert storage_object.file_hash == file_hash
@allure.title("Head object by native API (obj_size={object_size}, policy={placement_policy})")
    def test_head_object_api(self, storage_objects: list[StorageObjectInfo]):
        """
        Validate head object native API
        """
        storage_object_1 = storage_objects[0]
        storage_object_2 = storage_objects[1]

        with reporter.step("Head object and validate"):
            head_object(
                storage_object_1.wallet,
                storage_object_1.cid,
                storage_object_1.oid,
                shell=self.shell,
                endpoint=self.cluster.default_rpc_endpoint,
            )
            head_info = head_object(
                storage_object_2.wallet,
                storage_object_2.cid,
                storage_object_2.oid,
                shell=self.shell,
                endpoint=self.cluster.default_rpc_endpoint,
            )
            self.check_header_is_presented(head_info, storage_object_2.attributes)
@allure.title("Head deleted object with --raw arg (obj_size={object_size}, policy={placement_policy})")
def test_object_head_raw(self, default_wallet: str, object_size: ObjectSize, placement_policy: PlacementPolicy):
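        """
        Validate head object with --raw flag for a removed object by native API
        """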
with reporter.step("Create container"):
cid = create_container(
default_wallet, self.shell, self.cluster.default_rpc_endpoint, placement_policy.value
)
with reporter.step("Upload object"):
file_path = generate_file(object_size.value)
oid = put_object_to_random_node(default_wallet, file_path, cid, self.shell, self.cluster)
with reporter.step("Delete object"):
delete_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step("Call object head --raw and expect error"):
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
head_object(default_wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint, is_raw=True)
@allure.title("Search objects by native API (obj_size={object_size}, policy={placement_policy})")
    def test_search_object_api(self, storage_objects: list[StorageObjectInfo]):
        """
        Validate object search by native API
        """
        oids = [storage_object.oid for storage_object in storage_objects]
        wallet = storage_objects[0].wallet
        cid = storage_objects[0].cid
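
        # Expected results: each attribute set should match only the object uploaded
        # with it, while the common attribute should match both attributed objects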
        test_table = [
            (OBJECT_ATTRIBUTES[1], oids[1:2]),
            (OBJECT_ATTRIBUTES[2], oids[2:3]),
            (COMMON_ATTRIBUTE, oids[1:3]),
        ]

        with reporter.step("Search objects"):
            # Search with no attributes
            result = search_object(
                wallet,
                cid,
                shell=self.shell,
                endpoint=self.cluster.default_rpc_endpoint,
                expected_objects_list=oids,
                root=True,
            )
            assert sorted(oids) == sorted(result)

            # Search by test table
            for object_filter, expected_oids in test_table:
                result = search_object(
                    wallet,
                    cid,
                    shell=self.shell,
                    endpoint=self.cluster.default_rpc_endpoint,
                    filters=object_filter,
                    expected_objects_list=expected_oids,
                    root=True,
                )
                assert sorted(expected_oids) == sorted(result)
@allure.title("Search objects with removed items (obj_size={object_size})")
def test_object_search_should_return_tombstone_items(self, default_wallet: WalletInfo, object_size: ObjectSize):
"""
Validate object search with removed items
"""
wallet = default_wallet
cid = create_container(wallet, self.shell, self.cluster.default_rpc_endpoint)
with reporter.step("Upload file"):
file_path = generate_file(object_size.value)
file_hash = get_file_hash(file_path)
storage_object = StorageObjectInfo(
cid=cid,
oid=put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster),
size=object_size.value,
wallet=wallet,
file_path=file_path,
file_hash=file_hash,
)
with reporter.step("Search object"):
# Root Search object should return root object oid
result = search_object(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True)
assert result == [storage_object.oid]
with reporter.step("Delete file"):
delete_objects([storage_object], self.shell, self.cluster)
with reporter.step("Search deleted object with --root"):
# Root Search object should return nothing
result = search_object(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, root=True)
assert len(result) == 0
with reporter.step("Search deleted object with --phy should return only tombstones"):
# Physical Search object should return only tombstones
result = search_object(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, phy=True)
assert storage_object.tombstone in result, "Search result should contain tombstone of removed object"
assert storage_object.oid not in result, "Search result should not contain ObjectId of removed object"
for tombstone_oid in result:
header = head_object(
wallet,
cid,
tombstone_oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)["header"]
object_type = header["objectType"]
assert (
object_type == "TOMBSTONE"
), f"Object wasn't deleted properly. Found object {tombstone_oid} with type {object_type}"
@allure.title("Get range hash by native API (obj_size={object_size}, policy={placement_policy})")
@pytest.mark.grpc_api
    def test_object_get_range_hash(self, storage_objects: list[StorageObjectInfo], max_object_size: int):
        """
        Validate get_range_hash for object by native gRPC API
        """
        wallet = storage_objects[0].wallet
        cid = storage_objects[0].cid
        oids = [storage_object.oid for storage_object in storage_objects[:2]]
        file_path = storage_objects[0].file_path

        file_ranges_to_test = generate_ranges(storage_objects[0], max_object_size, self.shell, self.cluster)
        logger.info(f"Ranges used in test: {file_ranges_to_test}")

        for range_start, range_len in file_ranges_to_test:
            range_cut = f"{range_start}:{range_len}"
            with reporter.step(f"Get range hash ({range_cut})"):
                for oid in oids:
                    range_hash = get_range_hash(
                        wallet,
                        cid,
                        oid,
                        shell=self.shell,
                        endpoint=self.cluster.default_rpc_endpoint,
                        range_cut=range_cut,
                    )
                    assert (
                        get_file_hash(file_path, range_len, range_start) == range_hash
                    ), f"Expected range hash to match {range_cut} slice of file payload"
@allure.title("Get range by native API (obj_size={object_size}, policy={placement_policy})")
@pytest.mark.grpc_api
    def test_object_get_range(self, storage_objects: list[StorageObjectInfo], max_object_size: int):
        """
        Validate get_range for object by native gRPC API
        """
        wallet = storage_objects[0].wallet
        cid = storage_objects[0].cid
        oids = [storage_object.oid for storage_object in storage_objects[:2]]
        file_path = storage_objects[0].file_path

        file_ranges_to_test = generate_ranges(storage_objects[0], max_object_size, self.shell, self.cluster)
        logger.info(f"Ranges used in test: {file_ranges_to_test}")

        for range_start, range_len in file_ranges_to_test:
            range_cut = f"{range_start}:{range_len}"
            with reporter.step(f"Get range ({range_cut})"):
                for oid in oids:
                    _, range_content = get_range(
                        wallet,
                        cid,
                        oid,
                        shell=self.shell,
                        endpoint=self.cluster.default_rpc_endpoint,
                        range_cut=range_cut,
                    )
                    assert (
                        get_file_content(file_path, content_len=range_len, mode="rb", offset=range_start)
                        == range_content
                    ), f"Expected range content to match {range_cut} slice of file payload"
@allure.title("[NEGATIVE] Get invalid range by native API (obj_size={object_size}, policy={placement_policy})")
@pytest.mark.grpc_api
def test_object_get_range_negatives(
self,
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range negative for object by native gRPC API
"""
wallet = storage_objects[0].wallet
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_size = storage_objects[0].size
assert (
RANGE_MIN_LEN < file_size
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
file_ranges_to_test: list[tuple(int, int, str)] = [
# Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
# Offset is ok, and length is very-very big (e.g. MaxUint64) so that offset+length is wrapped and still "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
# Length is zero
(10, 0, INVALID_RANGE_ZERO_LENGTH),
# Negative values
(-1, 1, INVALID_OFFSET_SPECIFIER),
(10, -5, INVALID_LENGTH_SPECIFIER),
]
for range_start, range_len, expected_error in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
expected_error = expected_error.format(range=range_cut) if "{range}" in expected_error else expected_error
with reporter.step(f"Get range ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=expected_error):
get_range(
wallet,
cid,
oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
range_cut=range_cut,
)
@allure.title("[NEGATIVE] Get invalid range hash by native API (obj_size={object_size}, policy={placement_policy})")
def test_object_get_range_hash_negatives(
self,
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range_hash negative for object by native gRPC API
"""
wallet = storage_objects[0].wallet
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_size = storage_objects[0].size

        assert (
            RANGE_MIN_LEN < file_size
        ), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"

        file_ranges_to_test: list[tuple[int, int, str]] = [
            # Offset is bigger than the file size, the length is small
            (file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
            # Offset is ok, but offset + length is too big
            (file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
            # Offset is ok, but the length is huge (MaxUint64), so offset + length wraps around and looks "valid"
            (RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
            # Length is zero
            (10, 0, INVALID_RANGE_ZERO_LENGTH),
            # Negative values
            (-1, 1, INVALID_OFFSET_SPECIFIER),
            (10, -5, INVALID_LENGTH_SPECIFIER),
        ]

        for range_start, range_len, expected_error in file_ranges_to_test:
            range_cut = f"{range_start}:{range_len}"
            expected_error = expected_error.format(range=range_cut) if "{range}" in expected_error else expected_error
            with reporter.step(f"Get range hash ({range_cut})"):
                for oid in oids:
                    with pytest.raises(Exception, match=expected_error):
                        get_range_hash(
                            wallet,
                            cid,
                            oid,
                            shell=self.shell,
                            endpoint=self.cluster.default_rpc_endpoint,
                            range_cut=range_cut,
                        )

    def check_header_is_presented(self, head_info: dict, object_header: dict) -> None:
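        """Verify that every expected attribute key is present in the head response with the expected value."""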
        for key_to_check, val_to_check in object_header.items():
            assert key_to_check in head_info["header"]["attributes"], f"Key {key_to_check} is not found in head attributes"
            assert head_info["header"]["attributes"].get(key_to_check) == str(
                val_to_check
            ), f"Attribute {key_to_check} value is not equal to {val_to_check}"