Changes for object size usage

Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
Andrey Berezin 2023-08-02 14:54:03 +03:00
parent 05b5f7d133
commit 6449264dcf
29 changed files with 425 additions and 302 deletions
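For reference, a minimal sketch of the ObjectSize dataclass these changes rely on, reconstructed from how it is used in the diff (two positional constructor arguments and a .value field); the actual definition lives in frostfs_testlib.storage.dataclasses.object_size and may differ:

from dataclasses import dataclass

@dataclass
class ObjectSize:
    # Assumed field names: the first constructor argument is a label ("simple" / "complex"),
    # the second is the size in bytes that gets passed to generate_file().
    name: str
    value: int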

View file

@ -13,6 +13,7 @@ from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.file_utils import generate_file
@ -69,8 +70,8 @@ def wallets(default_wallet: str, temp_directory: str, cluster: Cluster) -> Walle
@pytest.fixture(scope="module")
def file_path(simple_object_size: int) -> str:
yield generate_file(simple_object_size)
def file_path(simple_object_size: ObjectSize) -> str:
yield generate_file(simple_object_size.value)
@pytest.fixture(scope="function")

View file

@ -24,6 +24,7 @@ from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import env_utils, version_utils
@ -115,12 +116,14 @@ def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
@pytest.fixture(scope="session")
def simple_object_size(max_object_size: int) -> int:
yield int(SIMPLE_OBJECT_SIZE) if int(SIMPLE_OBJECT_SIZE) < max_object_size else max_object_size
size = min(int(SIMPLE_OBJECT_SIZE), max_object_size)
return ObjectSize("simple", size)
@pytest.fixture(scope="session")
def complex_object_size(max_object_size: int) -> int:
return max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
size = max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
return ObjectSize("complex", size)
@pytest.fixture(scope="session")
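With simple_object_size and complex_object_size now returning ObjectSize instances instead of bare ints, test code unwraps the byte count via .value. A hypothetical consumer following the pattern applied throughout this commit (generate_file and ObjectSize are imported as shown in the diffs):

from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file

def test_example(simple_object_size: ObjectSize) -> None:
    # .value carries the size in bytes that was previously passed around as a plain int
    file_path = generate_file(simple_object_size.value)
    assert file_path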

View file

@ -8,6 +8,7 @@ from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.failover_utils import (
wait_all_storage_nodes_returned,
@ -45,7 +46,7 @@ class TestFailoverNetwork(ClusterTestBase):
self,
default_wallet: str,
require_multiple_hosts,
simple_object_size: int,
simple_object_size: ObjectSize,
):
"""
Block storage nodes' traffic using iptables and wait for objects to be replicated.
@ -55,7 +56,7 @@ class TestFailoverNetwork(ClusterTestBase):
wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has been blocked
nodes_to_block_count = 2
source_file_path = generate_file(simple_object_size)
source_file_path = generate_file(simple_object_size.value)
cid = create_container(
wallet,
shell=self.shell,

View file

@ -16,6 +16,7 @@ from frostfs_testlib.steps.cli.object import get_object
from frostfs_testlib.steps.node_management import check_node_in_map, check_node_not_in_map
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
@ -75,15 +76,15 @@ class TestFailoverServer(ClusterTestBase):
self,
request: FixtureRequest,
containers: list[StorageContainer],
simple_object_size: int,
complex_object_size: int,
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
) -> StorageObjectInfo:
count_object = request.param
object_size = [simple_object_size, complex_object_size]
object_sizes = [simple_object_size, complex_object_size]
object_list = []
for cont in containers:
for _ in range(count_object):
object_list.append(cont.generate_object(size=random.choice(object_size)))
object_list.append(cont.generate_object(size=random.choice(object_sizes).value))
for storage_object in object_list:
os.remove(storage_object.file_path)

View file

@ -27,6 +27,7 @@ from frostfs_testlib.steps.node_management import (
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
@ -43,6 +44,11 @@ logger = logging.getLogger("NeoLogger")
stopped_nodes: list[StorageNode] = []
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.fixture(scope="function")
@allure.title("Provide File Keeper")
def file_keeper():
@ -92,14 +98,18 @@ def return_stopped_hosts(shell: Shell, cluster: Cluster) -> None:
@pytest.mark.failover
class TestFailoverStorage(ClusterTestBase):
@allure.title("Lose and return storage node's host")
@pytest.mark.parametrize("hard_reboot", [True, False])
@pytest.mark.parametrize("stop_mode", ["hard", "soft"])
@pytest.mark.failover_reboot
def test_lose_storage_node_host(
self, default_wallet, hard_reboot: bool, require_multiple_hosts, simple_object_size
self,
default_wallet,
stop_mode: str,
require_multiple_hosts,
simple_object_size: ObjectSize,
):
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file(simple_object_size)
source_file_path = generate_file(simple_object_size.value)
cid = create_container(
wallet,
shell=self.shell,
@ -118,7 +128,7 @@ class TestFailoverStorage(ClusterTestBase):
stopped_nodes.append(node)
with allure.step(f"Stop host {node}"):
node.host.stop_host("hard" if hard_reboot else "soft")
node.host.stop_host(stop_mode)
new_nodes = wait_object_replication(
cid,
@ -151,11 +161,11 @@ class TestFailoverStorage(ClusterTestBase):
@pytest.mark.parametrize("sequence", [True, False])
@pytest.mark.failover_panic
def test_panic_storage_node_host(
self, default_wallet, require_multiple_hosts, sequence: bool, simple_object_size
self, default_wallet, require_multiple_hosts, sequence: bool, simple_object_size: ObjectSize
):
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file(simple_object_size)
source_file_path = generate_file(simple_object_size.value)
cid = create_container(
wallet,
shell=self.shell,
@ -223,7 +233,7 @@ class TestFailoverStorage(ClusterTestBase):
def test_unhealthy_tree(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
after_run_return_all_stopped_s3,
after_run_return_all_stopped_services,
@ -248,7 +258,7 @@ class TestFailoverStorage(ClusterTestBase):
location_constraint="load-1-1",
)
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into bucket"):
put_object = s3_client.put_object(bucket, file_path)
@ -263,11 +273,6 @@ class TestFailoverStorage(ClusterTestBase):
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.failover
@pytest.mark.failover_empty_map
class TestEmptyMap(ClusterTestBase):
@ -294,7 +299,7 @@ class TestEmptyMap(ClusterTestBase):
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: int,
simple_object_size: ObjectSize,
empty_map_offline_teardown,
):
"""
@ -311,7 +316,7 @@ class TestEmptyMap(ClusterTestBase):
bucket: bucket which contains tested object
simple_object_size: size of object
"""
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
@ -364,7 +369,7 @@ class TestEmptyMap(ClusterTestBase):
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: int,
simple_object_size: ObjectSize,
empty_map_stop_service_teardown,
):
"""
@ -384,7 +389,7 @@ class TestEmptyMap(ClusterTestBase):
bucket: bucket which contains tested object
simple_object_size: size of object
"""
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
@ -438,13 +443,13 @@ class TestEmptyMap(ClusterTestBase):
def test_s3_fstree_blobovnicza_loss_versioning_on(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket()
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
object_versions = []
@ -483,12 +488,12 @@ class TestEmptyMap(ClusterTestBase):
def test_s3_fstree_blobovnicza_loss_versioning_off(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket()
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
@ -526,7 +531,7 @@ class TestEmptyMap(ClusterTestBase):
def test_s3_pilorama_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
simple_object_size: ObjectSize,
versioning_status: VersioningStatus,
cluster_state_controller: ClusterStateController,
):
@ -534,7 +539,7 @@ class TestEmptyMap(ClusterTestBase):
if versioning_status:
s3_helper.set_bucket_versioning(s3_client, bucket, versioning_status)
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
@ -586,8 +591,8 @@ class TestStorageDataLoss(ClusterTestBase):
def test_metabase_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
complex_object_size: int,
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
after_run_return_all_stopped_services: str,
file_keeper: FileKeeper,
@ -598,10 +603,10 @@ class TestStorageDataLoss(ClusterTestBase):
bucket = s3_client.create_bucket()
with allure.step("Put objects into bucket"):
simple_object_path = generate_file(simple_object_size)
simple_object_path = generate_file(simple_object_size.value)
simple_object_key = s3_helper.object_key_from_file_path(simple_object_path)
complex_object_path = generate_file(complex_object_size)
complex_object_path = generate_file(complex_object_size.value)
complex_object_key = s3_helper.object_key_from_file_path(complex_object_path)
s3_client.put_object(bucket, simple_object_path)
@ -651,7 +656,7 @@ class TestStorageDataLoss(ClusterTestBase):
def test_write_cache_loss_on_one_node(
self,
node_under_test: ClusterNode,
simple_object_size: int,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
shards_watcher: ShardsWatcher,
default_wallet: str,
@ -683,7 +688,8 @@ class TestStorageDataLoss(ClusterTestBase):
storage_objects: list[StorageObjectInfo] = []
for _ in range(5):
storage_object = container.generate_object(
simple_object_size, endpoint=node_under_test.storage_node.get_rpc_endpoint()
simple_object_size.value,
endpoint=node_under_test.storage_node.get_rpc_endpoint(),
)
storage_objects.append(storage_object)
@ -738,7 +744,7 @@ class TestStorageDataLoss(ClusterTestBase):
self,
bucket,
s3_client: S3ClientWrapper,
simple_object_size: int,
simple_object_size: ObjectSize,
after_run_return_all_stopped_services,
cluster_state_controller: ClusterStateController,
):
@ -752,7 +758,7 @@ class TestStorageDataLoss(ClusterTestBase):
# waiting for rebalance connection of s3 gate to storage service
sleep(60)
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
@ -762,7 +768,7 @@ class TestStorageDataLoss(ClusterTestBase):
def test_s3_one_pilorama_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket(
@ -775,7 +781,7 @@ class TestStorageDataLoss(ClusterTestBase):
bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
assert bucket_versioning == "Enabled", "Bucket should have enabled versioning"
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
object_versions = []

View file

@ -33,6 +33,7 @@ from frostfs_testlib.steps.node_management import (
)
from frostfs_testlib.steps.storage_policy import get_nodes_with_object, get_simple_object_copies
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import datetime_utils, string_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
@ -54,9 +55,9 @@ class TestNodeManagement(ClusterTestBase):
@pytest.fixture
@allure.title("Create container and pick the node with data")
def create_container_and_pick_node(
self, default_wallet: str, simple_object_size
self, default_wallet: str, simple_object_size: ObjectSize
) -> Tuple[str, StorageNode]:
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
endpoint = self.cluster.default_rpc_endpoint
@ -129,9 +130,9 @@ class TestNodeManagement(ClusterTestBase):
@pytest.mark.add_nodes
def test_add_nodes(
self,
default_wallet,
default_wallet: str,
simple_object_size: ObjectSize,
return_nodes_after_test_run,
simple_object_size,
):
"""
This test removes one node from the cluster, then adds it back. The test uses basic control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
@ -139,7 +140,7 @@ class TestNodeManagement(ClusterTestBase):
wallet = default_wallet
placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"
source_file_path = generate_file(simple_object_size)
source_file_path = generate_file(simple_object_size.value)
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes[1:])
@ -226,13 +227,13 @@ class TestNodeManagement(ClusterTestBase):
@pytest.mark.node_mgmt
@allure.title("Test object copies based on placement policy")
def test_placement_policy(
self, default_wallet, placement_rule, expected_copies, simple_object_size
self, default_wallet, placement_rule, expected_copies, simple_object_size: ObjectSize
):
"""
This test checks object's copies based on container's placement policy.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
self.validate_object_copies(wallet, placement_rule, file_path, expected_copies)
@pytest.mark.parametrize(
@ -292,14 +293,14 @@ class TestNodeManagement(ClusterTestBase):
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size,
simple_object_size: ObjectSize,
):
"""
Based on the container's placement policy, check that storage nodes are picked correctly and the object has
the correct number of copies.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
cid, oid, found_nodes = self.validate_object_copies(
wallet, placement_rule, file_path, expected_copies
)
@ -317,27 +318,28 @@ class TestNodeManagement(ClusterTestBase):
@pytest.mark.node_mgmt
@allure.title("Negative cases for placement policy")
def test_placement_policy_negative(
self, default_wallet, placement_rule, expected_copies, simple_object_size
self, default_wallet, placement_rule, expected_copies, simple_object_size: ObjectSize
):
"""
Negative test for placement policy.
"""
wallet = default_wallet
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"):
self.validate_object_copies(wallet, placement_rule, file_path, expected_copies)
@pytest.mark.node_mgmt
@allure.title("FrostFS object could be dropped using control command")
def test_drop_object(self, default_wallet, complex_object_size: int, simple_object_size: int):
def test_drop_object(
self, default_wallet, complex_object_size: ObjectSize, simple_object_size: ObjectSize
):
"""
Test checks object could be dropped using `frostfs-cli control drop-objects` command.
"""
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
file_path_simple, file_path_complex = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_path_simple = generate_file(simple_object_size.value)
file_path_complex = generate_file(complex_object_size.value)
locode = get_locode_from_random_node(self.cluster)
rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
@ -375,10 +377,10 @@ class TestNodeManagement(ClusterTestBase):
self,
default_wallet,
create_container_and_pick_node,
simple_object_size,
simple_object_size: ObjectSize,
):
wallet = default_wallet
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
cid, node = create_container_and_pick_node
original_oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
@ -415,10 +417,12 @@ class TestNodeManagement(ClusterTestBase):
@pytest.mark.node_mgmt
@allure.title("Put object with stopped node")
def test_stop_node(self, default_wallet, return_nodes_after_test_run, simple_object_size: int):
def test_stop_node(
self, default_wallet, return_nodes_after_test_run, simple_object_size: ObjectSize
):
wallet = default_wallet
placement_rule = "REP 3 SELECT 4 FROM * AS X"
source_file_path = generate_file(simple_object_size)
source_file_path = generate_file(simple_object_size.value)
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes[1:])
alive_node = random.choice(

View file

@ -25,6 +25,7 @@ from frostfs_testlib.steps.complex_object_actions import get_complex_object_spli
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_policy import get_complex_object_copies, get_simple_object_copies
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_content, get_file_hash
@ -90,7 +91,7 @@ def generate_ranges(
@pytest.fixture(
params=[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
# Scope session to upload/delete each file set only once
scope="module",
)
@ -100,8 +101,9 @@ def storage_objects(
wallet = default_wallet
# Separate containers for complex/simple objects to avoid side-effects
cid = create_container(wallet, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
object_size: ObjectSize = request.param
file_path = generate_file(request.param)
file_path = generate_file(object_size.value)
file_hash = get_file_hash(file_path)
storage_objects = []
@ -119,7 +121,7 @@ def storage_objects(
)
storage_object = StorageObjectInfo(cid, storage_object_id)
storage_object.size = request.param
storage_object.size = object_size.value
storage_object.wallet_file_path = wallet
storage_object.file_path = file_path
storage_object.file_hash = file_hash
@ -138,7 +140,10 @@ def storage_objects(
class TestObjectApi(ClusterTestBase):
@allure.title("Validate object storage policy by native API")
def test_object_storage_policies(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], simple_object_size
self,
request: FixtureRequest,
storage_objects: list[StorageObjectInfo],
simple_object_size: ObjectSize,
):
"""
Validate object storage policy
@ -149,7 +154,7 @@ class TestObjectApi(ClusterTestBase):
with allure.step("Validate storage policy for objects"):
for storage_object in storage_objects:
if storage_object.size == simple_object_size:
if storage_object.size == simple_object_size.value:
copies = get_simple_object_copies(
storage_object.wallet_file_path,
storage_object.cid,
@ -265,10 +270,10 @@ class TestObjectApi(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
def test_object_search_should_return_tombstone_items(
self, default_wallet: str, request: FixtureRequest, object_size: int
self, default_wallet: str, request: FixtureRequest, object_size: ObjectSize
):
"""
Validate object search with removed items
@ -281,13 +286,13 @@ class TestObjectApi(ClusterTestBase):
cid = create_container(wallet, self.shell, self.cluster.default_rpc_endpoint)
with allure.step("Upload file"):
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
file_hash = get_file_hash(file_path)
storage_object = StorageObjectInfo(
cid=cid,
oid=put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster),
size=object_size,
size=object_size.value,
wallet_file_path=wallet,
file_path=file_path,
file_hash=file_hash,

View file

@ -15,6 +15,7 @@ from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.steps.storage_object import StorageObjectInfo
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
@ -69,10 +70,11 @@ def storage_objects(
) -> list[StorageObjectInfo]:
epoch = get_epoch(client_shell, cluster)
storage_objects: list[StorageObjectInfo] = []
object_size: ObjectSize = request.param
for node in cluster.storage_nodes:
storage_objects.append(
user_container.generate_object(
request.param,
object_size.value,
epoch + 3,
bearer_token=bearer_token_file_all_allow,
endpoint=node.get_rpc_endpoint(),
@ -93,7 +95,7 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
@pytest.mark.parametrize(
"storage_objects",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
indirect=True,
)
def test_delete_object_with_s3_wallet_bearer(
@ -127,14 +129,14 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
indirect=True,
)
@pytest.mark.parametrize(
"file_size",
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
def test_get_object_with_s3_wallet_bearer_from_all_nodes(
self,
user_container: StorageContainer,
file_size: int,
object_size: ObjectSize,
bearer_token_file_all_allow: str,
request: FixtureRequest,
):
@ -146,7 +148,7 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
with allure.step("Put one object to container"):
epoch = self.get_epoch()
storage_object = user_container.generate_object(
file_size, epoch + 3, bearer_token=bearer_token_file_all_allow
object_size.value, epoch + 3, bearer_token=bearer_token_file_all_allow
)
with allure.step("Try to fetch object from each storage node"):

View file

@ -4,8 +4,13 @@ import allure
import pytest
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object_from_random_node, put_object_to_random_node, head_object
from frostfs_testlib.steps.cli.object import (
get_object_from_random_node,
head_object,
put_object_to_random_node,
)
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from pytest import FixtureRequest
@ -22,10 +27,10 @@ class TestObjectApiLifetime(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
def test_object_api_lifetime(
self, default_wallet: str, request: FixtureRequest, object_size: int
self, default_wallet: str, request: FixtureRequest, object_size: ObjectSize
):
"""
Test object deleted after expiration epoch.
@ -37,7 +42,7 @@ class TestObjectApiLifetime(ClusterTestBase):
endpoint = self.cluster.default_rpc_endpoint
cid = create_container(wallet, self.shell, endpoint)
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
file_hash = get_file_hash(file_path)
epoch = get_epoch(self.shell, self.cluster)

View file

@ -26,6 +26,7 @@ from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import (
LockObjectInfo,
StorageObjectInfo,
@ -75,12 +76,13 @@ def locked_storage_object(
"""
Intention of this fixture is to provide a storage object which is NOT expected to be deleted during the test act phase
"""
object_size: ObjectSize = request.param
with allure.step("Creating locked object"):
current_epoch = ensure_fresh_epoch(client_shell, cluster)
expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME
storage_object = user_container.generate_object(
request.param, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
)
lock_object_id = lock_object(
storage_object.wallet_file_path,
@ -135,11 +137,12 @@ class TestObjectLockWithGrpc(ClusterTestBase):
Intention of this fixture is to provide a new storage object for tests which may delete or corrupt the object or its complementary objects,
so we need a new one each time we ask for it
"""
object_size: ObjectSize = request.param
with allure.step("Creating locked object"):
current_epoch = self.get_epoch()
storage_object = user_container.generate_object(
request.param, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
object_size.value, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
)
lock_object(
storage_object.wallet_file_path,
@ -156,7 +159,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.mark.parametrize(
"locked_storage_object",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
indirect=True,
)
def test_locked_object_cannot_be_deleted(
@ -279,13 +282,13 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
def test_expired_object_should_be_deleted_after_locks_are_expired(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
object_size: ObjectSize,
):
"""
Expired object should be deleted after locks are expired
@ -295,7 +298,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
)
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)
storage_object = user_container.generate_object(
object_size.value, expire_at=current_epoch + 1
)
with allure.step("Lock object for couple epochs"):
lock_object(
@ -347,13 +352,13 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
def test_should_be_possible_to_lock_multiple_objects_at_once(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
object_size: ObjectSize,
):
"""
Should be possible to lock multiple objects at once
@ -368,7 +373,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
with allure.step("Generate three objects"):
for _ in range(3):
storage_objects.append(
user_container.generate_object(object_size, expire_at=current_epoch + 5)
user_container.generate_object(object_size.value, expire_at=current_epoch + 5)
)
lock_object(
@ -402,13 +407,13 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
def test_already_outdated_lock_should_not_be_applied(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
object_size: ObjectSize,
):
"""
Already outdated lock should not be applied
@ -419,7 +424,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)
storage_object = user_container.generate_object(
object_size.value, expire_at=current_epoch + 1
)
expiration_epoch = current_epoch - 1
with pytest.raises(
@ -441,14 +448,14 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
@expect_not_raises()
def test_after_lock_expiration_with_lifetime_user_should_be_able_to_delete_object(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
object_size: ObjectSize,
):
"""
After lock expiration with lifetime user should be able to delete object
@ -458,7 +465,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
)
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 5)
storage_object = user_container.generate_object(
object_size.value, expire_at=current_epoch + 5
)
lock_object(
storage_object.wallet_file_path,
@ -483,14 +492,14 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
@expect_not_raises()
def test_after_lock_expiration_with_expire_at_user_should_be_able_to_delete_object(
self,
request: FixtureRequest,
user_container: StorageContainer,
object_size: int,
object_size: ObjectSize,
):
"""
After lock expiration with expire_at user should be able to delete object
@ -501,7 +510,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 5)
storage_object = user_container.generate_object(
object_size.value, expire_at=current_epoch + 5
)
lock_object(
storage_object.wallet_file_path,
@ -613,7 +624,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.mark.parametrize(
"new_locked_storage_object",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
indirect=True,
)
def test_locked_object_can_be_dropped(

View file

@ -9,6 +9,7 @@ from frostfs_testlib.steps.cli.container import create_container, delete_contain
from frostfs_testlib.steps.cli.object import delete_object, head_object, put_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@ -29,7 +30,7 @@ class TestReplication(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
@allure.title("Test replication")
def test_replication(
@ -37,7 +38,7 @@ class TestReplication(ClusterTestBase):
default_wallet: str,
client_shell: Shell,
cluster: Cluster,
object_size,
object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
):
nodes_count = len(cluster.cluster_nodes)
@ -56,7 +57,7 @@ class TestReplication(ClusterTestBase):
cluster_state_controller.stop_node_host(node_for_rep, mode="hard")
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
with allure.step("Put object"):
oid = put_object(

View file

@ -14,6 +14,7 @@ from frostfs_testlib.steps.acl import (
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@ -82,12 +83,12 @@ class Test_http_bearer(ClusterTestBase):
@allure.title(f"[negative] Put object without bearer token for {EACLRole.OTHERS}")
def test_unable_put_without_bearer_token(
self, simple_object_size: int, user_container: str, eacl_deny_for_others
self, simple_object_size: ObjectSize, user_container: str, eacl_deny_for_others
):
eacl_deny_for_others
upload_via_http_gate_curl(
cid=user_container,
filepath=generate_file(simple_object_size),
filepath=generate_file(simple_object_size.value),
endpoint=self.cluster.default_http_gate_endpoint,
error_pattern="access to object operation denied",
)
@ -96,18 +97,18 @@ class Test_http_bearer(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
def test_put_with_bearer_when_eacl_restrict(
self,
object_size: int,
object_size: ObjectSize,
user_container: str,
eacl_deny_for_others,
bearer_token_no_limit_for_others: str,
):
eacl_deny_for_others
bearer = bearer_token_no_limit_for_others
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
with allure.step(
f"Put object with bearer token for {EACLRole.OTHERS}, then get and verify hashes"
):

View file

@ -17,6 +17,7 @@ from frostfs_testlib.steps.http.http_gate import (
upload_via_http_gate_curl,
verify_object_hash,
)
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
@ -43,7 +44,9 @@ class TestHttpGate(ClusterTestBase):
TestHttpGate.wallet = default_wallet
@allure.title("Test Put over gRPC, Get over HTTP")
def test_put_grpc_get_http(self, complex_object_size: int, simple_object_size: int):
def test_put_grpc_get_http(
self, complex_object_size: ObjectSize, simple_object_size: ObjectSize
):
"""
Test that object can be put using gRPC interface and get using HTTP.
@ -65,9 +68,8 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_1,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
with allure.step("Put objects using gRPC"):
oid_simple = put_object_to_random_node(
@ -102,7 +104,9 @@ class TestHttpGate(ClusterTestBase):
@allure.title("Test Put over HTTP, Get over HTTP")
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@pytest.mark.smoke
def test_put_http_get_http(self, complex_object_size: int, simple_object_size: int):
def test_put_http_get_http(
self, complex_object_size: ObjectSize, simple_object_size: ObjectSize
):
"""
Test that object can be put and get using HTTP interface.
@ -122,9 +126,8 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
with allure.step("Put objects using HTTP"):
oid_simple = upload_via_http_gate(
@ -161,7 +164,7 @@ class TestHttpGate(ClusterTestBase):
],
ids=["simple", "hyphen", "percent"],
)
def test_put_http_get_http_with_headers(self, attributes: dict, simple_object_size: int):
def test_put_http_get_http_with_headers(self, attributes: dict, simple_object_size: ObjectSize):
"""
Test that object can be downloaded using different attributes in HTTP header.
@ -181,7 +184,7 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
with allure.step("Put objects using HTTP with attribute"):
headers = attr_into_header(attributes)
@ -204,7 +207,7 @@ class TestHttpGate(ClusterTestBase):
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@allure.title("Test Expiration-Epoch in HTTP header")
@pytest.mark.parametrize("epoch_gap", [0, 1])
def test_expiration_epoch_in_http(self, simple_object_size: int, epoch_gap):
def test_expiration_epoch_in_http(self, simple_object_size: ObjectSize, epoch_gap: int):
endpoint = self.cluster.default_rpc_endpoint
http_endpoint = self.cluster.default_http_gate_endpoint
min_valid_epoch = get_epoch(self.shell, self.cluster) + epoch_gap
@ -216,7 +219,7 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
oids_to_be_expired = []
oids_to_be_valid = []
@ -269,7 +272,7 @@ class TestHttpGate(ClusterTestBase):
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@allure.title("Test Zip in HTTP header")
def test_zip_in_http(self, complex_object_size: int, simple_object_size: int):
def test_zip_in_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
cid = create_container(
self.wallet,
shell=self.shell,
@ -277,9 +280,8 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
common_prefix = "my_files"
headers1 = {"X-Attribute-FilePath": f"{common_prefix}/file1"}
@ -312,7 +314,7 @@ class TestHttpGate(ClusterTestBase):
@pytest.mark.long
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@allure.title("Test Put over HTTP/Curl, Get over HTTP/Curl for large object")
def test_put_http_get_http_large_file(self, complex_object_size: int):
def test_put_http_get_http_large_file(self, complex_object_size: ObjectSize):
"""
This test checks upload and download using curl with a 'large' object.
Large means an object with a size of up to 20 MB.
@ -325,8 +327,7 @@ class TestHttpGate(ClusterTestBase):
basic_acl=PUBLIC_ACL,
)
obj_size = int(os.getenv("BIG_OBJ_SIZE", complex_object_size))
file_path = generate_file(obj_size)
file_path = generate_file(complex_object_size.value)
with allure.step("Put objects using HTTP"):
oid_gate = upload_via_http_gate(
@ -362,7 +363,9 @@ class TestHttpGate(ClusterTestBase):
@pytest.mark.skip("Skipped due to deprecated PUT via http")
@allure.title("Test Put/Get over HTTP using Curl utility")
def test_put_http_get_http_curl(self, complex_object_size: int, simple_object_size: int):
def test_put_http_get_http_curl(
self, complex_object_size: ObjectSize, simple_object_size: ObjectSize
):
"""
Test checks upload and download over HTTP using curl utility.
"""
@ -373,9 +376,8 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
with allure.step("Put objects using curl utility"):
oid_simple = upload_via_http_gate_curl(

View file

@ -18,6 +18,7 @@ from frostfs_testlib.steps.http.http_gate import (
try_to_get_object_via_passed_request_and_expect_error,
upload_via_http_gate_curl,
)
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@ -51,11 +52,12 @@ class Test_http_headers(ClusterTestBase):
# pytest.lazy_fixture("complex_object_size"),
],
# TODO: Temp disable for v0.37
# ids=["simple object", "complex object"],
ids=["simple object"],
# ids=["simple object size", "complex object size"],
ids=["simple object size"],
scope="class",
)
def storage_objects_with_attributes(self, request: FixtureRequest) -> list[StorageObjectInfo]:
object_size: ObjectSize = request.param
storage_objects = []
wallet = self.wallet
cid = create_container(
@ -65,7 +67,7 @@ class Test_http_headers(ClusterTestBase):
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
file_path = generate_file(request.param)
file_path = generate_file(object_size.value)
for attributes in self.OBJECT_ATTRIBUTES:
storage_object_id = upload_via_http_gate_curl(
cid=cid,

View file

@ -10,6 +10,7 @@ from frostfs_testlib.steps.http.http_gate import (
try_to_get_object_via_passed_request_and_expect_error,
verify_object_hash,
)
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@ -30,9 +31,9 @@ class Test_http_object(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
def test_object_put_get_attributes(self, object_size: int):
def test_object_put_get_attributes(self, object_size: ObjectSize):
"""
Test that object can be put using gRPC interface and get using HTTP.
@ -61,7 +62,7 @@ class Test_http_object(ClusterTestBase):
)
# Generate file
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
# List of Key=Value attributes
obj_key1 = "chapter1"
@ -116,7 +117,7 @@ class Test_http_object(ClusterTestBase):
cid=cid,
attrs=attrs,
endpoint=self.cluster.default_http_gate_endpoint,
hostname=self.cluster.default_http_hostname,
http_hostname=self.cluster.default_http_hostname,
)
with allure.step("[Negative] try to get object: get_by_attribute/$CID/$OID"):
request = f"/get_by_attribute/{cid}/{oid}"

View file

@ -5,6 +5,7 @@ import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@ -25,10 +26,10 @@ class Test_http_streaming(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("complex_object_size")],
ids=["complex object"],
ids=["complex object size"],
)
@pytest.mark.skip("Skipped due to deprecated PUT via http")
def test_object_can_be_put_get_by_streaming(self, object_size: int):
def test_object_can_be_put_get_by_streaming(self, object_size: ObjectSize):
"""
Test that object can be put using gRPC interface and get using HTTP.
@ -51,7 +52,7 @@ class Test_http_streaming(ClusterTestBase):
)
with allure.step("Allocate big object"):
# Generate file
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
with allure.step(
"Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]"

View file

@ -20,6 +20,7 @@ from frostfs_testlib.steps.http.http_gate import (
upload_via_http_gate_curl,
verify_object_hash,
)
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@ -142,11 +143,11 @@ class Test_http_system_header(ClusterTestBase):
return oid, head
@allure.title("[negative] attempt to put object with expired epoch")
def test_unable_put_expired_epoch(self, user_container: str, simple_object_size: int):
def test_unable_put_expired_epoch(self, user_container: str, simple_object_size: ObjectSize):
headers = attr_into_str_header_curl(
{"System-Expiration-Epoch": str(get_epoch(self.shell, self.cluster) - 1)}
)
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
with allure.step(
"Put object using HTTP with attribute Expiration-Epoch where epoch is expired"
):
@ -159,9 +160,11 @@ class Test_http_system_header(ClusterTestBase):
)
@allure.title("[negative] attempt to put object with negative System-Expiration-Duration")
def test_unable_put_negative_duration(self, user_container: str, simple_object_size: int):
def test_unable_put_negative_duration(
self, user_container: str, simple_object_size: ObjectSize
):
headers = attr_into_str_header_curl({"System-Expiration-Duration": "-1h"})
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
with allure.step(
"Put object using HTTP with attribute System-Expiration-Duration where duration is negative"
):
@ -176,9 +179,11 @@ class Test_http_system_header(ClusterTestBase):
@allure.title(
"[negative] attempt to put object with System-Expiration-Timestamp value in the past"
)
def test_unable_put_expired_timestamp(self, user_container: str, simple_object_size: int):
def test_unable_put_expired_timestamp(
self, user_container: str, simple_object_size: ObjectSize
):
headers = attr_into_str_header_curl({"System-Expiration-Timestamp": "1635075727"})
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
with allure.step(
"Put object using HTTP with attribute System-Expiration-Timestamp where duration is in the past"
):
@ -193,9 +198,9 @@ class Test_http_system_header(ClusterTestBase):
@allure.title(
"[negative] Put object using HTTP with attribute System-Expiration-RFC3339 where duration is in the past"
)
def test_unable_put_expired_rfc(self, user_container: str, simple_object_size: int):
def test_unable_put_expired_rfc(self, user_container: str, simple_object_size: ObjectSize):
headers = attr_into_str_header_curl({"System-Expiration-RFC3339": "2021-11-22T09:55:49Z"})
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
@ -208,11 +213,11 @@ class Test_http_system_header(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
@pytest.mark.skip("Temp disable for v0.37")
def test_http_attr_priority_epoch_duration(
self, user_container: str, object_size: int, epoch_duration: int
self, user_container: str, object_size: ObjectSize, epoch_duration: int
):
self.tick_epoch()
epoch_count = 1
@ -221,7 +226,7 @@ class Test_http_system_header(ClusterTestBase):
f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
)
attributes = {SYSTEM_EXPIRATION_EPOCH: expected_epoch, SYSTEM_EXPIRATION_DURATION: "1m"}
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
@ -257,11 +262,11 @@ class Test_http_system_header(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
@pytest.mark.skip("Temp disable for v0.37")
def test_http_attr_priority_dur_timestamp(
self, user_container: str, object_size: int, epoch_duration: int
self, user_container: str, object_size: ObjectSize, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
@ -277,7 +282,7 @@ class Test_http_system_header(ClusterTestBase):
epoch_duration=epoch_duration, epoch=1
),
}
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
@ -313,11 +318,11 @@ class Test_http_system_header(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
@pytest.mark.skip("Temp disable for v0.37")
def test_http_attr_priority_timestamp_rfc(
self, user_container: str, object_size: int, epoch_duration: int
self, user_container: str, object_size: ObjectSize, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
@ -333,7 +338,7 @@ class Test_http_system_header(ClusterTestBase):
epoch_duration=epoch_duration, epoch=1, rfc3339=True
),
}
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
@ -369,11 +374,11 @@ class Test_http_system_header(ClusterTestBase):
# TODO: Temp disabled for v0.37
# [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
[pytest.lazy_fixture("simple_object_size")],
# ids=["simple object", "complex object"],
ids=["simple object"],
# ids=["simple object size", "complex object size"],
ids=["simple object size"],
)
def test_http_rfc_object_unavailable_after_expir(
self, user_container: str, object_size: int, epoch_duration: int
self, user_container: str, object_size: ObjectSize, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
@ -386,7 +391,7 @@ class Test_http_system_header(ClusterTestBase):
epoch_duration=epoch_duration, epoch=2, rfc3339=True
)
}
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):

View file

@ -2,6 +2,7 @@ import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file
@ -11,8 +12,10 @@ from frostfs_testlib.utils.file_utils import generate_file
class TestS3GateACL:
@allure.title("Test S3: Object ACL")
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
def test_s3_object_ACL(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
file_path = generate_file(simple_object_size)
def test_s3_object_ACL(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into bucket, Check ACL is empty"):

View file

@ -4,6 +4,7 @@ import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file
@ -73,8 +74,10 @@ class TestS3GateBucket:
s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
@allure.title("Test S3: create bucket with object lock")
def test_s3_bucket_object_lock(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path = generate_file(simple_object_size)
def test_s3_bucket_object_lock(
self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Create bucket with --no-object-lock-enabled-for-bucket"):
@ -106,10 +109,10 @@ class TestS3GateBucket:
)
@allure.title("Test S3: delete bucket")
def test_s3_delete_bucket(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path_1 = generate_file(simple_object_size)
def test_s3_delete_bucket(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
file_path_1 = generate_file(simple_object_size.value)
file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(simple_object_size)
file_path_2 = generate_file(simple_object_size.value)
file_name_2 = s3_helper.object_key_from_file_path(file_path_2)
bucket = s3_client.create_bucket()

View file

@ -10,6 +10,7 @@ from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import (
generate_file,
generate_file_with_content,
@ -39,13 +40,13 @@ class TestS3Gate:
s3_client: S3ClientWrapper,
client_shell: Shell,
cluster: Cluster,
simple_object_size: int,
simple_object_size: ObjectSize,
):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Create buckets"):
@ -108,22 +109,23 @@ class TestS3Gate:
@allure.title("Test S3 Object API")
@pytest.mark.parametrize(
"file_type", ["simple", "large"], ids=["Simple object", "Large object"]
"object_size",
["simple object size", "complex object size"],
ids=["simple object size", "complex object size"],
)
def test_s3_api_object(
self,
s3_client: S3ClientWrapper,
file_type: str,
object_size: str,
two_buckets: tuple[str, str],
simple_object_size: int,
complex_object_size: int,
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
):
"""
Test base S3 Object API (Put/Head/List) for simple and large objects.
Test base S3 Object API (Put/Head/List) for simple and complex objects.
"""
file_path = generate_file(
simple_object_size if file_type == "simple" else complex_object_size
)
size = simple_object_size if object_size == "simple object size" else complex_object_size
file_path = generate_file(size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1, bucket_2 = two_buckets
@ -146,7 +148,9 @@ class TestS3Gate:
s3_client.get_object_attributes(bucket, file_name, attrs)
@allure.title("Test S3 Sync directory")
def test_s3_sync_dir(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
def test_s3_sync_dir(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
"""
Test checks directory sync with the AWS CLI utility.
"""
@ -157,8 +161,8 @@ class TestS3Gate:
if not isinstance(s3_client, AwsCliClient):
pytest.skip("This test is not supported with boto3 client")
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size, file_path=file_path_2)
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
generate_file_with_content(simple_object_size.value, file_path=file_path_2)
s3_client.sync(bucket=bucket, dir_path=os.path.dirname(file_path_1))
@ -177,21 +181,23 @@ class TestS3Gate:
@allure.title("Test S3 Object versioning")
def test_s3_api_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
"""
Test checks basic versioning functionality for S3 bucket.
"""
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
file_name_simple = generate_file_with_content(
simple_object_size.value, content=version_1_content
)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple)
generate_file_with_content(
simple_object_size, file_path=file_name_simple, content=version_2_content
simple_object_size.value, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_simple)
@ -255,7 +261,7 @@ class TestS3Gate:
@pytest.mark.s3_gate_multipart
@allure.title("Test S3 Object Multipart API")
def test_s3_api_multipart(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
"""
Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
@ -263,7 +269,7 @@ class TestS3Gate:
"""
parts_count = 3
file_name_large = generate_file(
simple_object_size * 1024 * 6 * parts_count
simple_object_size.value * 1024 * 6 * parts_count
) # 5Mb - min part
object_key = s3_helper.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
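For the part sizing above: the generated file is simple_object_size.value * 1024 * 6 * parts_count bytes and is split into parts_count equal pieces, so each uploaded part is value * 1024 * 6 bytes. A quick sanity check of that arithmetic (the concrete simple-object size comes from the environment config; 1 KiB is assumed here purely for illustration):

MIN_PART_SIZE = 5 * 1024 * 1024  # S3 rejects non-final multipart parts smaller than 5 MiB


def part_size(simple_size_bytes: int) -> int:
    # Mirrors the test: total file = simple_size * 1024 * 6 * parts_count, split into parts_count parts.
    return simple_size_bytes * 1024 * 6


# Holds for any simple object size of at least ~854 bytes.
assert part_size(1024) >= MIN_PART_SIZE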
@ -325,7 +331,7 @@ class TestS3Gate:
@allure.title("Test S3 Object tagging API")
def test_s3_api_object_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
"""
Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
@ -336,7 +342,7 @@ class TestS3Gate:
("some-key--obj2", "some-value--obj2"),
]
key_value_pair_obj_new = [("some-key-obj-new", "some-value-obj-new")]
file_name_simple = generate_file(simple_object_size)
file_name_simple = generate_file(simple_object_size.value)
obj_key = s3_helper.object_key_from_file_path(file_name_simple)
s3_client.put_bucket_tagging(bucket, key_value_pair_bucket)
@ -360,8 +366,8 @@ class TestS3Gate:
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
simple_object_size: int,
complex_object_size: int,
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
):
"""
Check the delete_object and delete_objects S3 API operations. From the first bucket, some objects are deleted one by one.
@ -377,7 +383,7 @@ class TestS3Gate:
with allure.step(f"Generate {max_obj_count} files"):
for _ in range(max_obj_count):
file_paths.append(generate_file(choice(obj_sizes)))
file_paths.append(generate_file(choice(obj_sizes).value))
for bucket in (bucket_1, bucket_2):
with allure.step(f"Bucket {bucket} must be empty as it just created"):
@ -426,16 +432,15 @@ class TestS3Gate:
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: int,
simple_object_size: int,
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
"""
Test object can be copied to the same bucket.
#TODO: delete after test_s3_copy_object is merged
"""
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_objects = [file_name_simple, file_name_large]
@ -476,16 +481,15 @@ class TestS3Gate:
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
complex_object_size: int,
simple_object_size: int,
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
"""
Test object can be copied to another bucket.
#TODO: delete after test_s3_copy_object is merged
"""
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_path_simple = generate_file(simple_object_size.value)
file_path_large = generate_file(complex_object_size.value)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_1_objects = [file_name_simple, file_name_large]

View file

@ -5,6 +5,7 @@ import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content
@ -20,9 +21,9 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
class TestS3GateLocking:
@allure.title("Test S3: Checking the operation of retention period & legal lock on the object")
def test_s3_object_locking(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 2
@ -30,7 +31,7 @@ class TestS3GateLocking:
with allure.step("Put several versions of object into bucket"):
s3_client.put_object(bucket, file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
file_name_1 = generate_file_with_content(simple_object_size.value, file_path=file_path)
version_id_2 = s3_client.put_object(bucket, file_name_1)
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
if version_id:
@ -75,9 +76,9 @@ class TestS3GateLocking:
@allure.title("Test S3: Checking the impossibility to change the retention mode COMPLIANCE")
def test_s3_mode_compliance(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 2
retention_period_1 = 1
@ -114,9 +115,9 @@ class TestS3GateLocking:
@allure.title("Test S3: Checking the ability to change retention mode GOVERNANCE")
def test_s3_mode_governance(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 3
retention_period_1 = 2
@ -176,9 +177,9 @@ class TestS3GateLocking:
@allure.title("Test S3: Checking if an Object Cannot Be Locked")
def test_s3_legal_hold(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
@ -197,8 +198,8 @@ class TestS3GateLocking:
@pytest.mark.s3_gate
class TestS3GateLockingBucket:
@allure.title("Test S3: Bucket Lock")
def test_s3_bucket_lock(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path = generate_file(simple_object_size)
def test_s3_bucket_lock(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
configuration = {"Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}}}

View file

@ -3,6 +3,7 @@ import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.cli.container import list_objects, search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split_file
@ -64,11 +65,11 @@ class TestS3GateMultipart(ClusterTestBase):
s3_client: S3ClientWrapper,
default_wallet: str,
bucket: str,
simple_object_size: int,
complex_object_size: int,
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
):
complex_file = generate_file(complex_object_size)
simple_file = generate_file(simple_object_size)
complex_file = generate_file(complex_object_size.value)
simple_file = generate_file(simple_object_size.value)
to_upload = [complex_file, complex_file, simple_file]
files_count = len(to_upload)
upload_key = "multipart_abort"

View file

@ -11,6 +11,7 @@ from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
from frostfs_testlib.resources.error_patterns import S3_MALFORMED_XML_REQUEST
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.file_utils import (
@ -68,9 +69,12 @@ class TestS3GateObject:
@allure.title("Test S3: Copy object")
def test_s3_copy_object(
self, s3_client: S3ClientWrapper, two_buckets: tuple[str, str], simple_object_size: int
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
simple_object_size: ObjectSize,
):
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1_objects = [file_name]
@ -121,10 +125,15 @@ class TestS3GateObject:
@allure.title("Test S3: Copy version of object")
def test_s3_copy_version_object(
self, s3_client: S3ClientWrapper, two_buckets: tuple[str, str], simple_object_size: int
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
simple_object_size: ObjectSize,
):
version_1_content = "Version 1"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
file_name_simple = generate_file_with_content(
simple_object_size.value, content=version_1_content
)
obj_key = os.path.basename(file_name_simple)
bucket_1, bucket_2 = two_buckets
@ -162,9 +171,13 @@ class TestS3GateObject:
s3_client.copy_object(bucket_1, obj_key)
@allure.title("Test S3: Checking copy with acl")
def test_s3_copy_acl(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
def test_s3_copy_acl(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
version_1_content = "Version 1"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
file_name_simple = generate_file_with_content(
simple_object_size.value, content=version_1_content
)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@ -180,10 +193,10 @@ class TestS3GateObject:
@allure.title("Test S3: Copy object with metadata")
def test_s3_copy_metadate(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1_objects = [file_name]
@ -227,10 +240,10 @@ class TestS3GateObject:
@allure.title("Test S3: Copy object with tagging")
def test_s3_copy_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
object_tagging = [(f"{uuid.uuid4()}", f"{uuid.uuid4()}")]
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name_simple = s3_helper.object_key_from_file_path(file_path)
bucket_1_objects = [file_name_simple]
@ -281,12 +294,14 @@ class TestS3GateObject:
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: int,
complex_object_size: int,
simple_object_size: ObjectSize,
complex_object_size: ObjectSize,
):
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
file_name_simple = generate_file_with_content(
simple_object_size.value, content=version_1_content
)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@ -294,7 +309,7 @@ class TestS3GateObject:
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple)
file_name_1 = generate_file_with_content(
simple_object_size, file_path=file_name_simple, content=version_2_content
simple_object_size.value, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_1)
@ -327,7 +342,7 @@ class TestS3GateObject:
assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"
with allure.step("Put new object into bucket"):
file_name_simple = generate_file(complex_object_size)
file_name_simple = generate_file(complex_object_size.value)
obj_key = os.path.basename(file_name_simple)
s3_client.put_object(bucket, file_name_simple)
@ -339,13 +354,15 @@ class TestS3GateObject:
@allure.title("Test S3: bulk delete version of object")
def test_s3_bulk_delete_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
version_1_content = "Version 1"
version_2_content = "Version 2"
version_3_content = "Version 3"
version_4_content = "Version 4"
file_name_1 = generate_file_with_content(simple_object_size, content=version_1_content)
file_name_1 = generate_file_with_content(
simple_object_size.value, content=version_1_content
)
obj_key = os.path.basename(file_name_1)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@ -353,15 +370,15 @@ class TestS3GateObject:
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_1)
file_name_2 = generate_file_with_content(
simple_object_size, file_path=file_name_1, content=version_2_content
simple_object_size.value, file_path=file_name_1, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_2)
file_name_3 = generate_file_with_content(
simple_object_size, file_path=file_name_1, content=version_3_content
simple_object_size.value, file_path=file_name_1, content=version_3_content
)
version_id_3 = s3_client.put_object(bucket, file_name_3)
file_name_4 = generate_file_with_content(
simple_object_size, file_path=file_name_1, content=version_4_content
simple_object_size.value, file_path=file_name_1, content=version_4_content
)
version_id_4 = s3_client.put_object(bucket, file_name_4)
version_ids = {version_id_1, version_id_2, version_id_3, version_id_4}
@ -392,18 +409,20 @@ class TestS3GateObject:
@allure.title("Test S3: Get versions of object")
def test_s3_get_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
file_name_simple = generate_file_with_content(
simple_object_size.value, content=version_1_content
)
obj_key = os.path.basename(file_name_simple)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_name_simple)
file_name_1 = generate_file_with_content(
simple_object_size, file_path=file_name_simple, content=version_2_content
simple_object_size.value, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_client.put_object(bucket, file_name_1)
@ -430,16 +449,16 @@ class TestS3GateObject:
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: int,
simple_object_size: int,
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
file_path = generate_file(complex_object_size)
file_path = generate_file(complex_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
file_hash = get_file_hash(file_path)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
file_name_1 = generate_file_with_content(simple_object_size.value, file_path=file_path)
version_id_2 = s3_client.put_object(bucket, file_name_1)
with allure.step("Get first version of object"):
@ -447,19 +466,25 @@ class TestS3GateObject:
bucket,
file_name,
version_id_1,
object_range=[0, int(complex_object_size / 3)],
object_range=[0, int(complex_object_size.value / 3)],
)
object_1_part_2 = s3_client.get_object(
bucket,
file_name,
version_id_1,
object_range=[int(complex_object_size / 3) + 1, 2 * int(complex_object_size / 3)],
object_range=[
int(complex_object_size.value / 3) + 1,
2 * int(complex_object_size.value / 3),
],
)
object_1_part_3 = s3_client.get_object(
bucket,
file_name,
version_id_1,
object_range=[2 * int(complex_object_size / 3) + 1, complex_object_size],
object_range=[
2 * int(complex_object_size.value / 3) + 1,
complex_object_size.value,
],
)
con_file = concat_files([object_1_part_1, object_1_part_2, object_1_part_3])
assert get_file_hash(con_file) == file_hash, "Hashes must be the same"
@ -469,19 +494,22 @@ class TestS3GateObject:
bucket,
file_name,
version_id_2,
object_range=[0, int(simple_object_size / 3)],
object_range=[0, int(simple_object_size.value / 3)],
)
object_2_part_2 = s3_client.get_object(
bucket,
file_name,
version_id_2,
object_range=[int(simple_object_size / 3) + 1, 2 * int(simple_object_size / 3)],
object_range=[
int(simple_object_size.value / 3) + 1,
2 * int(simple_object_size.value / 3),
],
)
object_2_part_3 = s3_client.get_object(
bucket,
file_name,
version_id_2,
object_range=[2 * int(simple_object_size / 3) + 1, simple_object_size],
object_range=[2 * int(simple_object_size.value / 3) + 1, simple_object_size.value],
)
con_file_1 = concat_files([object_2_part_1, object_2_part_2, object_2_part_3])
assert get_file_hash(con_file_1) == get_file_hash(
@ -490,17 +518,20 @@ class TestS3GateObject:
with allure.step("Get object"):
object_3_part_1 = s3_client.get_object(
bucket, file_name, object_range=[0, int(simple_object_size / 3)]
bucket, file_name, object_range=[0, int(simple_object_size.value / 3)]
)
object_3_part_2 = s3_client.get_object(
bucket,
file_name,
object_range=[int(simple_object_size / 3) + 1, 2 * int(simple_object_size / 3)],
object_range=[
int(simple_object_size.value / 3) + 1,
2 * int(simple_object_size.value / 3),
],
)
object_3_part_3 = s3_client.get_object(
bucket,
file_name,
object_range=[2 * int(simple_object_size / 3) + 1, simple_object_size],
object_range=[2 * int(simple_object_size.value / 3) + 1, simple_object_size.value],
)
con_file = concat_files([object_3_part_1, object_3_part_2, object_3_part_3])
assert get_file_hash(con_file) == get_file_hash(file_name_1), "Hashes must be the same"
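The range reads above split each object into thirds; the +1 offsets on the second and third ranges indicate the [start, end] pairs are treated as inclusive. A small illustration of that arithmetic (the 1024-byte size is an assumed example, not a value taken from the suite):

def thirds(size: int) -> list[list[int]]:
    # Mirrors the object_range arguments used in the test above.
    third = int(size / 3)
    return [
        [0, third],
        [third + 1, 2 * third],
        [2 * third + 1, size],
    ]


print(thirds(1024))  # [[0, 341], [342, 682], [683, 1024]]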
@ -540,17 +571,17 @@ class TestS3GateObject:
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: int,
simple_object_size: int,
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
file_path = generate_file(complex_object_size)
file_path = generate_file(complex_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_path, metadata=object_metadata)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
file_name_1 = generate_file_with_content(simple_object_size.value, file_path=file_path)
version_id_2 = s3_client.put_object(bucket, file_name_1)
with allure.step("Get head of first version of object"):
@ -578,11 +609,15 @@ class TestS3GateObject:
@allure.title("Test S3: list of object with versions")
@pytest.mark.parametrize("list_type", ["v1", "v2"])
def test_s3_list_object(
self, s3_client: S3ClientWrapper, list_type: str, bucket: str, complex_object_size: int
self,
s3_client: S3ClientWrapper,
list_type: str,
bucket: str,
complex_object_size: ObjectSize,
):
file_path_1 = generate_file(complex_object_size)
file_path_1 = generate_file(complex_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(complex_object_size)
file_path_2 = generate_file(complex_object_size.value)
file_name_2 = s3_helper.object_key_from_file_path(file_path_2)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@ -618,10 +653,10 @@ class TestS3GateObject:
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: int,
simple_object_size: int,
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
file_path_1 = generate_file(complex_object_size)
file_path_1 = generate_file(complex_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path_1)
object_1_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
tag_key_1 = "tag1"
@ -644,7 +679,9 @@ class TestS3GateObject:
], "Tags must be the same"
with allure.step("Rewrite file into bucket"):
file_path_2 = generate_file_with_content(simple_object_size, file_path=file_path_1)
file_path_2 = generate_file_with_content(
simple_object_size.value, file_path=file_path_1
)
s3_client.put_object(bucket, file_path_2, metadata=object_2_metadata, tagging=tag_2)
obj_head = s3_client.head_object(bucket, file_name)
assert obj_head.get("Metadata") == object_2_metadata, "Metadata must be the same"
@ -656,7 +693,7 @@ class TestS3GateObject:
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
file_path_3 = generate_file(complex_object_size)
file_path_3 = generate_file(complex_object_size.value)
file_hash = get_file_hash(file_path_3)
file_name_3 = s3_helper.object_key_from_file_path(file_path_3)
object_3_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
@ -677,7 +714,9 @@ class TestS3GateObject:
], "Tags must be the same"
with allure.step("Put new version of file into bucket"):
file_path_4 = generate_file_with_content(simple_object_size, file_path=file_path_3)
file_path_4 = generate_file_with_content(
simple_object_size.value, file_path=file_path_3
)
version_id_2 = s3_client.put_object(bucket, file_path_4)
versions = s3_client.list_objects_versions(bucket)
obj_versions = {
@ -722,11 +761,11 @@ class TestS3GateObject:
s3_client: S3ClientWrapper,
bucket_versioning: Literal["ENABLED", "SUSPENDED"],
bucket: str,
complex_object_size: int,
simple_object_size: int,
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
second_wallet_public_key: str,
):
file_path_1 = generate_file(complex_object_size)
file_path_1 = generate_file(complex_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path_1)
if bucket_versioning == "ENABLED":
status = VersioningStatus.ENABLED
@ -742,7 +781,9 @@ class TestS3GateObject:
assert get_file_hash(file_path_1) == get_file_hash(object_1), "Hashes must be the same"
with allure.step("Put object with acl public-read"):
file_path_2 = generate_file_with_content(simple_object_size, file_path=file_path_1)
file_path_2 = generate_file_with_content(
simple_object_size.value, file_path=file_path_1
)
s3_client.put_object(bucket, file_path_2, acl="public-read")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
@ -750,7 +791,9 @@ class TestS3GateObject:
assert get_file_hash(file_path_2) == get_file_hash(object_2), "Hashes must be the same"
with allure.step("Put object with acl public-read-write"):
file_path_3 = generate_file_with_content(simple_object_size, file_path=file_path_1)
file_path_3 = generate_file_with_content(
simple_object_size.value, file_path=file_path_1
)
s3_client.put_object(bucket, file_path_3, acl="public-read-write")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
@ -758,18 +801,20 @@ class TestS3GateObject:
assert get_file_hash(file_path_3) == get_file_hash(object_3), "Hashes must be the same"
with allure.step("Put object with acl authenticated-read"):
file_path_4 = generate_file_with_content(simple_object_size, file_path=file_path_1)
file_path_4 = generate_file_with_content(
simple_object_size.value, file_path=file_path_1
)
s3_client.put_object(bucket, file_path_4, acl="authenticated-read")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
object_4 = s3_client.get_object(bucket, file_name)
assert get_file_hash(file_path_4) == get_file_hash(object_4), "Hashes must be the same"
file_path_5 = generate_file(complex_object_size)
file_path_5 = generate_file(complex_object_size.value)
file_name_5 = s3_helper.object_key_from_file_path(file_path_5)
with allure.step("Put object with --grant-full-control id=mycanonicaluserid"):
generate_file_with_content(simple_object_size, file_path=file_path_5)
generate_file_with_content(simple_object_size.value, file_path=file_path_5)
s3_client.put_object(
bucket,
file_path_5,
@ -783,7 +828,7 @@ class TestS3GateObject:
with allure.step(
"Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"
):
generate_file_with_content(simple_object_size, file_path=file_path_5)
generate_file_with_content(simple_object_size.value, file_path=file_path_5)
s3_client.put_object(
bucket,
file_path_5,
@ -796,10 +841,13 @@ class TestS3GateObject:
@allure.title("Test S3: put object with lock-mode")
def test_s3_put_object_lock_mode(
self, s3_client: S3ClientWrapper, complex_object_size: int, simple_object_size: int
self,
s3_client: S3ClientWrapper,
complex_object_size: ObjectSize,
simple_object_size: ObjectSize,
):
file_path_1 = generate_file(complex_object_size)
file_path_1 = generate_file(complex_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path_1)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@ -823,7 +871,7 @@ class TestS3GateObject:
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +3days]"
):
date_obj = datetime.utcnow() + timedelta(days=2)
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
s3_client.put_object(
bucket,
file_path_1,
@ -838,7 +886,7 @@ class TestS3GateObject:
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +2days]"
):
date_obj = datetime.utcnow() + timedelta(days=3)
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
s3_client.put_object(
bucket,
file_path_1,
@ -879,7 +927,7 @@ class TestS3GateObject:
s3_client: S3ClientWrapper,
sync_type: Literal["sync", "cp"],
bucket: str,
simple_object_size: int,
simple_object_size: ObjectSize,
):
file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
@ -889,8 +937,8 @@ class TestS3GateObject:
if not isinstance(s3_client, AwsCliClient):
pytest.skip("This test is not supported with boto3 client")
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size, file_path=file_path_2)
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
generate_file_with_content(simple_object_size.value, file_path=file_path_2)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
# TODO: return ACL when https://github.com/nspcc-dev/neofs-s3-gw/issues/685 is closed
if sync_type == "sync":
@ -930,11 +978,15 @@ class TestS3GateObject:
@allure.title("Test S3 Put 10 nested level object")
def test_s3_put_10_folder(
self, s3_client: S3ClientWrapper, bucket: str, temp_directory, simple_object_size: int
self,
s3_client: S3ClientWrapper,
bucket: str,
temp_directory,
simple_object_size: ObjectSize,
):
path = "/".join(["".join(choices(string.ascii_letters, k=3)) for _ in range(10)])
file_path_1 = os.path.join(temp_directory, path, "test_file_1")
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size.value, file_path=file_path_1)
file_name = s3_helper.object_key_from_file_path(file_path_1)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
@ -962,14 +1014,14 @@ class TestS3GateObject:
@allure.title("Test S3: Delete the same object twice")
def test_s3_delete_twice(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
objects_list = s3_client.list_objects(bucket)
with allure.step("Check that bucket is empty"):
assert not objects_list, f"Expected empty bucket, got {objects_list}"
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):

View file

@ -6,6 +6,7 @@ from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.cli.container import search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.steps.storage_policy import get_simple_object_copies
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file
@ -26,11 +27,11 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
class TestS3GatePolicy(ClusterTestBase):
@allure.title("Test S3: Verify bucket creation with retention policy applied")
def test_s3_bucket_location(
self, default_wallet: str, s3_client: S3ClientWrapper, simple_object_size: int
self, default_wallet: str, s3_client: S3ClientWrapper, simple_object_size: ObjectSize
):
file_path_1 = generate_file(simple_object_size)
file_path_1 = generate_file(simple_object_size.value)
file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(simple_object_size)
file_path_2 = generate_file(simple_object_size.value)
file_name_2 = s3_helper.object_key_from_file_path(file_path_2)
with allure.step("Create two buckets with different bucket configuration"):

View file

@ -6,6 +6,7 @@ import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file
@ -29,9 +30,9 @@ class TestS3GateTagging:
@allure.title("Test S3: Object tagging")
def test_s3_object_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
):
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put with 3 tags object into bucket"):

View file

@ -4,6 +4,7 @@ import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content
@ -23,8 +24,8 @@ class TestS3GateVersioning:
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED)
@allure.title("Test S3: Enable and disable versioning")
def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path = generate_file(simple_object_size)
def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
file_path = generate_file(simple_object_size.value)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
@ -54,7 +55,7 @@ class TestS3GateVersioning:
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_client.put_object(bucket, file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
file_name_1 = generate_file_with_content(simple_object_size.value, file_path=file_path)
version_id_2 = s3_client.put_object(bucket, file_name_1)
with allure.step("Check bucket shows all versions"):

View file

@ -7,6 +7,7 @@ from frostfs_testlib.resources.error_patterns import SESSION_NOT_FOUND
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import delete_object, put_object, put_object_to_random_node
from frostfs_testlib.steps.session_token import create_session_token
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import wallet_utils
from frostfs_testlib.utils.file_utils import generate_file
@ -19,9 +20,9 @@ class TestDynamicObjectSession(ClusterTestBase):
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
)
def test_object_session_token(self, default_wallet, object_size):
def test_object_session_token(self, default_wallet: str, object_size: ObjectSize):
"""
Test how operations over objects are executed with a session token
@ -69,7 +70,7 @@ class TestDynamicObjectSession(ClusterTestBase):
)
with allure.step("Put Objects"):
file_path = generate_file(object_size)
file_path = generate_file(object_size.value)
oid = put_object_to_random_node(
wallet=wallet,
path=file_path,
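The session-token tests above parametrize directly over the size fixtures with pytest.lazy_fixture; this commit renames the parametrize ids and switches the call sites to .value. A minimal sketch of how such a test receives and uses the ObjectSize (the test name and body are illustrative, not part of the suite):

import pytest

from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file


@pytest.mark.parametrize(
    "object_size",
    [pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
    ids=["simple object size", "complex object size"],
)
def test_object_size_example(object_size: ObjectSize):
    # The lazy fixture resolves to an ObjectSize; generate_file() expects a byte count.
    file_path = generate_file(object_size.value)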

View file

@ -35,6 +35,7 @@ from frostfs_testlib.steps.session_token import (
)
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
@ -62,7 +63,7 @@ def storage_containers(
@pytest.fixture(
params=[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
ids=["simple object size", "complex object size"],
# Scope module to upload/delete each files set only once
scope="module",
)
@ -74,7 +75,8 @@ def storage_objects(
request: FixtureRequest,
) -> list[StorageObjectInfo]:
file_path = generate_file(request.param)
object_size: ObjectSize = request.param
file_path = generate_file(object_size.value)
storage_objects = []
with allure.step("Put objects"):
@ -89,7 +91,7 @@ def storage_objects(
)
storage_object = StorageObjectInfo(storage_containers[0], storage_object_id)
storage_object.size = request.param
storage_object.size = object_size.value
storage_object.wallet_file_path = owner_wallet.path
storage_object.file_path = file_path
storage_objects.append(storage_object)

View file

@ -11,6 +11,7 @@ from frostfs_testlib.steps.cli.container import (
)
from frostfs_testlib.steps.session_token import ContainerVerb, get_container_signed_token
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
@ -141,7 +142,7 @@ class TestSessionTokenContainer(ClusterTestBase):
user_wallet: WalletInfo,
stranger_wallet: WalletInfo,
static_sessions: dict[ContainerVerb, str],
simple_object_size,
simple_object_size: ObjectSize,
):
"""
Validate static session with set eacl operation
@ -153,7 +154,7 @@ class TestSessionTokenContainer(ClusterTestBase):
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
file_path = generate_file(simple_object_size)
file_path = generate_file(simple_object_size.value)
assert can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster)
with allure.step("Deny all operations for other via eACL"):