Remove pytest hooks
parent 975d06de40
commit e5093bf6ac
22 changed files with 215 additions and 645 deletions
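In short: every per-module `pytest_generate_tests` hook that parametrized the `s3_client` fixture is deleted, and the parametrization moves onto the class-scoped `s3_client` fixture in conftest.py. A condensed sketch of the before/after pattern (the fixture signature is shortened here; the full argument list is in the diff below):

    # Before: repeated in each test module
    def pytest_generate_tests(metafunc: pytest.Metafunc):
        if "s3_client" in metafunc.fixturenames:
            metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)

    # After: declared once, on the fixture itself
    @pytest.fixture(
        scope="class",
        params=[
            pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.sanity]),
            pytest.param(Boto3ClientWrapper, marks=pytest.mark.boto3),
        ],
    )
    def s3_client(default_wallet: str, client_shell: Shell):
        ...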
@@ -19,7 +19,7 @@ from frostfs_testlib.resources.common import (
     DEFAULT_WALLET_PASS,
     SIMPLE_OBJECT_SIZE,
 )
-from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus
+from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.shell import LocalShell, Shell
 from frostfs_testlib.steps.cli.container import list_containers
 from frostfs_testlib.steps.cli.object import get_netmap_netinfo
@@ -203,7 +203,13 @@ def cluster_state_controller(client_shell: Shell, cluster: Cluster, healthcheck:


 @allure.step("[Class]: Create S3 client")
-@pytest.fixture(scope="class")
+@pytest.fixture(
+    scope="class",
+    params=[
+        pytest.param(AwsCliClient, marks=[pytest.mark.aws, pytest.mark.sanity]),
+        pytest.param(Boto3ClientWrapper, marks=pytest.mark.boto3),
+    ],
+)
 def s3_client(
     default_wallet: str,
     client_shell: Shell,
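Because the fixture params now carry marks, client selection shifts from per-module hooks to mark filtering on the command line; for example (standard pytest mark selection, not part of this diff):

    pytest -m boto3              # run only the Boto3ClientWrapper parametrization
    pytest -m "aws and sanity"   # run AwsCliClient cases that are also marked sanity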
@@ -247,7 +253,9 @@ def bucket(s3_client: S3ClientWrapper, versioning_status: VersioningStatus):
     s3_helper.set_bucket_versioning(s3_client, bucket_name, versioning_status)

     yield bucket_name
-    s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
+
+    # s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
+


 @allure.step("Create two buckets")
@@ -256,8 +264,10 @@ def two_buckets(s3_client: S3ClientWrapper):
     bucket_1 = s3_client.create_bucket()
     bucket_2 = s3_client.create_bucket()
     yield bucket_1, bucket_2
-    for bucket_name in [bucket_1, bucket_2]:
-        s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
+
+    # for bucket_name in [bucket_1, bucket_2]:
+    #     s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
+


 @allure.step("[Autouse/Session] Check binary versions")
@@ -21,6 +21,7 @@ from pytest_tests.resources.policy_error_patterns import (
 )


+@pytest.mark.sanity
 @pytest.mark.container
 @pytest.mark.policy
 class TestPolicy(ClusterTestBase):
@@ -6,7 +6,6 @@ from time import sleep
 import allure
 import pytest
 from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
-from frostfs_testlib.s3 import AwsCliClient
 from frostfs_testlib.steps.cli.container import create_container
 from frostfs_testlib.steps.cli.object import (
     get_object,
@@ -38,11 +37,6 @@ OBJECT_ATTRIBUTES = [
 ]


-def pytest_generate_tests(metafunc: pytest.Metafunc):
-    if "s3_client" in metafunc.fixturenames:
-        metafunc.parametrize("s3_client", [AwsCliClient], indirect=True)
-
-
 @pytest.mark.failover
 @pytest.mark.failover_network
 class TestFailoverNetwork(ClusterTestBase):
@@ -7,7 +7,8 @@ import allure
 import pytest
+from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
 from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
-from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
+from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
 from frostfs_testlib.shell import CommandOptions
 from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo, create_container
 from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
 from frostfs_testlib.steps.node_management import (
@@ -35,11 +36,6 @@ logger = logging.getLogger("NeoLogger")
 stopped_nodes: list[StorageNode] = []


-def pytest_generate_tests(metafunc: pytest.Metafunc):
-    if "s3_client" in metafunc.fixturenames:
-        metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
-
-
 @pytest.fixture(scope="function")
 @allure.title("Provide File Keeper")
 def file_keeper():
@@ -28,10 +28,7 @@ def bearer_token_file_all_allow(default_wallet: str, client_shell: Shell, cluste
     bearer = form_bearertoken_file(
         default_wallet,
         "",
-        [
-            EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
-            for op in EACLOperation
-        ],
+        [EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) for op in EACLOperation],
         shell=client_shell,
         endpoint=cluster.default_rpc_endpoint,
     )
@@ -82,12 +79,11 @@ def storage_objects(
     return storage_objects


+@pytest.mark.sanity
 @pytest.mark.smoke
 @pytest.mark.bearer
 class TestObjectApiWithBearerToken(ClusterTestBase):
-    @allure.title(
-        "Object can be deleted from any node using s3gate wallet with bearer token (obj_size={object_size})"
-    )
+    @allure.title("Object can be deleted from any node using s3gate wallet with bearer token (obj_size={object_size})")
     @pytest.mark.parametrize(
         "user_container",
         [SINGLE_PLACEMENT_RULE],
@@ -112,9 +108,7 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
             wallet_config=s3_gate_wallet.get_wallet_config_path(),
         )

-    @allure.title(
-        "Object can be fetched from any node using s3gate wallet with bearer token (obj_size={object_size})"
-    )
+    @allure.title("Object can be fetched from any node using s3gate wallet with bearer token (obj_size={object_size})")
     @pytest.mark.parametrize(
         "user_container",
         [REP_2_FOR_3_NODES_PLACEMENT_RULE],
@@ -20,6 +20,7 @@ OBJECT_ATTRIBUTES = {"common_key": "common_value"}
 WAIT_FOR_REPLICATION = 60

 # Adding failover mark because it may make cluster unhealthy
+@pytest.mark.sanity
 @pytest.mark.failover
 @pytest.mark.replication
 class TestReplication(ClusterTestBase):
@@ -21,7 +21,6 @@ from frostfs_testlib.utils.file_utils import generate_file
 logger = logging.getLogger("NeoLogger")


-@pytest.mark.sanity
 @pytest.mark.http_gate
 @pytest.mark.skip("Skipped due to deprecated PUT via http")
 @pytest.mark.http_put
@@ -46,9 +45,7 @@ class Test_http_bearer(ClusterTestBase):
     @pytest.fixture(scope="class")
     def eacl_deny_for_others(self, user_container: str) -> None:
         with allure.step(f"Set deny all operations for {EACLRole.OTHERS} via eACL"):
-            eacl = EACLRule(
-                access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=EACLOperation.PUT
-            )
+            eacl = EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=EACLOperation.PUT)
             set_eacl(
                 self.wallet,
                 user_container,
@@ -64,10 +61,7 @@ class Test_http_bearer(ClusterTestBase):
         bearer = form_bearertoken_file(
             self.wallet,
             user_container,
-            [
-                EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
-                for op in EACLOperation
-            ],
+            [EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS) for op in EACLOperation],
             shell=self.shell,
             endpoint=self.cluster.default_rpc_endpoint,
             sign=False,
@@ -105,9 +99,7 @@ class Test_http_bearer(ClusterTestBase):
         eacl_deny_for_others
         bearer = bearer_token_no_limit_for_others
         file_path = generate_file(object_size.value)
-        with allure.step(
-            f"Put object with bearer token for {EACLRole.OTHERS}, then get and verify hashes"
-        ):
+        with allure.step(f"Put object with bearer token for {EACLRole.OTHERS}, then get and verify hashes"):
             headers = [f" -H 'Authorization: Bearer {bearer}'"]
             oid = upload_via_http_gate_curl(
                 cid=user_container,
@@ -44,9 +44,7 @@ class TestHttpGate(ClusterTestBase):
         TestHttpGate.wallet = default_wallet

     @allure.title("Put over gRPC, Get over HTTP")
-    def test_put_grpc_get_http(
-        self, complex_object_size: ObjectSize, simple_object_size: ObjectSize
-    ):
+    def test_put_grpc_get_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
         """
         Test that object can be put using gRPC interface and get using HTTP.

@@ -106,7 +104,6 @@ class TestHttpGate(ClusterTestBase):
 )
 @allure.link("https://github.com/TrueCloudLab/frostfs-http-gw#uploading", name="uploading")
 @allure.link("https://github.com/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
-@pytest.mark.sanity
 @pytest.mark.http_gate
 @pytest.mark.http_put
 @pytest.mark.skip("Skipped due to deprecated PUT via http")
@@ -115,9 +112,7 @@ class TestHttpPut(ClusterTestBase):
     @allure.link("https://github.com/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
     @allure.title("Put over HTTP, Get over HTTP")
     @pytest.mark.smoke
-    def test_put_http_get_http(
-        self, complex_object_size: ObjectSize, simple_object_size: ObjectSize
-    ):
+    def test_put_http_get_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
         """
         Test that object can be put and get using HTTP interface.

@@ -341,9 +336,7 @@ class TestHttpPut(ClusterTestBase):
         file_path = generate_file(complex_object_size.value)

         with allure.step("Put objects using HTTP"):
-            oid_gate = upload_via_http_gate(
-                cid=cid, path=file_path, endpoint=self.cluster.default_http_gate_endpoint
-            )
+            oid_gate = upload_via_http_gate(cid=cid, path=file_path, endpoint=self.cluster.default_http_gate_endpoint)
             oid_curl = upload_via_http_gate_curl(
                 cid=cid,
                 filepath=file_path,
@@ -374,9 +367,7 @@ class TestHttpPut(ClusterTestBase):

     @pytest.mark.skip("Skipped due to deprecated PUT via http")
     @allure.title("Put/Get over HTTP using Curl utility")
-    def test_put_http_get_http_curl(
-        self, complex_object_size: ObjectSize, simple_object_size: ObjectSize
-    ):
+    def test_put_http_get_http_curl(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
         """
         Test checks upload and download over HTTP using curl utility.
         """
@@ -27,7 +27,6 @@ OBJECT_ALREADY_REMOVED_ERROR = "object already removed"
 logger = logging.getLogger("NeoLogger")


-@pytest.mark.sanity
 @pytest.mark.http_gate
 @pytest.mark.http_put
 @pytest.mark.skip("Skipped due to deprecated PUT via http")
@@ -79,9 +78,7 @@ class Test_http_headers(ClusterTestBase):
         yield storage_objects

     @allure.title("Get object1 by attribute")
-    def test_object1_can_be_get_by_attr(
-        self, storage_objects_with_attributes: list[StorageObjectInfo]
-    ):
+    def test_object1_can_be_get_by_attr(self, storage_objects_with_attributes: list[StorageObjectInfo]):
         """
         Test to get object#1 by attribute and compare hashes
@@ -104,9 +101,7 @@ class Test_http_headers(ClusterTestBase):
         )

     @allure.title("Get object2 with different attributes, then delete object2 and get object1")
-    def test_object2_can_be_get_by_attr(
-        self, storage_objects_with_attributes: list[StorageObjectInfo]
-    ):
+    def test_object2_can_be_get_by_attr(self, storage_objects_with_attributes: list[StorageObjectInfo]):
         """
         Test to get object2 with different attributes, then delete object2 and get object1 using 1st attribute. Note: obj1 and obj2 have the same attribute#1,
         and when obj2 is deleted you can get obj1 by 1st attribute
@@ -167,9 +162,7 @@ class Test_http_headers(ClusterTestBase):
         )

     @allure.title("[NEGATIVE] Put object and get right after container is deleted")
-    def test_negative_put_and_get_object3(
-        self, storage_objects_with_attributes: list[StorageObjectInfo]
-    ):
+    def test_negative_put_and_get_object3(self, storage_objects_with_attributes: list[StorageObjectInfo]):
         """
         Test to attempt to put object and try to download it right after the container has been deleted
@@ -214,9 +207,7 @@ class Test_http_headers(ClusterTestBase):
         assert storage_object_1.cid not in list_containers(
             self.wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
         )
-        with allure.step(
-            "[Negative] Try to download (wget) object via wget with attributes [peace=peace]"
-        ):
+        with allure.step("[Negative] Try to download (wget) object via wget with attributes [peace=peace]"):
             request = f"/get/{storage_object_1.cid}/peace/peace"
             error_pattern = "404 Not Found"
             try_to_get_object_via_passed_request_and_expect_error(
@@ -12,7 +12,6 @@ from frostfs_testlib.utils.file_utils import generate_file
 logger = logging.getLogger("NeoLogger")


-@pytest.mark.sanity
 @pytest.mark.http_gate
 @pytest.mark.http_put
 @pytest.mark.skip("Skipped due to deprecated PUT via http")
@@ -50,9 +49,7 @@ class Test_http_streaming(ClusterTestBase):
         # Generate file
         file_path = generate_file(complex_object_size.value)

-        with allure.step(
-            "Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]"
-        ):
+        with allure.step("Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]"):
             oid = upload_via_http_gate_curl(
                 cid=cid, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint
             )
@@ -8,11 +8,7 @@ import pytest
 from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
 from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
 from frostfs_testlib.steps.cli.container import create_container
-from frostfs_testlib.steps.cli.object import (
-    get_netmap_netinfo,
-    get_object_from_random_node,
-    head_object,
-)
+from frostfs_testlib.steps.cli.object import get_netmap_netinfo, get_object_from_random_node, head_object
 from frostfs_testlib.steps.epoch import get_epoch, wait_for_epochs_align
 from frostfs_testlib.steps.http.http_gate import (
     attr_into_str_header_curl,
@@ -35,7 +31,6 @@ SYSTEM_EXPIRATION_TIMESTAMP = "System-Expiration-Timestamp"
 SYSTEM_EXPIRATION_RFC3339 = "System-Expiration-RFC3339"


-@pytest.mark.sanity
 @pytest.mark.http_gate
 @pytest.mark.http_put
 @pytest.mark.skip("Skipped due to deprecated PUT via http")
@@ -76,9 +71,7 @@ class Test_http_system_header(ClusterTestBase):
         return f"{mins}m"

     @allure.title("Return future timestamp after N epochs are passed")
-    def epoch_count_into_timestamp(
-        self, epoch_duration: int, epoch: int, rfc3339: Optional[bool] = False
-    ) -> str:
+    def epoch_count_into_timestamp(self, epoch_duration: int, epoch: int, rfc3339: Optional[bool] = False) -> str:
         current_datetime = datetime.datetime.utcnow()
         epoch_count_in_seconds = epoch_duration * epoch
         future_datetime = current_datetime + datetime.timedelta(seconds=epoch_count_in_seconds)
@@ -96,9 +89,7 @@ class Test_http_system_header(ClusterTestBase):
                 return False
         return True

-    @allure.title(
-        f"Validate that only {EXPIRATION_EPOCH_HEADER} exists in header and other headers are abesent"
-    )
+    @allure.title(f"Validate that only {EXPIRATION_EPOCH_HEADER} exists in header and other headers are absent")
     def validation_for_http_header_attr(self, head_info: dict, expected_epoch: int) -> None:
         # check that __SYSTEM__EXPIRATION_EPOCH attribute has corresponding epoch
         assert self.check_key_value_presented_header(
@@ -146,13 +137,9 @@ class Test_http_system_header(ClusterTestBase):

     @allure.title("[NEGATIVE] Put object with expired epoch")
     def test_unable_put_expired_epoch(self, user_container: str, simple_object_size: ObjectSize):
-        headers = attr_into_str_header_curl(
-            {"System-Expiration-Epoch": str(get_epoch(self.shell, self.cluster) - 1)}
-        )
+        headers = attr_into_str_header_curl({"System-Expiration-Epoch": str(get_epoch(self.shell, self.cluster) - 1)})
         file_path = generate_file(simple_object_size.value)
-        with allure.step(
-            "Put object using HTTP with attribute Expiration-Epoch where epoch is expired"
-        ):
+        with allure.step("Put object using HTTP with attribute Expiration-Epoch where epoch is expired"):
             upload_via_http_gate_curl(
                 cid=user_container,
                 filepath=file_path,
@@ -162,14 +149,10 @@ class Test_http_system_header(ClusterTestBase):
         )

     @allure.title("[NEGATIVE] Put object with negative System-Expiration-Duration")
-    def test_unable_put_negative_duration(
-        self, user_container: str, simple_object_size: ObjectSize
-    ):
+    def test_unable_put_negative_duration(self, user_container: str, simple_object_size: ObjectSize):
         headers = attr_into_str_header_curl({"System-Expiration-Duration": "-1h"})
         file_path = generate_file(simple_object_size.value)
-        with allure.step(
-            "Put object using HTTP with attribute System-Expiration-Duration where duration is negative"
-        ):
+        with allure.step("Put object using HTTP with attribute System-Expiration-Duration where duration is negative"):
             upload_via_http_gate_curl(
                 cid=user_container,
                 filepath=file_path,
@@ -179,9 +162,7 @@ class Test_http_system_header(ClusterTestBase):
         )

     @allure.title("[NEGATIVE] Put object with System-Expiration-Timestamp value in the past")
-    def test_unable_put_expired_timestamp(
-        self, user_container: str, simple_object_size: ObjectSize
-    ):
+    def test_unable_put_expired_timestamp(self, user_container: str, simple_object_size: ObjectSize):
         headers = attr_into_str_header_curl({"System-Expiration-Timestamp": "1635075727"})
         file_path = generate_file(simple_object_size.value)
         with allure.step(
@@ -211,9 +192,7 @@ class Test_http_system_header(ClusterTestBase):

     @allure.title("Priority of attributes epoch>duration (obj_size={object_size})")
     @pytest.mark.skip("Temp disable for v0.37")
-    def test_http_attr_priority_epoch_duration(
-        self, user_container: str, object_size: ObjectSize, epoch_duration: int
-    ):
+    def test_http_attr_priority_epoch_duration(self, user_container: str, object_size: ObjectSize, epoch_duration: int):
         self.tick_epoch()
         epoch_count = 1
         expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
@@ -247,15 +226,11 @@ class Test_http_system_header(ClusterTestBase):
             )
             # check that object is not available via grpc
             with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                get_object_from_random_node(
-                    self.wallet, user_container, oid, self.shell, self.cluster
-                )
+                get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster)

     @allure.title("Priority of attributes duration>timestamp (obj_size={object_size})")
     @pytest.mark.skip("Temp disable for v0.37")
-    def test_http_attr_priority_dur_timestamp(
-        self, user_container: str, object_size: ObjectSize, epoch_duration: int
-    ):
+    def test_http_attr_priority_dur_timestamp(self, user_container: str, object_size: ObjectSize, epoch_duration: int):
         self.tick_epoch()
         epoch_count = 2
         expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
@@ -263,12 +238,8 @@ class Test_http_system_header(ClusterTestBase):
             f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
         )
         attributes = {
-            SYSTEM_EXPIRATION_DURATION: self.epoch_count_into_mins(
-                epoch_duration=epoch_duration, epoch=2
-            ),
-            SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(
-                epoch_duration=epoch_duration, epoch=1
-            ),
+            SYSTEM_EXPIRATION_DURATION: self.epoch_count_into_mins(epoch_duration=epoch_duration, epoch=2),
+            SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=1),
         }
         file_path = generate_file(object_size.value)
         with allure.step(
@@ -296,15 +267,11 @@ class Test_http_system_header(ClusterTestBase):
             )
             # check that object is not available via grpc
             with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                get_object_from_random_node(
-                    self.wallet, user_container, oid, self.shell, self.cluster
-                )
+                get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster)

     @allure.title("Priority of attributes timestamp>Expiration-RFC (obj_size={object_size})")
     @pytest.mark.skip("Temp disable for v0.37")
-    def test_http_attr_priority_timestamp_rfc(
-        self, user_container: str, object_size: ObjectSize, epoch_duration: int
-    ):
+    def test_http_attr_priority_timestamp_rfc(self, user_container: str, object_size: ObjectSize, epoch_duration: int):
         self.tick_epoch()
         epoch_count = 2
         expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
@@ -312,9 +279,7 @@ class Test_http_system_header(ClusterTestBase):
             f"epoch duration={epoch_duration}, current_epoch= {get_epoch(self.shell, self.cluster)} expected_epoch {expected_epoch}"
         )
         attributes = {
-            SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(
-                epoch_duration=epoch_duration, epoch=2
-            ),
+            SYSTEM_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(epoch_duration=epoch_duration, epoch=2),
             SYSTEM_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
                 epoch_duration=epoch_duration, epoch=1, rfc3339=True
             ),
@@ -345,9 +310,7 @@ class Test_http_system_header(ClusterTestBase):
             )
             # check that object is not available via grpc
             with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                get_object_from_random_node(
-                    self.wallet, user_container, oid, self.shell, self.cluster
-                )
+                get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster)

     @allure.title("Object should be deleted when expiration passed (obj_size={object_size})")
     @pytest.mark.parametrize(
@@ -399,6 +362,4 @@ class Test_http_system_header(ClusterTestBase):
             )
             # check that object is not available via grpc
             with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
-                get_object_from_random_node(
-                    self.wallet, user_container, oid, self.shell, self.cluster
-                )
+                get_object_from_random_node(self.wallet, user_container, oid, self.shell, self.cluster)
@@ -2,17 +2,12 @@ from datetime import datetime, timedelta

 import allure
 import pytest
-from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
+from frostfs_testlib.s3 import S3ClientWrapper
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.utils.file_utils import generate_file


-def pytest_generate_tests(metafunc: pytest.Metafunc):
-    if "s3_client" in metafunc.fixturenames:
-        metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
-
-
 @pytest.mark.sanity
 @pytest.mark.s3_gate
 @pytest.mark.s3_gate_bucket
@@ -26,23 +21,17 @@ class TestS3GateBucket:
         s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")

         with allure.step("Create bucket with ACL = public-read"):
-            bucket_1 = s3_client.create_bucket(
-                object_lock_enabled_for_bucket=True, acl="public-read"
-            )
+            bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read")
             bucket_acl_1 = s3_client.get_bucket_acl(bucket_1)
             s3_helper.assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")

         with allure.step("Create bucket with ACL public-read-write"):
-            bucket_2 = s3_client.create_bucket(
-                object_lock_enabled_for_bucket=True, acl="public-read-write"
-            )
+            bucket_2 = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="public-read-write")
             bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
             s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")

         with allure.step("Create bucket with ACL = authenticated-read"):
-            bucket_3 = s3_client.create_bucket(
-                object_lock_enabled_for_bucket=True, acl="authenticated-read"
-            )
+            bucket_3 = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="authenticated-read")
             bucket_acl_3 = s3_client.get_bucket_acl(bucket_3)
             s3_helper.assert_s3_acl(acl_grants=bucket_acl_3, permitted_users="AllUsers")

@@ -74,18 +63,14 @@ class TestS3GateBucket:
         s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")

     @allure.title("Create bucket with object lock (s3_client={s3_client})")
-    def test_s3_bucket_object_lock(
-        self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize
-    ):
+    def test_s3_bucket_object_lock(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)

         with allure.step("Create bucket with --no-object-lock-enabled-for-bucket"):
             bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
             date_obj = datetime.utcnow() + timedelta(days=1)
-            with pytest.raises(
-                Exception, match=r".*Object Lock configuration does not exist for this bucket.*"
-            ):
+            with pytest.raises(Exception, match=r".*Object Lock configuration does not exist for this bucket.*"):
                 # An error occurred (ObjectLockConfigurationNotFoundError) when calling the PutObject operation (reached max retries: 0):
                 # Object Lock configuration does not exist for this bucket
                 s3_client.put_object(
@@ -104,9 +89,7 @@ class TestS3GateBucket:
                 object_lock_retain_until_date=date_obj_1.strftime("%Y-%m-%dT%H:%M:%S"),
                 object_lock_legal_hold_status="ON",
             )
-            s3_helper.assert_object_lock_mode(
-                s3_client, bucket_1, file_name, "COMPLIANCE", date_obj_1, "ON"
-            )
+            s3_helper.assert_object_lock_mode(s3_client, bucket_1, file_name, "COMPLIANCE", date_obj_1, "ON")

     @allure.title("Delete bucket (s3_client={s3_client})")
     def test_s3_delete_bucket(self, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
@@ -22,15 +22,7 @@ from frostfs_testlib.utils.file_utils import (
 logger = logging.getLogger("NeoLogger")


-def pytest_generate_tests(metafunc: pytest.Metafunc):
-    if "s3_client" in metafunc.fixturenames:
-        metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
-
-
-@allure.link(
-    "https://github.com/TrueCloudLab/frostfs-s3-gw#frostfs-s3-gw", name="frostfs-s3-gateway"
-)
-@pytest.mark.sanity
+@allure.link("https://github.com/TrueCloudLab/frostfs-s3-gw#frostfs-s3-gw", name="frostfs-s3-gateway")
 @pytest.mark.s3_gate
 @pytest.mark.s3_gate_base
 class TestS3Gate:
@@ -73,9 +65,7 @@ class TestS3Gate:
             s3_client.head_object(bucket_1, file_name)

         bucket_objects = s3_client.list_objects(bucket_1)
-        assert (
-            file_name in bucket_objects
-        ), f"Expected file {file_name} in objects list {bucket_objects}"
+        assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}"

         with allure.step("Try to delete not empty bucket and get error"):
             with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
@@ -136,18 +126,14 @@ class TestS3Gate:
             s3_client.head_object(bucket, file_name)

         bucket_objects = s3_client.list_objects(bucket)
-        assert (
-            file_name in bucket_objects
-        ), f"Expected file {file_name} in objects list {bucket_objects}"
+        assert file_name in bucket_objects, f"Expected file {file_name} in objects list {bucket_objects}"

         with allure.step("Check object's attributes"):
             for attrs in (["ETag"], ["ObjectSize", "StorageClass"]):
                 s3_client.get_object_attributes(bucket, file_name, attrs)

     @allure.title("Sync directory (s3_client={s3_client})")
-    def test_s3_sync_dir(
-        self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
-    ):
+    def test_s3_sync_dir(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
         """
         Test checks sync directory with AWS CLI utility.
         """
@@ -167,9 +153,7 @@ class TestS3Gate:
         objects = s3_client.list_objects(bucket)

         with allure.step("Check these are the same objects"):
-            assert set(key_to_path.keys()) == set(
-                objects
-            ), f"Expected all objects saved. Got {objects}"
+            assert set(key_to_path.keys()) == set(objects), f"Expected all objects saved. Got {objects}"
             for obj_key in objects:
                 got_object = s3_client.get_object(bucket, obj_key)
                 assert get_file_hash(got_object) == get_file_hash(
@@ -177,32 +161,24 @@ class TestS3Gate:
         ), "Expected hashes are the same"

     @allure.title("Object versioning (s3_client={s3_client})")
-    def test_s3_api_versioning(
-        self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
-    ):
+    def test_s3_api_versioning(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
         """
         Test checks basic versioning functionality for S3 bucket.
         """
         version_1_content = "Version 1"
         version_2_content = "Version 2"
-        file_name_simple = generate_file_with_content(
-            simple_object_size.value, content=version_1_content
-        )
+        file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
         obj_key = os.path.basename(file_name_simple)
         s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

         with allure.step("Put several versions of object into bucket"):
             version_id_1 = s3_client.put_object(bucket, file_name_simple)
-            generate_file_with_content(
-                simple_object_size.value, file_path=file_name_simple, content=version_2_content
-            )
+            generate_file_with_content(simple_object_size.value, file_path=file_name_simple, content=version_2_content)
             version_id_2 = s3_client.put_object(bucket, file_name_simple)

         with allure.step("Check bucket shows all versions"):
             versions = s3_client.list_objects_versions(bucket)
-            obj_versions = {
-                version.get("VersionId") for version in versions if version.get("Key") == obj_key
-            }
+            obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
             assert obj_versions == {
                 version_id_1,
                 version_id_2,
@@ -213,20 +189,14 @@ class TestS3Gate:
             response = s3_client.head_object(bucket, obj_key, version_id=version_id)
             assert "LastModified" in response, "Expected LastModified field"
             assert "ETag" in response, "Expected ETag field"
-            assert (
-                response.get("VersionId") == version_id
-            ), f"Expected VersionId is {version_id}"
+            assert response.get("VersionId") == version_id, f"Expected VersionId is {version_id}"
             assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"

         with allure.step("Check object's attributes"):
             for version_id in (version_id_1, version_id_2):
-                got_attrs = s3_client.get_object_attributes(
-                    bucket, obj_key, ["ETag"], version_id=version_id
-                )
+                got_attrs = s3_client.get_object_attributes(bucket, obj_key, ["ETag"], version_id=version_id)
                 if got_attrs:
-                    assert (
-                        got_attrs.get("VersionId") == version_id
-                    ), f"Expected VersionId is {version_id}"
+                    assert got_attrs.get("VersionId") == version_id, f"Expected VersionId is {version_id}"

         with allure.step("Delete object and check it was deleted"):
             response = s3_client.delete_object(bucket, obj_key)
@@ -242,9 +212,7 @@ class TestS3Gate:
         ):
             file_name = s3_client.get_object(bucket, obj_key, version_id=version)
             got_content = get_file_content(file_name)
-            assert (
-                got_content == content
-            ), f"Expected object content is\n{content}\nGot\n{got_content}"
+            assert got_content == content, f"Expected object content is\n{content}\nGot\n{got_content}"

         with allure.step("Restore previous object version"):
             s3_client.delete_object(bucket, obj_key, version_id=version_id_delete)
@@ -257,17 +225,13 @@ class TestS3Gate:

     @pytest.mark.s3_gate_multipart
     @allure.title("Object Multipart API (s3_client={s3_client})")
-    def test_s3_api_multipart(
-        self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
-    ):
+    def test_s3_api_multipart(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
         """
         Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
         Upload part/List parts/Complete multipart upload).
         """
         parts_count = 3
-        file_name_large = generate_file(
-            simple_object_size.value * 1024 * 6 * parts_count
-        )  # 5Mb - min part
+        file_name_large = generate_file(simple_object_size.value * 1024 * 6 * parts_count)  # 5Mb - min part
         object_key = s3_helper.object_key_from_file_path(file_name_large)
         part_files = split_file(file_name_large, parts_count)
         parts = []
@@ -279,12 +243,8 @@ class TestS3Gate:
             upload_id = s3_client.create_multipart_upload(bucket, object_key)
             uploads = s3_client.list_multipart_uploads(bucket)
             assert uploads, f"Expected there one upload in bucket {bucket}"
-            assert (
-                uploads[0].get("Key") == object_key
-            ), f"Expected correct key {object_key} in upload {uploads}"
-            assert (
-                uploads[0].get("UploadId") == upload_id
-            ), f"Expected correct UploadId {upload_id} in upload {uploads}"
+            assert uploads[0].get("Key") == object_key, f"Expected correct key {object_key} in upload {uploads}"
+            assert uploads[0].get("UploadId") == upload_id, f"Expected correct UploadId {upload_id} in upload {uploads}"

             s3_client.abort_multipart_upload(bucket, object_key, upload_id)
             uploads = s3_client.list_multipart_uploads(bucket)
@@ -298,9 +258,7 @@ class TestS3Gate:

         with allure.step("Check all parts are visible in bucket"):
             got_parts = s3_client.list_parts(bucket, object_key, upload_id)
-            assert len(got_parts) == len(
-                part_files
-            ), f"Expected {parts_count} parts, got\n{got_parts}"
+            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

         s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)

@@ -327,9 +285,7 @@ class TestS3Gate:
         s3_helper.check_tags_by_bucket(s3_client, bucket, [])

     @allure.title("Object tagging API (s3_client={s3_client})")
-    def test_s3_api_object_tagging(
-        self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
-    ):
+    def test_s3_api_object_tagging(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
         """
         Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
         """
@@ -458,9 +414,7 @@ class TestS3Gate:

         with allure.step("Check copied object has the same content"):
             got_copied_file = s3_client.get_object(bucket, copy_obj_path)
-            assert get_file_hash(file_path_simple) == get_file_hash(
-                got_copied_file
-            ), "Hashes must be the same"
+            assert get_file_hash(file_path_simple) == get_file_hash(got_copied_file), "Hashes must be the same"

         with allure.step("Delete one object from bucket"):
             s3_client.delete_object(bucket, file_name_simple)
@@ -509,9 +463,7 @@ class TestS3Gate:

         with allure.step("Check copied object has the same content"):
             got_copied_file_b2 = s3_client.get_object(bucket_2, copy_obj_path_b2)
-            assert get_file_hash(file_path_large) == get_file_hash(
-                got_copied_file_b2
-            ), "Hashes must be the same"
+            assert get_file_hash(file_path_large) == get_file_hash(got_copied_file_b2), "Hashes must be the same"

         with allure.step("Delete one object from first bucket"):
             s3_client.delete_object(bucket_1, file_name_simple)
@@ -524,23 +476,15 @@ class TestS3Gate:
             s3_client.delete_object(bucket_2, copy_obj_path_b2)
             s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[])

-    def check_object_attributes(
-        self, s3_client: S3ClientWrapper, bucket: str, object_key: str, parts_count: int
-    ):
+    def check_object_attributes(self, s3_client: S3ClientWrapper, bucket: str, object_key: str, parts_count: int):
        if not isinstance(s3_client, AwsCliClient):
            logger.warning("Attributes check is not supported for boto3 implementation")
            return

        with allure.step("Check object's attributes"):
-            obj_parts = s3_client.get_object_attributes(
-                bucket, object_key, ["ObjectParts"], full_output=False
-            )
-            assert (
-                obj_parts.get("TotalPartsCount") == parts_count
-            ), f"Expected TotalPartsCount is {parts_count}"
-            assert (
-                len(obj_parts.get("Parts")) == parts_count
-            ), f"Expected Parts count is {parts_count}"
+            obj_parts = s3_client.get_object_attributes(bucket, object_key, ["ObjectParts"], full_output=False)
+            assert obj_parts.get("TotalPartsCount") == parts_count, f"Expected TotalPartsCount is {parts_count}"
+            assert len(obj_parts.get("Parts")) == parts_count, f"Expected Parts count is {parts_count}"

        with allure.step("Check object's attribute max-parts"):
            max_parts = 2
@@ -551,13 +495,9 @@ class TestS3Gate:
                 max_parts=max_parts,
                 full_output=False,
             )
-            assert (
-                obj_parts.get("TotalPartsCount") == parts_count
-            ), f"Expected TotalPartsCount is {parts_count}"
+            assert obj_parts.get("TotalPartsCount") == parts_count, f"Expected TotalPartsCount is {parts_count}"
             assert obj_parts.get("MaxParts") == max_parts, f"Expected MaxParts is {parts_count}"
-            assert (
-                len(obj_parts.get("Parts")) == max_parts
-            ), f"Expected Parts count is {parts_count}"
+            assert len(obj_parts.get("Parts")) == max_parts, f"Expected Parts count is {parts_count}"

         with allure.step("Check object's attribute part-number-marker"):
             part_number_marker = 3
@@ -568,9 +508,7 @@ class TestS3Gate:
                 part_number=part_number_marker,
                 full_output=False,
             )
-            assert (
-                obj_parts.get("TotalPartsCount") == parts_count
-            ), f"Expected TotalPartsCount is {parts_count}"
+            assert obj_parts.get("TotalPartsCount") == parts_count, f"Expected TotalPartsCount is {parts_count}"
             assert (
                 obj_parts.get("PartNumberMarker") == part_number_marker
             ), f"Expected PartNumberMarker is {part_number_marker}"
@@ -3,28 +3,19 @@ from datetime import datetime, timedelta

 import allure
 import pytest
-from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
+from frostfs_testlib.s3 import S3ClientWrapper
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content


-def pytest_generate_tests(metafunc: pytest.Metafunc):
-    if "s3_client" in metafunc.fixturenames:
-        metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
-
-
 @pytest.mark.sanity
 @pytest.mark.s3_gate
 @pytest.mark.s3_gate_locking
 @pytest.mark.parametrize("version_id", [None, "second"])
 class TestS3GateLocking:
-    @allure.title(
-        "Retention period and legal lock on object (version_id={version_id}, s3_client={s3_client})"
-    )
-    def test_s3_object_locking(
-        self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
-    ):
+    @allure.title("Retention period and legal lock on object (version_id={version_id}, s3_client={s3_client})")
+    def test_s3_object_locking(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize):
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)
         retention_period = 2
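A side effect worth noting: titles such as the one above embed `{version_id}` and `{s3_client}` placeholders, which allure-pytest substitutes from the test's parameter values, so each fixture parametrization shows up as a distinct, readable title in the report. A hypothetical illustration (not from this diff):

    @allure.title("Delete object (version_id={version_id}, s3_client={s3_client})")
    def test_delete_object(self, s3_client: S3ClientWrapper, version_id: str):
        ...  # report title renders e.g. "Delete object (version_id=second, s3_client=AwsCliClient)"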
@@ -46,15 +37,11 @@ class TestS3GateLocking:
             "RetainUntilDate": date_obj,
         }
         s3_client.put_object_retention(bucket, file_name, retention, version_id)
-        s3_helper.assert_object_lock_mode(
-            s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
-        )
+        s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF")

         with allure.step(f"Put legal hold to object {file_name}"):
             s3_client.put_object_legal_hold(bucket, file_name, "ON", version_id)
-            s3_helper.assert_object_lock_mode(
-                s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON"
-            )
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON")

         with allure.step("Fail with deleting object with legal hold and retention period"):
             if version_id:
@@ -64,9 +51,7 @@ class TestS3GateLocking:

         with allure.step("Check retention period is no longer set on the uploaded object"):
             time.sleep((retention_period + 1) * 60)
-            s3_helper.assert_object_lock_mode(
-                s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON"
-            )
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON")

         with allure.step("Fail with deleting object with legal hold and retention period"):
             if version_id:
@@ -76,12 +61,8 @@ class TestS3GateLocking:
         else:
             s3_client.delete_object(bucket, file_name, version_id)

-    @allure.title(
-        "Impossible to change retention mode COMPLIANCE (version_id={version_id}, s3_client={s3_client})"
-    )
-    def test_s3_mode_compliance(
-        self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
-    ):
+    @allure.title("Impossible to change retention mode COMPLIANCE (version_id={version_id}, s3_client={s3_client})")
+    def test_s3_mode_compliance(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize):
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)
         retention_period = 2
@@ -102,13 +83,9 @@ class TestS3GateLocking:
             "RetainUntilDate": date_obj,
         }
         s3_client.put_object_retention(bucket, file_name, retention, version_id)
-        s3_helper.assert_object_lock_mode(
-            s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
-        )
+        s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF")

-        with allure.step(
-            f"Try to change retention period {retention_period_1}min to object {file_name}"
-        ):
+        with allure.step(f"Try to change retention period {retention_period_1}min to object {file_name}"):
             date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
             retention = {
                 "Mode": "COMPLIANCE",
@@ -117,12 +94,8 @@ class TestS3GateLocking:
         with pytest.raises(Exception):
             s3_client.put_object_retention(bucket, file_name, retention, version_id)

-    @allure.title(
-        "Change retention mode GOVERNANCE (version_id={version_id}, s3_client={s3_client})"
-    )
-    def test_s3_mode_governance(
-        self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
-    ):
+    @allure.title("Change retention mode GOVERNANCE (version_id={version_id}, s3_client={s3_client})")
+    def test_s3_mode_governance(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize):
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)
         retention_period = 3
@@ -144,13 +117,9 @@ class TestS3GateLocking:
             "RetainUntilDate": date_obj,
         }
         s3_client.put_object_retention(bucket, file_name, retention, version_id)
-        s3_helper.assert_object_lock_mode(
-            s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
-        )
+        s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF")

-        with allure.step(
-            f"Try to change retention period {retention_period_1}min to object {file_name}"
-        ):
+        with allure.step(f"Try to change retention period {retention_period_1}min to object {file_name}"):
             date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
             retention = {
                 "Mode": "GOVERNANCE",
@@ -159,9 +128,7 @@ class TestS3GateLocking:
         with pytest.raises(Exception):
             s3_client.put_object_retention(bucket, file_name, retention, version_id)

-        with allure.step(
-            f"Try to change retention period {retention_period_1}min to object {file_name}"
-        ):
+        with allure.step(f"Try to change retention period {retention_period_1}min to object {file_name}"):
             date_obj = datetime.utcnow() + timedelta(minutes=retention_period_1)
             retention = {
                 "Mode": "GOVERNANCE",
@@ -177,16 +144,12 @@ class TestS3GateLocking:
             "RetainUntilDate": date_obj,
         }
         s3_client.put_object_retention(bucket, file_name, retention, version_id, True)
-        s3_helper.assert_object_lock_mode(
-            s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
-        )
+        s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF")

     @allure.title(
         "[NEGATIVE] Lock object in bucket with disabled locking (version_id={version_id}, s3_client={s3_client})"
     )
-    def test_s3_legal_hold(
-        self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize
-    ):
+    def test_s3_legal_hold(self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: ObjectSize):
         file_path = generate_file(simple_object_size.value)
         file_name = s3_helper.object_key_from_file_path(file_path)

@@ -227,6 +190,4 @@ class TestS3GateLockingBucket:

         with allure.step("Put object into bucket"):
             s3_client.put_object(bucket, file_path)
-            s3_helper.assert_object_lock_mode(
-                s3_client, bucket, file_name, "COMPLIANCE", None, "OFF", 1
-            )
+            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", None, "OFF", 1)
@@ -1,6 +1,6 @@
 import allure
 import pytest
-from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
+from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
 from frostfs_testlib.steps.cli.container import list_objects, search_container_by_name
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@@ -10,18 +10,11 @@ from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split
 PART_SIZE = 5 * 1024 * 1024


-def pytest_generate_tests(metafunc: pytest.Metafunc):
-    if "s3_client" in metafunc.fixturenames:
-        metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
-
-
 @pytest.mark.sanity
 @pytest.mark.s3_gate
 @pytest.mark.s3_gate_multipart
 class TestS3GateMultipart(ClusterTestBase):
-    NO_SUCH_UPLOAD = (
-        "The upload ID may be invalid, or the upload may have been aborted or completed."
-    )
+    NO_SUCH_UPLOAD = "The upload ID may be invalid, or the upload may have been aborted or completed."

     @allure.title("Object Multipart API (s3_client={s3_client})")
     @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
@@ -46,9 +39,7 @@ class TestS3GateMultipart(ClusterTestBase):
                 parts.append((part_id, etag))
             got_parts = s3_client.list_parts(bucket, object_key, upload_id)
             s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
-            assert len(got_parts) == len(
-                part_files
-            ), f"Expected {parts_count} parts, got\n{got_parts}"
+            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

         with allure.step("Check upload list is empty"):
             uploads = s3_client.list_multipart_uploads(bucket)
@@ -91,12 +82,8 @@ class TestS3GateMultipart(ClusterTestBase):
             assert len(parts) == files_count, f"Expected {files_count} parts, got\n{parts}"

         with allure.step(f"Check that we have {files_count} files in container '{container_id}'"):
-            objects = list_objects(
-                default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint
-            )
-            assert (
-                len(objects) == files_count
-            ), f"Expected {files_count} objects in container, got\n{objects}"
+            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
+            assert len(objects) == files_count, f"Expected {files_count} objects in container, got\n{objects}"

         with allure.step("Abort multipart upload"):
             s3_client.abort_multipart_upload(bucket, upload_key, upload_id)
@@ -108,9 +95,7 @@ class TestS3GateMultipart(ClusterTestBase):
                 s3_client.list_parts(bucket, upload_key, upload_id)

         with allure.step("Check that we have no files in container since upload was aborted"):
-            objects = list_objects(
-                default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint
-            )
+            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
             assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"

     @allure.title("Upload Part Copy (s3_client={s3_client})")
@@ -136,17 +121,13 @@ class TestS3GateMultipart(ClusterTestBase):

         with allure.step("Upload parts to multipart upload"):
             for part_id, obj_key in enumerate(objs, start=1):
-                etag = s3_client.upload_part_copy(
-                    bucket, object_key, upload_id, part_id, f"{bucket}/{obj_key}"
-                )
+                etag = s3_client.upload_part_copy(bucket, object_key, upload_id, part_id, f"{bucket}/{obj_key}")
                 parts.append((part_id, etag))
             got_parts = s3_client.list_parts(bucket, object_key, upload_id)

         with allure.step("Complete multipart upload"):
             s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
-            assert len(got_parts) == len(
-                part_files
-            ), f"Expected {parts_count} parts, got\n{got_parts}"
+            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

         with allure.step("Check we can get whole object from bucket"):
             got_object = s3_client.get_object(bucket, object_key)
@@ -9,22 +9,12 @@ import allure
 import pytest
 from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
 from frostfs_testlib.resources.error_patterns import S3_MALFORMED_XML_REQUEST
-from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
+from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.testing.test_control import expect_not_raises
 from frostfs_testlib.utils import wallet_utils
-from frostfs_testlib.utils.file_utils import (
-    concat_files,
-    generate_file,
-    generate_file_with_content,
-    get_file_hash,
-)
-
-
-def pytest_generate_tests(metafunc: pytest.Metafunc):
-    if "s3_client" in metafunc.fixturenames:
-        metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
+from frostfs_testlib.utils.file_utils import concat_files, generate_file, generate_file_with_content, get_file_hash


 @pytest.mark.sanity
@@ -67,28 +57,18 @@ class TestS3GateObject:

         with allure.step("Copy object from first bucket into second"):
             copy_obj_path_b2 = s3_client.copy_object(bucket_1, file_name, bucket=bucket_2)
-            s3_helper.check_objects_in_bucket(
-                s3_client, bucket_1, expected_objects=bucket_1_objects
-            )
-            s3_helper.check_objects_in_bucket(
-                s3_client, bucket_2, expected_objects=[copy_obj_path_b2]
-            )
+            s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
+            s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])

         with allure.step("Check copied object has the same content"):
             got_copied_file_b2 = s3_client.get_object(bucket_2, copy_obj_path_b2)
-            assert get_file_hash(file_path) == get_file_hash(
-                got_copied_file_b2
-            ), "Hashes must be the same"
+            assert get_file_hash(file_path) == get_file_hash(got_copied_file_b2), "Hashes must be the same"

         with allure.step("Delete one object from first bucket"):
             s3_client.delete_object(bucket_1, file_name)
             bucket_1_objects.remove(file_name)
-            s3_helper.check_objects_in_bucket(
-                s3_client, bucket_1, expected_objects=bucket_1_objects
-            )
-            s3_helper.check_objects_in_bucket(
-                s3_client, bucket_2, expected_objects=[copy_obj_path_b2]
-            )
+            s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
+            s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])

         with allure.step("Copy one object into the same bucket"):
             with pytest.raises(Exception):
@@ -102,9 +82,7 @@ class TestS3GateObject:
         simple_object_size: ObjectSize,
     ):
         version_1_content = "Version 1"
-        file_name_simple = generate_file_with_content(
-            simple_object_size.value, content=version_1_content
-        )
+        file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
         obj_key = os.path.basename(file_name_simple)

         bucket_1, bucket_2 = two_buckets
@@ -123,32 +101,22 @@ class TestS3GateObject:
         s3_helper.set_bucket_versioning(s3_client, bucket_2, VersioningStatus.ENABLED)
         with allure.step("Copy object from first bucket into second"):
             copy_obj_path_b2 = s3_client.copy_object(bucket_1, obj_key, bucket=bucket_2)
-            s3_helper.check_objects_in_bucket(
-                s3_client, bucket_1, expected_objects=bucket_1_objects
-            )
-            s3_helper.check_objects_in_bucket(
-                s3_client, bucket_2, expected_objects=[copy_obj_path_b2]
-            )
+            s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
+            s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])

         with allure.step("Delete one object from first bucket and check object in bucket"):
             s3_client.delete_object(bucket_1, obj_key)
             bucket_1_objects.remove(obj_key)
-            s3_helper.check_objects_in_bucket(
-                s3_client, bucket_1, expected_objects=bucket_1_objects
-            )
+            s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)

         with allure.step("Copy one object into the same bucket"):
             with pytest.raises(Exception):
                 s3_client.copy_object(bucket_1, obj_key)

     @allure.title("Copy with acl (s3_client={s3_client})")
-    def test_s3_copy_acl(
-        self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
-    ):
+    def test_s3_copy_acl(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
         version_1_content = "Version 1"
-        file_name_simple = generate_file_with_content(
-            simple_object_size.value, content=version_1_content
-        )
+        file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
         obj_key = os.path.basename(file_name_simple)

         s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@ -163,9 +131,7 @@ class TestS3GateObject:
|
|||
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
|
||||
|
||||
@allure.title("Copy object with metadata (s3_client={s3_client})")
|
||||
def test_s3_copy_metadate(
|
||||
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
|
||||
):
|
||||
def test_s3_copy_metadate(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
|
||||
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
|
||||
file_path = generate_file(simple_object_size.value)
|
||||
file_name = s3_helper.object_key_from_file_path(file_path)
|
||||
|
@ -183,17 +149,13 @@ class TestS3GateObject:
|
|||
bucket_1_objects.append(copy_obj_path)
|
||||
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_1_objects)
|
||||
obj_head = s3_client.head_object(bucket, copy_obj_path)
|
||||
assert (
|
||||
obj_head.get("Metadata") == object_metadata
|
||||
), f"Metadata must be {object_metadata}"
|
||||
assert obj_head.get("Metadata") == object_metadata, f"Metadata must be {object_metadata}"
|
||||
|
||||
with allure.step("Copy one object with metadata"):
|
||||
copy_obj_path = s3_client.copy_object(bucket, file_name, metadata_directive="COPY")
|
||||
bucket_1_objects.append(copy_obj_path)
|
||||
obj_head = s3_client.head_object(bucket, copy_obj_path)
|
||||
assert (
|
||||
obj_head.get("Metadata") == object_metadata
|
||||
), f"Metadata must be {object_metadata}"
|
||||
assert obj_head.get("Metadata") == object_metadata, f"Metadata must be {object_metadata}"
|
||||
|
||||
with allure.step("Copy one object with new metadata"):
|
||||
object_metadata_1 = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
|
||||
|
@ -205,14 +167,10 @@ class TestS3GateObject:
|
|||
)
|
||||
bucket_1_objects.append(copy_obj_path)
|
||||
obj_head = s3_client.head_object(bucket, copy_obj_path)
|
||||
assert (
|
||||
obj_head.get("Metadata") == object_metadata_1
|
||||
), f"Metadata must be {object_metadata_1}"
|
||||
assert obj_head.get("Metadata") == object_metadata_1, f"Metadata must be {object_metadata_1}"
|
||||
|
||||
@allure.title("Copy object with tagging (s3_client={s3_client})")
|
||||
def test_s3_copy_tagging(
|
||||
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
|
||||
):
|
||||
def test_s3_copy_tagging(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
|
||||
object_tagging = [(f"{uuid.uuid4()}", f"{uuid.uuid4()}")]
|
||||
file_path = generate_file(simple_object_size.value)
|
||||
file_name_simple = s3_helper.object_key_from_file_path(file_path)
|
||||
|
@ -235,9 +193,7 @@ class TestS3GateObject:
|
|||
assert tag in got_tags, f"Expected tag {tag} in {got_tags}"
|
||||
|
||||
with allure.step("Copy one object with tag"):
|
||||
copy_obj_path_1 = s3_client.copy_object(
|
||||
bucket, file_name_simple, tagging_directive="COPY"
|
||||
)
|
||||
copy_obj_path_1 = s3_client.copy_object(bucket, file_name_simple, tagging_directive="COPY")
|
||||
got_tags = s3_client.get_object_tagging(bucket, copy_obj_path_1)
|
||||
assert got_tags, f"Expected tags, got {got_tags}"
|
||||
expected_tags = [{"Key": key, "Value": value} for key, value in object_tagging]
|
||||
|
@ -270,9 +226,7 @@ class TestS3GateObject:
|
|||
):
|
||||
version_1_content = "Version 1"
|
||||
version_2_content = "Version 2"
|
||||
file_name_simple = generate_file_with_content(
|
||||
simple_object_size.value, content=version_1_content
|
||||
)
|
||||
file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)
|
||||
|
||||
obj_key = os.path.basename(file_name_simple)
|
||||
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
|
||||
|
@ -286,9 +240,7 @@ class TestS3GateObject:
|
|||
|
||||
with allure.step("Check bucket shows all versions"):
|
||||
versions = s3_client.list_objects_versions(bucket)
|
||||
obj_versions = {
|
||||
version.get("VersionId") for version in versions if version.get("Key") == obj_key
|
||||
}
|
||||
obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
|
||||
assert obj_versions == {
|
||||
version_id_1,
|
||||
version_id_2,
|
||||
|
@@ -297,18 +249,14 @@ class TestS3GateObject:
        with allure.step("Delete 1 version of object"):
            delete_obj = s3_client.delete_object(bucket, obj_key, version_id=version_id_1)
            versions = s3_client.list_objects_versions(bucket)
            obj_versions = {
                version.get("VersionId") for version in versions if version.get("Key") == obj_key
            }
            obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
            assert obj_versions == {version_id_2}, f"Object should have versions: {version_id_2}"
            assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"

        with allure.step("Delete second version of object"):
            delete_obj = s3_client.delete_object(bucket, obj_key, version_id=version_id_2)
            versions = s3_client.list_objects_versions(bucket)
            obj_versions = {
                version.get("VersionId") for version in versions if version.get("Key") == obj_key
            }
            obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
            assert not obj_versions, "Expected object not found"
            assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"

@@ -324,16 +272,12 @@ class TestS3GateObject:
            assert "DeleteMarker" in delete_obj.keys(), "Expected delete Marker"

    @allure.title("Bulk delete version of object (s3_client={s3_client})")
    def test_s3_bulk_delete_versioning(
        self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
    ):
    def test_s3_bulk_delete_versioning(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
        version_1_content = "Version 1"
        version_2_content = "Version 2"
        version_3_content = "Version 3"
        version_4_content = "Version 4"
        file_name_1 = generate_file_with_content(
            simple_object_size.value, content=version_1_content
        )
        file_name_1 = generate_file_with_content(simple_object_size.value, content=version_1_content)

        obj_key = os.path.basename(file_name_1)
        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@@ -356,37 +300,25 @@ class TestS3GateObject:

        with allure.step("Check bucket shows all versions"):
            versions = s3_client.list_objects_versions(bucket)
            obj_versions = {
                version.get("VersionId") for version in versions if version.get("Key") == obj_key
            }
            obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
            assert obj_versions == version_ids, f"Object should have versions: {version_ids}"

        with allure.step("Delete two objects from bucket one by one"):
            version_to_delete_b1 = sample(
                [version_id_1, version_id_2, version_id_3, version_id_4], k=2
            )
            version_to_delete_b1 = sample([version_id_1, version_id_2, version_id_3, version_id_4], k=2)
            version_to_save = list(set(version_ids) - set(version_to_delete_b1))
            for ver in version_to_delete_b1:
                s3_client.delete_object(bucket, obj_key, ver)

        with allure.step("Check bucket shows all versions"):
            versions = s3_client.list_objects_versions(bucket)
            obj_versions = [
                version.get("VersionId") for version in versions if version.get("Key") == obj_key
            ]
            assert (
                obj_versions.sort() == version_to_save.sort()
            ), f"Object should have versions: {version_to_save}"
            obj_versions = [version.get("VersionId") for version in versions if version.get("Key") == obj_key]
            assert obj_versions.sort() == version_to_save.sort(), f"Object should have versions: {version_to_save}"
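One caveat worth flagging in the assertion just above (present in both the wrapped and the joined form): `list.sort()` sorts in place and returns `None`, so `obj_versions.sort() == version_to_save.sort()` compares `None == None` and can never fail. A hypothetical tightening, not part of this commit, would compare the results of `sorted()`:

# sorted() returns a new list, so the comparison actually inspects the ids
assert sorted(obj_versions) == sorted(version_to_save), f"Object should have versions: {version_to_save}"

The same pattern shows up again in the versioning suite further down.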
    @allure.title("Get versions of object (s3_client={s3_client})")
    def test_s3_get_versioning(
        self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
    ):
    def test_s3_get_versioning(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
        version_1_content = "Version 1"
        version_2_content = "Version 2"
        file_name_simple = generate_file_with_content(
            simple_object_size.value, content=version_1_content
        )
        file_name_simple = generate_file_with_content(simple_object_size.value, content=version_1_content)

        obj_key = os.path.basename(file_name_simple)
        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
@@ -399,21 +331,15 @@ class TestS3GateObject:

        with allure.step("Get first version of object"):
            object_1 = s3_client.get_object(bucket, obj_key, version_id_1, full_output=True)
            assert (
                object_1.get("VersionId") == version_id_1
            ), f"Get object with version {version_id_1}"
            assert object_1.get("VersionId") == version_id_1, f"Get object with version {version_id_1}"

        with allure.step("Get second version of object"):
            object_2 = s3_client.get_object(bucket, obj_key, version_id_2, full_output=True)
            assert (
                object_2.get("VersionId") == version_id_2
            ), f"Get object with version {version_id_2}"
            assert object_2.get("VersionId") == version_id_2, f"Get object with version {version_id_2}"

        with allure.step("Get object"):
            object_3 = s3_client.get_object(bucket, obj_key, full_output=True)
            assert (
                object_3.get("VersionId") == version_id_2
            ), f"Get object with version {version_id_2}"
            assert object_3.get("VersionId") == version_id_2, f"Get object with version {version_id_2}"

    @allure.title("Get range (s3_client={s3_client})")
    def test_s3_get_range(

@@ -483,9 +409,7 @@ class TestS3GateObject:
                object_range=[2 * int(simple_object_size.value / 3) + 1, simple_object_size.value],
            )
            con_file_1 = concat_files([object_2_part_1, object_2_part_2, object_2_part_3])
            assert get_file_hash(con_file_1) == get_file_hash(
                file_name_1
            ), "Hashes must be the same"
            assert get_file_hash(con_file_1) == get_file_hash(file_name_1), "Hashes must be the same"

        with allure.step("Get object"):
            object_3_part_1 = s3_client.get_object(

@@ -568,26 +492,18 @@ class TestS3GateObject:
            assert "LastModified" in response, "Expected LastModified field"
            assert "ETag" in response, "Expected ETag field"
            assert response.get("Metadata") == {}, "Expected Metadata empty"
            assert (
                response.get("VersionId") == version_id_2
            ), f"Expected VersionId is {version_id_2}"
            assert response.get("VersionId") == version_id_2, f"Expected VersionId is {version_id_2}"
            assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"

        with allure.step("Get head ob first version of object"):
            response = s3_client.head_object(bucket, file_name, version_id=version_id_1)
            assert "LastModified" in response, "Expected LastModified field"
            assert "ETag" in response, "Expected ETag field"
            assert (
                response.get("Metadata") == object_metadata
            ), f"Expected Metadata is {object_metadata}"
            assert (
                response.get("VersionId") == version_id_1
            ), f"Expected VersionId is {version_id_1}"
            assert response.get("Metadata") == object_metadata, f"Expected Metadata is {object_metadata}"
            assert response.get("VersionId") == version_id_1, f"Expected VersionId is {version_id_1}"
            assert response.get("ContentLength") != 0, "Expected ContentLength is not zero"

    @allure.title(
        "List of objects with version (method_version={list_type}, s3_client={s3_client})"
    )
    @allure.title("List of objects with version (method_version={list_type}, s3_client={s3_client})")
    @pytest.mark.parametrize("list_type", ["v1", "v2"])
    def test_s3_list_object(
        self,

@@ -624,9 +540,7 @@ class TestS3GateObject:
            list_obj_1 = s3_client.list_objects_v2(bucket, full_output=True)
            contents = list_obj_1.get("Contents", [])
            assert len(contents) == 1, "bucket should have only 1 object"
            assert (
                contents[0].get("Key") == file_name_2
            ), f"bucket should have object key {file_name_2}"
            assert contents[0].get("Key") == file_name_2, f"bucket should have object key {file_name_2}"
            assert "DeleteMarker" in delete_obj.keys(), "Expected delete Marker"

    @allure.title("Put object (s3_client={s3_client})")

@@ -655,22 +569,16 @@ class TestS3GateObject:
            assert obj_head.get("Metadata") == object_1_metadata, "Metadata must be the same"
            got_tags = s3_client.get_object_tagging(bucket, file_name)
            assert got_tags, f"Expected tags, got {got_tags}"
            assert got_tags == [
                {"Key": tag_key_1, "Value": str(tag_value_1)}
            ], "Tags must be the same"
            assert got_tags == [{"Key": tag_key_1, "Value": str(tag_value_1)}], "Tags must be the same"

        with allure.step("Rewrite file into bucket"):
            file_path_2 = generate_file_with_content(
                simple_object_size.value, file_path=file_path_1
            )
            file_path_2 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
            s3_client.put_object(bucket, file_path_2, metadata=object_2_metadata, tagging=tag_2)
            obj_head = s3_client.head_object(bucket, file_name)
            assert obj_head.get("Metadata") == object_2_metadata, "Metadata must be the same"
            got_tags_1 = s3_client.get_object_tagging(bucket, file_name)
            assert got_tags_1, f"Expected tags, got {got_tags_1}"
            assert got_tags_1 == [
                {"Key": tag_key_2, "Value": str(tag_value_2)}
            ], "Tags must be the same"
            assert got_tags_1 == [{"Key": tag_key_2, "Value": str(tag_value_2)}], "Tags must be the same"

        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

@@ -683,28 +591,18 @@ class TestS3GateObject:
        tag_3 = f"{tag_key_3}={tag_value_3}"

        with allure.step("Put third object into bucket"):
            version_id_1 = s3_client.put_object(
                bucket, file_path_3, metadata=object_3_metadata, tagging=tag_3
            )
            version_id_1 = s3_client.put_object(bucket, file_path_3, metadata=object_3_metadata, tagging=tag_3)
            obj_head_3 = s3_client.head_object(bucket, file_name_3)
            assert obj_head_3.get("Metadata") == object_3_metadata, "Matadata must be the same"
            got_tags_3 = s3_client.get_object_tagging(bucket, file_name_3)
            assert got_tags_3, f"Expected tags, got {got_tags_3}"
            assert got_tags_3 == [
                {"Key": tag_key_3, "Value": str(tag_value_3)}
            ], "Tags must be the same"
            assert got_tags_3 == [{"Key": tag_key_3, "Value": str(tag_value_3)}], "Tags must be the same"

        with allure.step("Put new version of file into bucket"):
            file_path_4 = generate_file_with_content(
                simple_object_size.value, file_path=file_path_3
            )
            file_path_4 = generate_file_with_content(simple_object_size.value, file_path=file_path_3)
            version_id_2 = s3_client.put_object(bucket, file_path_4)
            versions = s3_client.list_objects_versions(bucket)
            obj_versions = {
                version.get("VersionId")
                for version in versions
                if version.get("Key") == file_name_3
            }
            obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == file_name_3}
            assert obj_versions == {
                version_id_1,
                version_id_2,
@@ -714,26 +612,20 @@ class TestS3GateObject:

        with allure.step("Get object"):
            object_3 = s3_client.get_object(bucket, file_name_3, full_output=True)
            assert (
                object_3.get("VersionId") == version_id_2
            ), f"get object with version {version_id_2}"
            assert object_3.get("VersionId") == version_id_2, f"get object with version {version_id_2}"
            object_3 = s3_client.get_object(bucket, file_name_3)
            assert get_file_hash(file_path_4) == get_file_hash(object_3), "Hashes must be the same"

        with allure.step("Get first version of object"):
            object_4 = s3_client.get_object(bucket, file_name_3, version_id_1, full_output=True)
            assert (
                object_4.get("VersionId") == version_id_1
            ), f"get object with version {version_id_1}"
            assert object_4.get("VersionId") == version_id_1, f"get object with version {version_id_1}"
            object_4 = s3_client.get_object(bucket, file_name_3, version_id_1)
            assert file_hash == get_file_hash(object_4), "Hashes must be the same"
            obj_head_3 = s3_client.head_object(bucket, file_name_3, version_id_1)
            assert obj_head_3.get("Metadata") == object_3_metadata, "Metadata must be the same"
            got_tags_3 = s3_client.get_object_tagging(bucket, file_name_3, version_id_1)
            assert got_tags_3, f"Expected tags, got {got_tags_3}"
            assert got_tags_3 == [
                {"Key": tag_key_3, "Value": str(tag_value_3)}
            ], "Tags must be the same"
            assert got_tags_3 == [{"Key": tag_key_3, "Value": str(tag_value_3)}], "Tags must be the same"

    @allure.title("Put object with ACL (versioning={bucket_versioning}, s3_client={s3_client})")
    @pytest.mark.parametrize("bucket_versioning", ["ENABLED", "SUSPENDED"])

@@ -762,9 +654,7 @@ class TestS3GateObject:
            assert get_file_hash(file_path_1) == get_file_hash(object_1), "Hashes must be the same"

        with allure.step("Put object with acl public-read"):
            file_path_2 = generate_file_with_content(
                simple_object_size.value, file_path=file_path_1
            )
            file_path_2 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
            s3_client.put_object(bucket, file_path_2, acl="public-read")
            obj_acl = s3_client.get_object_acl(bucket, file_name)
            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")

@@ -772,9 +662,7 @@ class TestS3GateObject:
            assert get_file_hash(file_path_2) == get_file_hash(object_2), "Hashes must be the same"

        with allure.step("Put object with acl public-read-write"):
            file_path_3 = generate_file_with_content(
                simple_object_size.value, file_path=file_path_1
            )
            file_path_3 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
            s3_client.put_object(bucket, file_path_3, acl="public-read-write")
            obj_acl = s3_client.get_object_acl(bucket, file_name)
            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")

@@ -782,9 +670,7 @@ class TestS3GateObject:
            assert get_file_hash(file_path_3) == get_file_hash(object_3), "Hashes must be the same"

        with allure.step("Put object with acl authenticated-read"):
            file_path_4 = generate_file_with_content(
                simple_object_size.value, file_path=file_path_1
            )
            file_path_4 = generate_file_with_content(simple_object_size.value, file_path=file_path_1)
            s3_client.put_object(bucket, file_path_4, acl="authenticated-read")
            obj_acl = s3_client.get_object_acl(bucket, file_name)
            s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")

@@ -806,9 +692,7 @@ class TestS3GateObject:
            object_5 = s3_client.get_object(bucket, file_name_5)
            assert get_file_hash(file_path_5) == get_file_hash(object_5), "Hashes must be the same"

        with allure.step(
            "Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"
        ):
        with allure.step("Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"):
            generate_file_with_content(simple_object_size.value, file_path=file_path_5)
            s3_client.put_object(
                bucket,

@@ -833,9 +717,7 @@ class TestS3GateObject:
        bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)

        with allure.step(
            "Put object with lock-mode GOVERNANCE lock-retain-until-date +1day, lock-legal-hold-status"
        ):
        with allure.step("Put object with lock-mode GOVERNANCE lock-retain-until-date +1day, lock-legal-hold-status"):
            date_obj = datetime.utcnow() + timedelta(days=1)
            s3_client.put_object(
                bucket,
@@ -844,9 +726,7 @@ class TestS3GateObject:
                object_lock_retain_until_date=date_obj.strftime("%Y-%m-%dT%H:%M:%S"),
                object_lock_legal_hold_status="OFF",
            )
            s3_helper.assert_object_lock_mode(
                s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
            )
            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF")

        with allure.step(
            "Put new version of object with [--object-lock-mode COMPLIANCE] and [--object-lock-retain-until-date +3days]"

@@ -859,9 +739,7 @@ class TestS3GateObject:
                object_lock_mode="COMPLIANCE",
                object_lock_retain_until_date=date_obj,
            )
            s3_helper.assert_object_lock_mode(
                s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
            )
            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF")

        with allure.step(
            "Put new version of object with [--object-lock-mode COMPLIANCE] and [--object-lock-retain-until-date +2days]"

@@ -875,9 +753,7 @@ class TestS3GateObject:
                object_lock_retain_until_date=date_obj,
                object_lock_legal_hold_status="ON",
            )
            s3_helper.assert_object_lock_mode(
                s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON"
            )
            s3_helper.assert_object_lock_mode(s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON")

        with allure.step("Put object with lock-mode"):
            with pytest.raises(
@@ -939,9 +815,7 @@ class TestS3GateObject:

        with allure.step("Check objects are synced"):
            objects = s3_client.list_objects(bucket)
            assert set(key_to_path.keys()) == set(
                objects
            ), f"Expected all abjects saved. Got {objects}"
            assert set(key_to_path.keys()) == set(objects), f"Expected all abjects saved. Got {objects}"

        with allure.step("Check these are the same objects"):
            for obj_key in objects:

@@ -950,9 +824,7 @@ class TestS3GateObject:
                    key_to_path.get(obj_key)
                ), "Expected hashes are the same"
                obj_head = s3_client.head_object(bucket, obj_key)
                assert (
                    obj_head.get("Metadata") == object_metadata
                ), f"Metadata of object is {object_metadata}"
                assert obj_head.get("Metadata") == object_metadata, f"Metadata of object is {object_metadata}"
                # Uncomment after https://github.com/nspcc-dev/neofs-s3-gw/issues/685 is solved
                # obj_acl = s3_client.get_object_acl(bucket, obj_key)
                # s3_helper.assert_s3_acl(acl_grants = obj_acl, permitted_users = "AllUsers")

@@ -994,9 +866,7 @@ class TestS3GateObject:
        assert not objects_list, f"Expected empty bucket, got {objects_list}"

    @allure.title("Delete the same object twice (s3_client={s3_client})")
    def test_s3_delete_twice(
        self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
    ):
    def test_s3_delete_twice(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
        objects_list = s3_client.list_objects(bucket)
        with allure.step("Check that bucket is empty"):

@@ -1012,9 +882,7 @@ class TestS3GateObject:
            delete_object = s3_client.delete_object(bucket, file_name)
            versions = s3_client.list_objects_versions(bucket)

            obj_versions = {
                version.get("VersionId") for version in versions if version.get("Key") == file_name
            }
            obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == file_name}
            assert obj_versions, f"Object versions were not found {objects_list}"
            assert "DeleteMarker" in delete_object.keys(), "Delete markers not found"

@@ -1022,9 +890,7 @@ class TestS3GateObject:
            delete_object_2nd_attempt = s3_client.delete_object(bucket, file_name)
            versions_2nd_attempt = s3_client.list_objects_versions(bucket)

            assert (
                delete_object.keys() == delete_object_2nd_attempt.keys()
            ), "Delete markers are not the same"
            assert delete_object.keys() == delete_object_2nd_attempt.keys(), "Delete markers are not the same"
            # check that nothing was changed
            # checking here not VersionId only, but all data (for example LastModified)
            assert versions == versions_2nd_attempt, "Versions are not the same"
@@ -2,7 +2,7 @@ import os

import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.cli.container import search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.steps.storage_policy import get_simple_object_copies

@@ -12,23 +12,12 @@ from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file


def pytest_generate_tests(metafunc: pytest.Metafunc):
    policy = f"{os.getcwd()}/pytest_tests/resources/files/policy.json"
    if "s3_client" in metafunc.fixturenames:
        metafunc.parametrize(
            "s3_client, s3_policy",
            [(AwsCliClient, policy), (Boto3ClientWrapper, policy)],
            indirect=True,
            ids=["aws cli", "boto3"],
        )


@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
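In this file the removed hook parametrized two fixtures at once (`s3_client` plus `s3_policy`); after the change only `s3_policy` needs the class-level mark, and the client matrix again comes from the shared fixture. A minimal sketch of how `indirect=True` routes a parameter into a fixture (the fixture body and class name are illustrative, not the real conftest):

import pytest


@pytest.fixture
def s3_policy(request: pytest.FixtureRequest):
    # with indirect=True the marked value arrives here as request.param
    # instead of being passed straight into the test function
    return request.param


@pytest.mark.parametrize("s3_policy", ["pytest_tests/resources/files/policy.json"], indirect=True)
class TestPolicySketch:
    def test_policy_path_reaches_fixture(self, s3_policy):
        assert s3_policy.endswith("policy.json")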
class TestS3GatePolicy(ClusterTestBase):
    @allure.title("Bucket creation with retention policy applied (s3_client={s3_client})")
    def test_s3_bucket_location(
        self, default_wallet: str, s3_client: S3ClientWrapper, simple_object_size: ObjectSize
    ):
    def test_s3_bucket_location(self, default_wallet: str, s3_client: S3ClientWrapper, simple_object_size: ObjectSize):
        file_path_1 = generate_file(simple_object_size.value)
        file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
        file_path_2 = generate_file(simple_object_size.value)

@@ -156,9 +145,7 @@ class TestS3GatePolicy(ClusterTestBase):
            }
            s3_client.put_bucket_cors(bucket, cors)
            bucket_cors = s3_client.get_bucket_cors(bucket)
            assert bucket_cors == cors.get(
                "CORSRules"
            ), f"Expected CORSRules must be {cors.get('CORSRules')}"
            assert bucket_cors == cors.get("CORSRules"), f"Expected CORSRules must be {cors.get('CORSRules')}"

        with allure.step("delete bucket cors"):
            s3_client.delete_bucket_cors(bucket)
@@ -4,17 +4,12 @@ from typing import Tuple

import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.s3 import S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file


def pytest_generate_tests(metafunc: pytest.Metafunc):
    if "s3_client" in metafunc.fixturenames:
        metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)


@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_tagging

@@ -29,9 +24,7 @@ class TestS3GateTagging:
        return tags

    @allure.title("Object tagging (s3_client={s3_client})")
    def test_s3_object_tagging(
        self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize
    ):
    def test_s3_object_tagging(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: ObjectSize):
        file_path = generate_file(simple_object_size.value)
        file_name = s3_helper.object_key_from_file_path(file_path)

@@ -45,9 +38,7 @@ class TestS3GateTagging:
        with allure.step("Put 10 new tags for object"):
            tags_2 = self.create_tags(10)
            s3_client.put_object_tagging(bucket, file_name, tags=tags_2)
            s3_helper.check_tags_by_object(
                s3_client, bucket, file_name, tags_2, [("Tag1", "Value1")]
            )
            s3_helper.check_tags_by_object(s3_client, bucket, file_name, tags_2, [("Tag1", "Value1")])

        with allure.step("Put 10 extra new tags for object"):
            tags_3 = self.create_tags(10)
@@ -1,18 +1,11 @@
import os

import allure
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content


def pytest_generate_tests(metafunc: pytest.Metafunc):
    if "s3_client" in metafunc.fixturenames:
        metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)


@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_versioning

@@ -34,18 +27,10 @@ class TestS3GateVersioning:
        with allure.step("Put object into bucket"):
            s3_client.put_object(bucket, file_path)
            objects_list = s3_client.list_objects(bucket)
            assert (
                objects_list == bucket_objects
            ), f"Expected list with single objects in bucket, got {objects_list}"
            assert objects_list == bucket_objects, f"Expected list with single objects in bucket, got {objects_list}"
            object_version = s3_client.list_objects_versions(bucket)
            actual_version = [
                version.get("VersionId")
                for version in object_version
                if version.get("Key") == file_name
            ]
            assert actual_version == [
                "null"
            ], f"Expected version is null in list-object-versions, got {object_version}"
            actual_version = [version.get("VersionId") for version in object_version if version.get("Key") == file_name]
            assert actual_version == ["null"], f"Expected version is null in list-object-versions, got {object_version}"
            object_0 = s3_client.head_object(bucket, file_name)
            assert (
                object_0.get("VersionId") == "null"

@@ -60,27 +45,19 @@ class TestS3GateVersioning:

        with allure.step("Check bucket shows all versions"):
            versions = s3_client.list_objects_versions(bucket)
            obj_versions = [
                version.get("VersionId") for version in versions if version.get("Key") == file_name
            ]
            obj_versions = [version.get("VersionId") for version in versions if version.get("Key") == file_name]
            assert (
                obj_versions.sort() == [version_id_1, version_id_2, "null"].sort()
            ), f"Expected object has versions: {version_id_1, version_id_2, 'null'}"

        with allure.step("Get object"):
            object_1 = s3_client.get_object(bucket, file_name, full_output=True)
            assert (
                object_1.get("VersionId") == version_id_2
            ), f"Get object with version {version_id_2}"
            assert object_1.get("VersionId") == version_id_2, f"Get object with version {version_id_2}"

        with allure.step("Get first version of object"):
            object_2 = s3_client.get_object(bucket, file_name, version_id_1, full_output=True)
            assert (
                object_2.get("VersionId") == version_id_1
            ), f"Get object with version {version_id_1}"
            assert object_2.get("VersionId") == version_id_1, f"Get object with version {version_id_1}"

        with allure.step("Get second version of object"):
            object_3 = s3_client.get_object(bucket, file_name, version_id_2, full_output=True)
            assert (
                object_3.get("VersionId") == version_id_2
            ), f"Get object with version {version_id_2}"
            assert object_3.get("VersionId") == version_id_2, f"Get object with version {version_id_2}"
@@ -48,15 +48,9 @@ RANGE_OFFSET_FOR_COMPLEX_OBJECT = 200


@pytest.fixture(scope="module")
def storage_containers(
    owner_wallet: WalletInfo, client_shell: Shell, cluster: Cluster
) -> list[str]:
    cid = create_container(
        owner_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
    )
    other_cid = create_container(
        owner_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
    )
def storage_containers(owner_wallet: WalletInfo, client_shell: Shell, cluster: Cluster) -> list[str]:
    cid = create_container(owner_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
    other_cid = create_container(owner_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
    yield [cid, other_cid]


@@ -99,9 +93,7 @@ def storage_objects(


@allure.step("Get ranges for test")
def get_ranges(
    storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, endpoint: str
) -> list[str]:
def get_ranges(storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, endpoint: str) -> list[str]:
    """
    Returns ranges to test range/hash methods via static session
    """

@@ -112,8 +104,7 @@ def get_ranges(
        return [
            "0:10",
            f"{object_size-10}:10",
            f"{max_object_size - RANGE_OFFSET_FOR_COMPLEX_OBJECT}:"
            f"{RANGE_OFFSET_FOR_COMPLEX_OBJECT * 2}",
            f"{max_object_size - RANGE_OFFSET_FOR_COMPLEX_OBJECT}:" f"{RANGE_OFFSET_FOR_COMPLEX_OBJECT * 2}",
        ]
    else:
        return ["0:10", f"{object_size-10}:10"]
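To make the `offset:length` strings above concrete, a small worked example (the sizes are invented for illustration; in the suite they come from the storage object and the node's max object size):

# Assumed sizes: a complex object of 2_000_000 bytes stored as
# 1_000_000-byte parts, with RANGE_OFFSET_FOR_COMPLEX_OBJECT = 200.
object_size = 2_000_000
max_object_size = 1_000_000
offset = 200

ranges = [
    "0:10",                                      # head of the object
    f"{object_size - 10}:10",                    # tail of the object
    f"{max_object_size - offset}:{offset * 2}",  # straddles the part boundary
]
print(ranges)  # ['0:10', '1999990:10', '999800:400']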
@@ -146,11 +137,10 @@ def static_sessions(
    }


@pytest.mark.sanity
@pytest.mark.static_session
class TestObjectStaticSession(ClusterTestBase):
    @allure.title(
        "Read operations with static session (method={method_under_test.__name__}, obj_size={object_size})"
    )
    @allure.title("Read operations with static session (method={method_under_test.__name__}, obj_size={object_size})")
    @pytest.mark.parametrize(
        "method_under_test,verb",
        [

@@ -181,9 +171,7 @@ class TestObjectStaticSession(ClusterTestBase):
            session=static_sessions[verb],
        )

    @allure.title(
        "Range operations with static session (method={method_under_test.__name__}, obj_size={object_size})"
    )
    @allure.title("Range operations with static session (method={method_under_test.__name__}, obj_size={object_size})")
    @pytest.mark.parametrize(
        "method_under_test,verb",
        [(get_range, ObjectVerb.RANGE), (get_range_hash, ObjectVerb.RANGEHASH)],

@@ -201,9 +189,7 @@ class TestObjectStaticSession(ClusterTestBase):
        Validate static session with range operations
        """
        storage_object = storage_objects[0]
        ranges_to_test = get_ranges(
            storage_object, max_object_size, self.shell, self.cluster.default_rpc_endpoint
        )
        ranges_to_test = get_ranges(storage_object, max_object_size, self.shell, self.cluster.default_rpc_endpoint)

        for range_to_test in ranges_to_test:
            with allure.step(f"Check range {range_to_test}"):

@@ -241,9 +227,7 @@ class TestObjectStaticSession(ClusterTestBase):
            )
            assert sorted(expected_object_ids) == sorted(actual_object_ids)

    @allure.title(
        "[NEGATIVE] Static session with object id not in session (obj_size={object_size})"
    )
    @allure.title("[NEGATIVE] Static session with object id not in session (obj_size={object_size})")
    def test_static_session_unrelated_object(
        self,
        user_wallet: WalletInfo,

@@ -307,9 +291,7 @@ class TestObjectStaticSession(ClusterTestBase):
                session=static_sessions[ObjectVerb.HEAD],
            )

    @allure.title(
        "[NEGATIVE] Static session with container id not in session (obj_size={object_size})"
    )
    @allure.title("[NEGATIVE] Static session with container id not in session (obj_size={object_size})")
    def test_static_session_unrelated_container(
        self,
        user_wallet: WalletInfo,

@@ -473,9 +455,7 @@ class TestObjectStaticSession(ClusterTestBase):
            session=token_expire_at_next_epoch,
        )

        with allure.step(
            "Object should be available at last epoch before session token expiration"
        ):
        with allure.step("Object should be available at last epoch before session token expiration"):
            self.tick_epoch()
            with expect_not_raises():
                head_object(

@@ -540,9 +520,7 @@ class TestObjectStaticSession(ClusterTestBase):
            session=token_start_at_next_epoch,
        )

        with allure.step(
            "Object should be available with session token starting from token nbf epoch"
        ):
        with allure.step("Object should be available with session token starting from token nbf epoch"):
            self.tick_epoch()
            with expect_not_raises():
                head_object(

@@ -554,9 +532,7 @@ class TestObjectStaticSession(ClusterTestBase):
            session=token_start_at_next_epoch,
        )

        with allure.step(
            "Object should be available at last epoch before session token expiration"
        ):
        with allure.step("Object should be available at last epoch before session token expiration"):
            self.tick_epoch()
            with expect_not_raises():
                head_object(
@@ -3,12 +3,7 @@ import pytest
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.acl import create_eacl, set_eacl, wait_for_cache_expired
from frostfs_testlib.steps.cli.container import (
    create_container,
    delete_container,
    get_container,
    list_containers,
)
from frostfs_testlib.steps.cli.container import create_container, delete_container, get_container, list_containers
from frostfs_testlib.steps.session_token import ContainerVerb, get_container_signed_token
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize

@@ -19,6 +14,7 @@ from frostfs_testlib.utils.file_utils import generate_file
from pytest_tests.helpers.object_access import can_put_object


@pytest.mark.sanity
@pytest.mark.static_session_container
class TestSessionTokenContainer(ClusterTestBase):
    @pytest.fixture(scope="module")

@@ -33,9 +29,7 @@ class TestSessionTokenContainer(ClusterTestBase):
        Returns dict with static session token file paths for all verbs with default lifetime
        """
        return {
            verb: get_container_signed_token(
                owner_wallet, user_wallet, verb, client_shell, temp_directory
            )
            verb: get_container_signed_token(owner_wallet, user_wallet, verb, client_shell, temp_directory)
            for verb in ContainerVerb
        }

@@ -65,9 +59,7 @@ class TestSessionTokenContainer(ClusterTestBase):
        assert cid not in list_containers(
            user_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
        )
        assert cid in list_containers(
            owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
        )
        assert cid in list_containers(owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)

    def test_static_session_token_container_create_with_other_verb(
        self,

@@ -158,10 +150,7 @@ class TestSessionTokenContainer(ClusterTestBase):
        assert can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster)

        with allure.step("Deny all operations for other via eACL"):
            eacl_deny = [
                EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
                for op in EACLOperation
            ]
            eacl_deny = [EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op) for op in EACLOperation]
            set_eacl(
                user_wallet.path,
                cid,
@@ -66,8 +66,7 @@ class Shard:

        blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id)
        blobstors = [
            Blobstor.from_config_object(config_object, shard_id, blobstor_id)
            for blobstor_id in range(blobstor_count)
            Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)
        ]

        write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED")

@@ -81,15 +80,10 @@ class Shard:
    @staticmethod
    def from_object(shard):
        metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"]
        writecache = (
            shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"]
        )
        writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"]

        return Shard(
            blobstor=[
                Blobstor(path=blobstor["path"], path_type=blobstor["type"])
                for blobstor in shard["blobstor"]
            ],
            blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]],
            metabase=metabase,
            writecache=writecache,
        )

@@ -111,7 +105,6 @@ def shards_from_env(contents: str) -> list[Shard]:
    return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)]


@pytest.mark.sanity
@pytest.mark.shard
class TestControlShard:
    @staticmethod
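For orientation, `from_object` above accepts a shard mapping in which `metabase` and `writecache` may be either a plain path string or a mapping with a `path` key; both branches are handled. A hedged sketch of an input it would parse (field values are invented):

shard_json = {
    "metabase": {"path": "/srv/meta/shard0.db"},   # mapping form
    "writecache": "/srv/cache/shard0",             # plain-string form
    "blobstor": [
        {"path": "/srv/blob/shard0", "type": "fstree"},
    ],
}
shard = Shard.from_object(shard_json)  # -> Shard(blobstor=[...], metabase=..., writecache=...)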