forked from TrueCloudLab/frostfs-testcases
[#253] Use TestFiles which automatically deletes itself
Signed-off-by: a.berezin <a.berezin@yadro.com>
commit 3a1e67863b (parent 6094f06d1f)
3 changed files with 34 additions and 36 deletions
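The TestFile helper named in the commit title is defined in frostfs_testlib, not in this diff. A minimal sketch of the idea, assuming TestFile wraps a filesystem path, exposes it through a path attribute, and removes the file when the wrapper is garbage collected (the actual frostfs_testlib implementation may differ):

import os


class TestFile(os.PathLike):
    # Sketch only: a path wrapper that deletes the file it points to once the
    # wrapper is no longer referenced, so tests need no explicit cleanup.
    def __init__(self, path: str) -> None:
        self.path = path

    def __fspath__(self) -> str:
        return self.path

    def __str__(self) -> str:
        return self.path

    def __del__(self) -> None:
        # Best-effort cleanup; the file may never have been created.
        if os.path.exists(self.path):
            os.remove(self.path)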
@@ -6,7 +6,7 @@ import pytest
 from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.controllers import ShardsWatcher
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
-from frostfs_testlib.utils.file_utils import generate_file
+from frostfs_testlib.utils.file_utils import TestFile, generate_file
 
 
 @pytest.fixture()
@@ -25,7 +25,6 @@ def test_start_time() -> datetime:
 
 @pytest.fixture()
 @allure.title("Generate simple size file")
-def simple_file(simple_object_size: ObjectSize) -> str:
+def simple_file(simple_object_size: ObjectSize) -> TestFile:
     path_file = generate_file(size=simple_object_size.value)
-    yield path_file
-    os.remove(path_file)
+    return path_file
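The fixture change above is the heart of the refactor: because the object returned by generate_file now cleans up after itself, a fixture can simply return it instead of yielding the path and calling os.remove in teardown. A hypothetical companion fixture written in the same style (the complex_file name is an assumption for illustration and is not part of this diff):

import allure
import pytest

from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.utils.file_utils import TestFile, generate_file


@pytest.fixture()
@allure.title("Generate complex size file")
def complex_file(complex_object_size: ObjectSize) -> TestFile:
    # No yield / os.remove pair: cleanup is delegated to the returned TestFile.
    return generate_file(size=complex_object_size.value)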
@@ -6,13 +6,14 @@ import allure
 import pytest
 from frostfs_testlib import reporter
 from frostfs_testlib.resources.common import ASSETS_DIR
-from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
+from frostfs_testlib.s3 import AwsCliClient, S3ClientWrapper, VersioningStatus
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.epoch import tick_epoch
 from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.cluster import Cluster
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.utils.file_utils import (
+    TestFile,
     generate_file,
     generate_file_with_content,
     get_file_content,
@@ -140,14 +141,14 @@ class TestS3Gate:
         """
         Test checks sync directory with AWS CLI utility.
         """
-        file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
-        file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
-        key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
+        test_file_1 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1"))
+        test_file_2 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2"))
+        key_to_path = {"test_file_1": test_file_1.path, "test_file_2": test_file_2.path}
 
-        generate_file_with_content(simple_object_size.value, file_path=file_path_1)
-        generate_file_with_content(simple_object_size.value, file_path=file_path_2)
+        generate_file_with_content(simple_object_size.value, test_file_1)
+        generate_file_with_content(simple_object_size.value, test_file_2)
 
-        s3_client.sync(bucket=bucket, dir_path=os.path.dirname(file_path_1))
+        s3_client.sync(bucket, os.path.dirname(test_file_1))
 
         with reporter.step("Check objects are synced"):
             objects = s3_client.list_objects(bucket)
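The test body above shows the usage pattern the rest of the diff repeats: build a TestFile around the target path, pass it directly where a plain file path used to go, and unwrap it via .path only where a raw string is required. A standalone sketch of that pattern (the size and file names are illustrative; that generate_file_with_content accepts a TestFile positionally is inferred from this diff rather than verified against the library):

import os

from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.utils.file_utils import TestFile, generate_file_with_content

test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "demo_sync", "demo_file"))
generate_file_with_content(1024, test_file)   # write generated content of the given size into the wrapped path
key = os.path.basename(test_file.path)        # unwrap .path when a plain str is needed
sync_dir = os.path.dirname(test_file)         # os.path helpers accept the wrapper as a path-like object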
@@ -16,7 +16,13 @@ from frostfs_testlib.steps.s3 import s3_helper
 from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
 from frostfs_testlib.testing.test_control import expect_not_raises
 from frostfs_testlib.utils import wallet_utils
-from frostfs_testlib.utils.file_utils import concat_files, generate_file, generate_file_with_content, get_file_hash
+from frostfs_testlib.utils.file_utils import (
+    TestFile,
+    concat_files,
+    generate_file,
+    generate_file_with_content,
+    get_file_hash,
+)
 
 
 @pytest.mark.s3_gate
@@ -237,9 +243,7 @@ class TestS3GateObject:
 
         with reporter.step("Put several versions of object into bucket"):
             version_id_1 = s3_client.put_object(bucket, file_name_simple)
-            file_name_1 = generate_file_with_content(
-                simple_object_size.value, file_path=file_name_simple, content=version_2_content
-            )
+            file_name_1 = generate_file_with_content(simple_object_size.value, file_name_simple, version_2_content)
             version_id_2 = s3_client.put_object(bucket, file_name_1)
 
         with reporter.step("Check bucket shows all versions"):
@@ -258,16 +262,16 @@ class TestS3GateObject:
             assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"
 
         with reporter.step("Delete second version of object"):
-            delete_obj = s3_client.delete_object(bucket, obj_key, version_id=version_id_2)
+            delete_obj = s3_client.delete_object(bucket, obj_key, version_id_2)
             versions = s3_client.list_objects_versions(bucket)
             obj_versions = {version.get("VersionId") for version in versions if version.get("Key") == obj_key}
             assert not obj_versions, "Expected object not found"
             assert "DeleteMarker" not in delete_obj.keys(), "Delete markers should not be created"
 
         with reporter.step("Put new object into bucket"):
-            file_name_simple = generate_file(complex_object_size.value)
-            obj_key = os.path.basename(file_name_simple)
-            s3_client.put_object(bucket, file_name_simple)
+            file_name_complex = generate_file(complex_object_size.value)
+            obj_key = os.path.basename(file_name_complex)
+            s3_client.put_object(bucket, file_name_complex)
 
         with reporter.step("Delete last object"):
             delete_obj = s3_client.delete_object(bucket, obj_key)
@@ -288,17 +292,11 @@ class TestS3GateObject:
 
         with reporter.step("Put several versions of object into bucket"):
             version_id_1 = s3_client.put_object(bucket, file_name_1)
-            file_name_2 = generate_file_with_content(
-                simple_object_size.value, file_path=file_name_1, content=version_2_content
-            )
+            file_name_2 = generate_file_with_content(simple_object_size.value, file_name_1, version_2_content)
             version_id_2 = s3_client.put_object(bucket, file_name_2)
-            file_name_3 = generate_file_with_content(
-                simple_object_size.value, file_path=file_name_1, content=version_3_content
-            )
+            file_name_3 = generate_file_with_content(simple_object_size.value, file_name_1, version_3_content)
             version_id_3 = s3_client.put_object(bucket, file_name_3)
-            file_name_4 = generate_file_with_content(
-                simple_object_size.value, file_path=file_name_1, content=version_4_content
-            )
+            file_name_4 = generate_file_with_content(simple_object_size.value, file_name_1, version_4_content)
             version_id_4 = s3_client.put_object(bucket, file_name_4)
             version_ids = {version_id_1, version_id_2, version_id_3, version_id_4}
 
@@ -758,19 +756,19 @@ class TestS3GateObject:
         bucket: str,
         simple_object_size: ObjectSize,
     ):
-        file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
-        file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
+        test_file_1 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1"))
+        test_file_2 = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2"))
         object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
-        key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
+        key_to_path = {"test_file_1": test_file_1.path, "test_file_2": test_file_2.path}
 
-        generate_file_with_content(simple_object_size.value, file_path=file_path_1)
-        generate_file_with_content(simple_object_size.value, file_path=file_path_2)
+        generate_file_with_content(simple_object_size.value, test_file_1)
+        generate_file_with_content(simple_object_size.value, test_file_2)
         s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
 
         if sync_type == "sync":
-            s3_client.sync(bucket=bucket, dir_path=os.path.dirname(file_path_1), metadata=object_metadata)
+            s3_client.sync(bucket, os.path.dirname(test_file_1), metadata=object_metadata)
         elif sync_type == "cp":
-            s3_client.cp(bucket=bucket, dir_path=os.path.dirname(file_path_1), metadata=object_metadata)
+            s3_client.cp(bucket, os.path.dirname(test_file_1), metadata=object_metadata)
 
         with reporter.step("Check objects are synced"):
            objects = s3_client.list_objects(bucket)
@@ -796,7 +794,7 @@ class TestS3GateObject:
         simple_object_size: ObjectSize,
     ):
         path = "/".join(["".join(choices(string.ascii_letters, k=3)) for _ in range(10)])
-        file_path_1 = os.path.join(temp_directory, path, "test_file_1")
+        file_path_1 = TestFile(os.path.join(temp_directory, path, "test_file_1"))
         generate_file_with_content(simple_object_size.value, file_path=file_path_1)
         file_name = s3_helper.object_key_from_file_path(file_path_1)
         objects_list = s3_client.list_objects(bucket)