forked from TrueCloudLab/frostfs-testcases
Add grpc lock tests
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
parent 08274d4620
commit 30ea4ab54e
13 changed files with 805 additions and 101 deletions
pytest_tests/helpers/container.py (new file, 56 lines)
@@ -0,0 +1,56 @@
from dataclasses import dataclass
from typing import Optional

import allure
from file_helper import generate_file, get_file_hash
from neofs_testlib.shell import Shell
from neofs_verbs import put_object
from storage_object import StorageObjectInfo
from wallet import WalletFile


@dataclass
class StorageContainerInfo:
    id: str
    wallet_file: WalletFile


class StorageContainer:
    def __init__(self, storage_container_info: StorageContainerInfo, shell: Shell) -> None:
        self.shell = shell
        self.storage_container_info = storage_container_info

    def get_id(self) -> str:
        return self.storage_container_info.id

    def get_wallet_path(self) -> str:
        return self.storage_container_info.wallet_file.path

    @allure.step("Generate new object and put in container")
    def generate_object(self, size: int, expire_at: Optional[int] = None) -> StorageObjectInfo:
        with allure.step(f"Generate object with size {size}"):
            file_path = generate_file(size)
            file_hash = get_file_hash(file_path)

        container_id = self.get_id()
        wallet_path = self.get_wallet_path()

        with allure.step(f"Put object with size {size} to container {container_id}"):
            object_id = put_object(
                wallet=wallet_path,
                path=file_path,
                cid=container_id,
                expire_at=expire_at,
                shell=self.shell,
            )

        storage_object = StorageObjectInfo(
            container_id,
            object_id,
            size=size,
            wallet_file_path=wallet_path,
            file_path=file_path,
            file_hash=file_hash,
        )

        return storage_object
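For orientation, a minimal sketch of how this helper is meant to be used; it mirrors the user_container fixture added later in this change set, and assumes the existing wallet_factory and client_shell fixtures plus the create_container step from this repository:

# Sketch only: hypothetical test that exercises the new StorageContainer helper.
from container import create_container

from helpers.container import StorageContainer, StorageContainerInfo


def test_sketch(wallet_factory, client_shell):
    wallet = wallet_factory.create_wallet()
    cid = create_container(wallet.path, shell=client_shell)
    container = StorageContainer(StorageContainerInfo(cid, wallet), client_shell)

    # generate_object creates a file, uploads it and returns a filled StorageObjectInfo
    storage_object = container.generate_object(size=1024)
    assert storage_object.cid == container.get_id()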
@@ -11,6 +11,15 @@ OBJECT_NOT_FOUND = "code = 2049.*message = object not found"
OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed"
SESSION_NOT_FOUND = "code = 4096.*message = session token not found"
OUT_OF_RANGE = "code = 2053.*message = out of range"
# TODO: Due to https://github.com/nspcc-dev/neofs-node/issues/2092 we have to check only codes until fixed
# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked"
# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed
OBJECT_IS_LOCKED = "code = 2050"
LOCK_NON_REGULAR_OBJECT = "code = 2051"

LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required"
LOCK_OBJECT_REMOVAL = "lock object removal"
LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}"


def error_matches_status(error: Exception, status_pattern: str) -> bool:
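These constants are regular-expression patterns matched against the text of CLI errors; the typical use, as the lock tests below show, is to pass them as the match argument of pytest.raises. A short illustration, with wallet_path, cid, oid and shell standing in for real test data:

# Illustration: pytest.raises applies re.search(pattern, str(exception)),
# so the code-only patterns above still match while issue 2092 is open.
import pytest

with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
    delete_object(wallet_path, cid, oid, shell)  # expected to fail while the object is locked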
@@ -1,27 +1,27 @@
import logging
from dataclasses import dataclass
from time import sleep, time

import allure
import pytest
from common import NEOFS_NETMAP, STORAGE_NODE_SERVICE_NAME_REGEX
from epoch import tick_epoch
from grpc_responses import OBJECT_ALREADY_REMOVED
from neofs_testlib.hosting import Hosting
from neofs_testlib.shell import Shell
from python_keywords.neofs_verbs import delete_object, get_object, head_object
from tombstone import verify_head_tombstone

logger = logging.getLogger("NeoLogger")


@dataclass
class StorageObjectInfo:
class ObjectRef:
    cid: str
    oid: str


@dataclass
class LockObjectInfo(ObjectRef):
    lifetime: int = None
    expire_at: int = None


@dataclass
class StorageObjectInfo(ObjectRef):
    size: str = None
    cid: str = None
    wallet: str = None
    wallet_file_path: str = None
    file_path: str = None
    file_hash: str = None
    attributes: list[dict[str, str]] = None
    oid: str = None
    tombstone: str = None
    locks: list[LockObjectInfo] = None
pytest_tests/helpers/test_control.py (new file, 80 lines)
@@ -0,0 +1,80 @@
import logging
from functools import wraps
from time import sleep, time

from _pytest.outcomes import Failed
from pytest import fail

logger = logging.getLogger("NeoLogger")


class expect_not_raises:
    """
    Decorator/context manager that checks that some action, method or test does not raise exceptions

    Useful to set proper state of failed test cases in allure

    Example:
        def do_stuff():
            raise Exception("Fail")

        def test_yellow(): <- this test is marked yellow (Test Defect) in allure
            do_stuff()

        def test_red(): <- this test is marked red (Failed) in allure
            with expect_not_raises():
                do_stuff()

        @expect_not_raises()
        def test_also_red(): <- this test is also marked red (Failed) in allure
            do_stuff()
    """

    def __enter__(self):
        pass

    def __exit__(self, exception_type, exception_value, exception_traceback):
        if exception_value:
            fail(str(exception_value))

    def __call__(self, func):
        @wraps(func)
        def impl(*a, **kw):
            with expect_not_raises():
                func(*a, **kw)

        return impl


def wait_for_success(max_wait_time: int = 60, interval: int = 1):
    """
    Decorator to wait for some conditions/functions to pass successfully.
    This is useful if you don't know the exact time when something should pass successfully and do not
    want to use sleep(X) with too big X.

    Be careful though, the wrapped function should only check the state of something, not change it.
    """

    def wrapper(func):
        @wraps(func)
        def impl(*a, **kw):
            start = int(round(time()))
            last_exception = None
            while start + max_wait_time >= int(round(time())):
                try:
                    return func(*a, **kw)
                except Exception as ex:
                    logger.debug(ex)
                    last_exception = ex
                    sleep(interval)
                except Failed as ex:
                    logger.debug(ex)
                    last_exception = ex
                    sleep(interval)

            # timeout exceeded with no success, raise last_exception
            raise last_exception

        return impl

    return wrapper
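For reference, the lock tests below wrap a read-only check with this decorator; a condensed sketch of that pattern (wallet_path, cid, oid and shell stand in for real test data):

# Sketch: retry the wrapped check until it passes or parse_time(STORAGE_GC_TIME) elapses.
@wait_for_success(parse_time(STORAGE_GC_TIME))
def check_object_not_found():
    with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
        head_object(wallet_path, cid, oid, shell)


check_object_not_found()  # re-raises the last exception if the object never disappears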
@@ -13,7 +13,6 @@ from python_keywords.payment_neogo import deposit_gas, transfer_gas
class WalletFile:
    path: str
    password: str
    containers: Optional[list[str]] = None

    def get_address(self) -> str:
        """
@@ -15,6 +15,7 @@ markers =
    # functional markers
    container: tests for container creation
    grpc_api: standard gRPC API tests
    grpc_object_lock: gRPC lock tests
    http_gate: HTTP gate contract
    s3_gate: All S3 gate tests
    s3_gate_base: Base S3 gate tests
@@ -1,14 +1,12 @@
import logging
from time import sleep, time
from time import sleep

import allure
import pytest
from common import STORAGE_NODE_SERVICE_NAME_REGEX
from epoch import tick_epoch
from grpc_responses import OBJECT_ALREADY_REMOVED
from neofs_testlib.hosting import Hosting
from neofs_testlib.shell import Shell
from python_keywords.neofs_verbs import delete_object, get_object, head_object
from python_keywords.neofs_verbs import delete_object, get_object
from storage_object_info import StorageObjectInfo
from tombstone import verify_head_tombstone
@@ -17,38 +15,7 @@ logger = logging.getLogger("NeoLogger")
CLEANUP_TIMEOUT = 10


@allure.step("Waiting until object will be available on all nodes")
def wait_until_objects_available_on_all_nodes(
    hosting: Hosting,
    storage_objects: list[StorageObjectInfo],
    shell: Shell,
    max_wait_time: int = 60,
) -> None:
    start = time()

    def wait_for_objects():
        for service_config in hosting.find_service_configs(STORAGE_NODE_SERVICE_NAME_REGEX):
            endpoint = service_config.attributes["rpc_endpoint"]
            for storage_object in storage_objects:
                head_object(
                    storage_object.wallet,
                    storage_object.cid,
                    storage_object.oid,
                    shell,
                    endpoint=endpoint,
                )

    while start + max_wait_time >= time():
        try:
            wait_for_objects()
            return
        except Exception as ex:
            logger.debug(ex)
            sleep(1)

    raise ex


@allure.step("Delete Objects")
def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> None:
    """
    Deletes given storage objects.
@@ -61,10 +28,10 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> None:
    with allure.step("Delete objects"):
        for storage_object in storage_objects:
            storage_object.tombstone = delete_object(
                storage_object.wallet, storage_object.cid, storage_object.oid, shell
                storage_object.wallet_file_path, storage_object.cid, storage_object.oid, shell
            )
            verify_head_tombstone(
                wallet_path=storage_object.wallet,
                wallet_path=storage_object.wallet_file_path,
                cid=storage_object.cid,
                oid_ts=storage_object.tombstone,
                oid=storage_object.oid,
@@ -78,7 +45,7 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell) -> None:
        for storage_object in storage_objects:
            with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
                get_object(
                    storage_object.wallet,
                    storage_object.wallet_file_path,
                    storage_object.cid,
                    storage_object.oid,
                    shell=shell,
@@ -101,14 +101,7 @@ def storage_objects(
    with allure.step("Put objects"):
        # We need to upload objects multiple times with different attributes
        for attributes in OBJECT_ATTRIBUTES:
            storage_object = StorageObjectInfo()
            storage_object.size = request.param
            storage_object.cid = cid
            storage_object.wallet = wallet
            storage_object.file_path = file_path
            storage_object.file_hash = file_hash
            storage_object.attributes = attributes
            storage_object.oid = put_object(
            storage_object_id = put_object(
                wallet=wallet,
                path=file_path,
                cid=cid,
@@ -116,6 +109,13 @@ def storage_objects(
                attributes=attributes,
            )

            storage_object = StorageObjectInfo(cid, storage_object_id)
            storage_object.size = request.param
            storage_object.wallet_file_path = wallet
            storage_object.file_path = file_path
            storage_object.file_hash = file_hash
            storage_object.attributes = attributes

            storage_objects.append(storage_object)

    yield storage_objects
@@ -141,14 +141,14 @@ def test_object_storage_policies(
    for storage_object in storage_objects:
        if storage_object.size == SIMPLE_OBJ_SIZE:
            copies = get_simple_object_copies(
                storage_object.wallet,
                storage_object.wallet_file_path,
                storage_object.cid,
                storage_object.oid,
                shell=client_shell,
            )
        else:
            copies = get_complex_object_copies(
                storage_object.wallet,
                storage_object.wallet_file_path,
                storage_object.cid,
                storage_object.oid,
                shell=client_shell,
@@ -170,7 +170,10 @@ def test_get_object_api(
    with allure.step("Get objects and compare hashes"):
        for storage_object in storage_objects:
            file_path = get_object(
                storage_object.wallet, storage_object.cid, storage_object.oid, client_shell
                storage_object.wallet_file_path,
                storage_object.cid,
                storage_object.oid,
                client_shell,
            )
            file_hash = get_file_hash(file_path)
            assert storage_object.file_hash == file_hash
@@ -192,10 +195,16 @@ def test_head_object_api(

    with allure.step("Head object and validate"):
        head_object(
            storage_object_1.wallet, storage_object_1.cid, storage_object_1.oid, shell=client_shell
            storage_object_1.wallet_file_path,
            storage_object_1.cid,
            storage_object_1.oid,
            shell=client_shell,
        )
        head_info = head_object(
            storage_object_2.wallet, storage_object_2.cid, storage_object_2.oid, shell=client_shell
            storage_object_2.wallet_file_path,
            storage_object_2.cid,
            storage_object_2.oid,
            shell=client_shell,
        )
        check_header_is_presented(head_info, storage_object_2.attributes)
@@ -212,7 +221,7 @@ def test_search_object_api(
    allure.dynamic.title(f"Validate object search by native API for {request.node.callspec.id}")

    oids = [storage_object.oid for storage_object in storage_objects]
    wallet = storage_objects[0].wallet
    wallet = storage_objects[0].wallet_file_path
    cid = storage_objects[0].cid

    test_table = [
@@ -265,12 +274,12 @@ def test_object_search_should_return_tombstone_items(
    file_hash = get_file_hash(file_path)

    storage_object = StorageObjectInfo(
        size=object_size,
        cid=cid,
        wallet=wallet,
        oid=put_object(wallet, file_path, cid, shell=client_shell),
        size=object_size,
        wallet_file_path=wallet,
        file_path=file_path,
        file_hash=file_hash,
        oid=put_object(wallet, file_path, cid, shell=client_shell),
    )

    with allure.step("Search object"):
@@ -316,7 +325,7 @@ def test_object_get_range_hash(
        f"Validate native get_range_hash object API for {request.node.callspec.id}"
    )

    wallet = storage_objects[0].wallet
    wallet = storage_objects[0].wallet_file_path
    cid = storage_objects[0].cid
    oids = [storage_object.oid for storage_object in storage_objects[:2]]
    file_path = storage_objects[0].file_path
@@ -350,7 +359,7 @@ def test_object_get_range(
    """
    allure.dynamic.title(f"Validate native get_range object API for {request.node.callspec.id}")

    wallet = storage_objects[0].wallet
    wallet = storage_objects[0].wallet_file_path
    cid = storage_objects[0].cid
    oids = [storage_object.oid for storage_object in storage_objects[:2]]
    file_path = storage_objects[0].file_path
@@ -391,7 +400,7 @@ def test_object_get_range_negatives(
        f"Validate native get_range negative object API for {request.node.callspec.id}"
    )

    wallet = storage_objects[0].wallet
    wallet = storage_objects[0].wallet_file_path
    cid = storage_objects[0].cid
    oids = [storage_object.oid for storage_object in storage_objects[:2]]
    file_size = storage_objects[0].size
@@ -432,7 +441,7 @@ def test_object_get_range_hash_negatives(
        f"Validate native get_range_hash negative object API for {request.node.callspec.id}"
    )

    wallet = storage_objects[0].wallet
    wallet = storage_objects[0].wallet_file_path
    cid = storage_objects[0].cid
    oids = [storage_object.oid for storage_object in storage_objects[:2]]
    file_size = storage_objects[0].size
pytest_tests/testsuites/object/test_object_lock.py (new executable file, 527 lines)
@@ -0,0 +1,527 @@
import logging
import re

import allure
import pytest
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE, STORAGE_GC_TIME
from complex_object_actions import get_link_object
from container import create_container
from epoch import ensure_fresh_epoch, get_epoch, tick_epoch
from grpc_responses import (
    LIFETIME_REQUIRED,
    LOCK_NON_REGULAR_OBJECT,
    LOCK_OBJECT_EXPIRATION,
    LOCK_OBJECT_REMOVAL,
    OBJECT_ALREADY_REMOVED,
    OBJECT_IS_LOCKED,
    OBJECT_NOT_FOUND,
)
from neofs_testlib.shell import Shell
from pytest import FixtureRequest
from python_keywords.neofs_verbs import delete_object, head_object, lock_object
from test_control import expect_not_raises, wait_for_success
from utility import parse_time, wait_for_gc_pass_on_storage_nodes

from helpers.container import StorageContainer, StorageContainerInfo
from helpers.storage_object_info import LockObjectInfo, StorageObjectInfo
from helpers.wallet import WalletFactory, WalletFile
from steps.storage_object import delete_objects

logger = logging.getLogger("NeoLogger")

FIXTURE_LOCK_LIFETIME = 5
FIXTURE_OBJECT_LIFETIME = 10


def get_storage_object_chunks(storage_object: StorageObjectInfo, shell: Shell):
    with allure.step(f"Get complex object chunks ({storage_object.oid})"):
        split_object_id = get_link_object(
            storage_object.wallet_file_path,
            storage_object.cid,
            storage_object.oid,
            shell,
            is_direct=False,
        )
        head = head_object(
            storage_object.wallet_file_path, storage_object.cid, split_object_id, shell
        )

        chunks_object_ids = []
        if "split" in head["header"] and "children" in head["header"]["split"]:
            chunks_object_ids = head["header"]["split"]["children"]
        return chunks_object_ids


@pytest.fixture(
    scope="module",
)
def user_wallet(wallet_factory: WalletFactory):
    with allure.step("Create user wallet with container"):
        wallet_file = wallet_factory.create_wallet()
        return wallet_file


@pytest.fixture(
    scope="module",
)
def user_container(user_wallet: WalletFile, client_shell: Shell):
    container_id = create_container(user_wallet.path, shell=client_shell)
    return StorageContainer(StorageContainerInfo(container_id, user_wallet), client_shell)


@pytest.fixture(
    scope="module",
)
def locked_storage_object(
    user_container: StorageContainer,
    client_shell: Shell,
    request: FixtureRequest,
):
    with allure.step("Creating locked object"):
        current_epoch = ensure_fresh_epoch(client_shell)
        expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME

        storage_object = user_container.generate_object(
            request.param, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
        )
        lock_object_id = lock_object(
            storage_object.wallet_file_path,
            storage_object.cid,
            storage_object.oid,
            client_shell,
            lifetime=FIXTURE_LOCK_LIFETIME,
        )
        storage_object.locks = [
            LockObjectInfo(
                storage_object.cid, lock_object_id, FIXTURE_LOCK_LIFETIME, expiration_epoch
            )
        ]

    yield storage_object

    with allure.step("Delete created locked object"):
        current_epoch = get_epoch(client_shell)
        epoch_diff = expiration_epoch - current_epoch + 1

        if epoch_diff > 0:
            with allure.step(f"Tick {epoch_diff} epochs"):
                for _ in range(epoch_diff):
                    tick_epoch(client_shell)
        try:
            delete_object(
                storage_object.wallet_file_path,
                storage_object.cid,
                storage_object.oid,
                client_shell,
            )
        except Exception as ex:
            ex_message = str(ex)
            # It's okay if object already removed
            if not re.search(OBJECT_NOT_FOUND, ex_message) and not re.search(
                OBJECT_ALREADY_REMOVED, ex_message
            ):
                raise ex
            logger.debug(ex_message)
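The tests below feed object sizes into the locked_storage_object fixture through pytest's indirect parametrization; a minimal, generic illustration of that mechanism (hypothetical fixture and test names, not part of this change):

# Generic pytest pattern: with indirect=True the parametrize value is delivered
# to the fixture as request.param instead of directly to the test.
import pytest


@pytest.fixture
def sized_payload(request):
    return b"x" * request.param  # request.param carries the size passed by the test


@pytest.mark.parametrize("sized_payload", [64, 1024], indirect=True)
def test_payload_has_requested_size(sized_payload):
    assert len(sized_payload) in (64, 1024)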
@pytest.mark.sanity
@pytest.mark.grpc_object_lock
class TestObjectLockWithGrpc:
    @allure.title("Locked object should be protected from deletion")
    @pytest.mark.parametrize(
        "locked_storage_object",
        [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
        ids=["simple object", "complex object"],
        indirect=True,
    )
    def test_locked_object_cannot_be_deleted(
        self,
        client_shell: Shell,
        request: FixtureRequest,
        locked_storage_object: StorageObjectInfo,
    ):
        """
        Locked object should be protected from deletion
        """
        allure.dynamic.title(
            f"Locked object should be protected from deletion for {request.node.callspec.id}"
        )

        with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
            delete_object(
                locked_storage_object.wallet_file_path,
                locked_storage_object.cid,
                locked_storage_object.oid,
                client_shell,
            )

    @allure.title("Lock object itself should be protected from deletion")
    # We operate with only lock object here so no complex object needed in this test
    @pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True)
    def test_lock_object_itself_cannot_be_deleted(
        self,
        client_shell: Shell,
        locked_storage_object: StorageObjectInfo,
    ):
        """
        Lock object itself should be protected from deletion
        """

        lock_object = locked_storage_object.locks[0]
        wallet_path = locked_storage_object.wallet_file_path

        with pytest.raises(Exception, match=LOCK_OBJECT_REMOVAL):
            delete_object(wallet_path, lock_object.cid, lock_object.oid, client_shell)

    @allure.title("Lock object itself cannot be locked")
    # We operate with only lock object here so no complex object needed in this test
    @pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True)
    def test_lock_object_cannot_be_locked(
        self,
        client_shell: Shell,
        locked_storage_object: StorageObjectInfo,
    ):
        """
        Lock object itself cannot be locked
        """

        lock_object_info = locked_storage_object.locks[0]
        wallet_path = locked_storage_object.wallet_file_path

        with pytest.raises(Exception, match=LOCK_NON_REGULAR_OBJECT):
            lock_object(wallet_path, lock_object_info.cid, lock_object_info.oid, client_shell, 1)

    @allure.title("Cannot lock object without lifetime and expire_at fields")
    # We operate with only lock object here so no complex object needed in this test
    @pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True)
    @pytest.mark.parametrize(
        "wrong_lifetime,wrong_expire_at,expected_error",
        [
            (None, None, LIFETIME_REQUIRED),
            (0, 0, LIFETIME_REQUIRED),
            (0, None, LIFETIME_REQUIRED),
            (None, 0, LIFETIME_REQUIRED),
            (-1, None, 'invalid argument "-1" for "--lifetime" flag'),
            (None, -1, 'invalid argument "-1" for "-e, --expire-at" flag'),
        ],
    )
    def test_cannot_lock_object_without_lifetime(
        self,
        client_shell: Shell,
        locked_storage_object: StorageObjectInfo,
        wrong_lifetime: int,
        wrong_expire_at: int,
        expected_error: str,
    ):
        """
        Cannot lock object without lifetime and expire_at fields
        """
        allure.dynamic.title(
            f"Cannot lock object without lifetime and expire_at fields: (lifetime={wrong_lifetime}, expire-at={wrong_expire_at})"
        )

        lock_object_info = locked_storage_object.locks[0]
        wallet_path = locked_storage_object.wallet_file_path

        with pytest.raises(Exception, match=expected_error):
            lock_object(
                wallet_path,
                lock_object_info.cid,
                lock_object_info.oid,
                client_shell,
                lifetime=wrong_lifetime,
                expire_at=wrong_expire_at,
            )

    @allure.title("Expired object should be deleted after locks are expired")
    @pytest.mark.parametrize(
        "object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"]
    )
    def test_expired_object_should_be_deleted_after_locks_are_expired(
        self,
        client_shell: Shell,
        request: FixtureRequest,
        user_container: StorageContainer,
        object_size: int,
    ):
        """
        Expired object should be deleted after locks are expired
        """
        allure.dynamic.title(
            f"Expired object should be deleted after locks are expired for {request.node.callspec.id}"
        )

        current_epoch = ensure_fresh_epoch(client_shell)
        storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)

        with allure.step("Lock object for couple epochs"):
            lock_object(
                storage_object.wallet_file_path,
                storage_object.cid,
                storage_object.oid,
                client_shell,
                lifetime=3,
            )
            lock_object(
                storage_object.wallet_file_path,
                storage_object.cid,
                storage_object.oid,
                client_shell,
                expire_at=current_epoch + 3,
            )

        with allure.step("Check object is not deleted at expiration time"):
            tick_epoch(client_shell)
            tick_epoch(client_shell)
            # Must wait to ensure object is not deleted
            wait_for_gc_pass_on_storage_nodes()
            with expect_not_raises():
                head_object(
                    storage_object.wallet_file_path,
                    storage_object.cid,
                    storage_object.oid,
                    client_shell,
                )

        @wait_for_success(parse_time(STORAGE_GC_TIME))
        def check_object_not_found():
            with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
                head_object(
                    storage_object.wallet_file_path,
                    storage_object.cid,
                    storage_object.oid,
                    client_shell,
                )

        with allure.step("Wait for object to be deleted after third epoch"):
            tick_epoch(client_shell)
            check_object_not_found()
    @allure.title("Should be possible to lock multiple objects at once")
    @pytest.mark.parametrize(
        "object_size",
        [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
        ids=["simple object", "complex object"],
    )
    def test_should_be_possible_to_lock_multiple_objects_at_once(
        self,
        client_shell: Shell,
        request: FixtureRequest,
        user_container: StorageContainer,
        object_size: int,
    ):
        """
        Should be possible to lock multiple objects at once
        """
        allure.dynamic.title(
            f"Should be possible to lock multiple objects at once for {request.node.callspec.id}"
        )

        current_epoch = ensure_fresh_epoch(client_shell)
        storage_objects: list[StorageObjectInfo] = []

        with allure.step("Generate three objects"):
            for _ in range(3):
                storage_objects.append(
                    user_container.generate_object(object_size, expire_at=current_epoch + 5)
                )

        lock_object(
            storage_objects[0].wallet_file_path,
            storage_objects[0].cid,
            ",".join([storage_object.oid for storage_object in storage_objects]),
            client_shell,
            expire_at=current_epoch + 1,
        )

        for storage_object in storage_objects:
            with allure.step(f"Try to delete object {storage_object.oid}"):
                with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
                    delete_object(
                        storage_object.wallet_file_path,
                        storage_object.cid,
                        storage_object.oid,
                        client_shell,
                    )

        with allure.step("Tick two epochs"):
            tick_epoch(client_shell)
            tick_epoch(client_shell)

        with expect_not_raises():
            delete_objects(storage_objects, client_shell)

    @allure.title("Already outdated lock should not be applied")
    @pytest.mark.parametrize(
        "object_size",
        [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
        ids=["simple object", "complex object"],
    )
    def test_already_outdated_lock_should_not_be_applied(
        self,
        client_shell: Shell,
        request: FixtureRequest,
        user_container: StorageContainer,
        object_size: int,
    ):
        """
        Already outdated lock should not be applied
        """
        allure.dynamic.title(
            f"Already outdated lock should not be applied for {request.node.callspec.id}"
        )

        current_epoch = ensure_fresh_epoch(client_shell)

        storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)

        expiration_epoch = current_epoch - 1
        with pytest.raises(
            Exception,
            match=LOCK_OBJECT_EXPIRATION.format(
                expiration_epoch=expiration_epoch, current_epoch=current_epoch
            ),
        ):
            lock_object(
                storage_object.wallet_file_path,
                storage_object.cid,
                storage_object.oid,
                client_shell,
                expire_at=expiration_epoch,
            )
    @allure.title("After lock expiration with lifetime user should be able to delete object")
    @pytest.mark.parametrize(
        "object_size",
        [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
        ids=["simple object", "complex object"],
    )
    @expect_not_raises()
    def test_after_lock_expiration_with_lifetime_user_should_be_able_to_delete_object(
        self,
        client_shell: Shell,
        request: FixtureRequest,
        user_container: StorageContainer,
        object_size: int,
    ):
        """
        After lock expiration with lifetime user should be able to delete object
        """
        allure.dynamic.title(
            f"After lock expiration with lifetime user should be able to delete object for {request.node.callspec.id}"
        )

        current_epoch = ensure_fresh_epoch(client_shell)
        storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)

        lock_object(
            storage_object.wallet_file_path,
            storage_object.cid,
            storage_object.oid,
            client_shell,
            lifetime=1,
        )

        tick_epoch(client_shell)

        delete_object(
            storage_object.wallet_file_path, storage_object.cid, storage_object.oid, client_shell
        )

    @allure.title("After lock expiration with expire_at user should be able to delete object")
    @pytest.mark.parametrize(
        "object_size",
        [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
        ids=["simple object", "complex object"],
    )
    @expect_not_raises()
    def test_after_lock_expiration_with_expire_at_user_should_be_able_to_delete_object(
        self,
        client_shell: Shell,
        request: FixtureRequest,
        user_container: StorageContainer,
        object_size: int,
    ):
        """
        After lock expiration with expire_at user should be able to delete object
        """
        allure.dynamic.title(
            f"After lock expiration with expire_at user should be able to delete object for {request.node.callspec.id}"
        )

        current_epoch = ensure_fresh_epoch(client_shell)

        storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 5)

        lock_object(
            storage_object.wallet_file_path,
            storage_object.cid,
            storage_object.oid,
            client_shell,
            expire_at=current_epoch + 1,
        )

        tick_epoch(client_shell)

        delete_object(
            storage_object.wallet_file_path, storage_object.cid, storage_object.oid, client_shell
        )
    @allure.title("Complex object chunks should also be protected from deletion")
    @pytest.mark.parametrize(
        # Only complex objects are required for this test
        "locked_storage_object",
        [COMPLEX_OBJ_SIZE],
        indirect=True,
    )
    def test_complex_object_chunks_should_also_be_protected_from_deletion(
        self,
        client_shell: Shell,
        locked_storage_object: StorageObjectInfo,
    ):
        """
        Complex object chunks should also be protected from deletion
        """

        chunk_object_ids = get_storage_object_chunks(locked_storage_object, client_shell)
        for chunk_object_id in chunk_object_ids:
            with allure.step(f"Try to delete chunk object {chunk_object_id}"):
                with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
                    delete_object(
                        locked_storage_object.wallet_file_path,
                        locked_storage_object.cid,
                        chunk_object_id,
                        client_shell,
                    )

    @allure.title("Link object of complex object should also be protected from deletion")
    @pytest.mark.parametrize(
        # Only complex objects are required for this test
        "locked_storage_object",
        [COMPLEX_OBJ_SIZE],
        indirect=True,
    )
    def test_link_object_of_complex_object_should_also_be_protected_from_deletion(
        self,
        client_shell: Shell,
        locked_storage_object: StorageObjectInfo,
    ):
        """
        Link object of complex object should also be protected from deletion
        """

        link_object_id = get_link_object(
            locked_storage_object.wallet_file_path,
            locked_storage_object.cid,
            locked_storage_object.oid,
            client_shell,
            is_direct=False,
        )
        with allure.step(f"Try to delete link object {link_object_id}"):
            with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
                delete_object(
                    locked_storage_object.wallet_file_path,
                    locked_storage_object.cid,
                    link_object_id,
                    client_shell,
                )
@@ -3,7 +3,7 @@ import logging
import allure
import pytest
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from epoch import get_epoch, tick_epoch
from epoch import ensure_fresh_epoch, tick_epoch
from file_helper import generate_file
from grpc_responses import MALFORMED_REQUEST, OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
from neofs_testlib.hosting import Hosting
@@ -41,23 +41,13 @@ from steps.session_token import (
    get_object_signed_token,
    sign_session_token,
)
from steps.storage_object import delete_objects, wait_until_objects_available_on_all_nodes
from steps.storage_object import delete_objects

logger = logging.getLogger("NeoLogger")

RANGE_OFFSET_FOR_COMPLEX_OBJECT = 200


@allure.step("Ensure fresh epoch")
def ensure_fresh_epoch(shell: Shell) -> int:
    # ensure new fresh epoch to avoid epoch switch during test session
    current_epoch = get_epoch(shell)
    tick_epoch(shell)
    epoch = get_epoch(shell)
    assert epoch > current_epoch, "Epoch wasn't ticked"
    return epoch


@pytest.fixture(
    params=[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
    ids=["simple object", "complex object"],
@@ -65,7 +55,7 @@ def ensure_fresh_epoch(shell: Shell) -> int:
    scope="module",
)
def storage_objects(
    hosting: Hosting, owner_wallet: WalletFile, client_shell: Shell, request: FixtureRequest
    owner_wallet: WalletFile, client_shell: Shell, request: FixtureRequest
) -> list[StorageObjectInfo]:
    file_path = generate_file(request.param)
    storage_objects = []
@@ -77,24 +67,20 @@ def storage_objects(

    with allure.step("Put objects"):
        # upload couple objects
        for i in range(3):
            storage_object = StorageObjectInfo()
            storage_object.size = request.param
            storage_object.cid = cid
            storage_object.wallet = owner_wallet.path
            storage_object.file_path = file_path

            storage_object.oid = put_object(
        for _ in range(3):
            storage_object_id = put_object(
                wallet=owner_wallet.path,
                path=file_path,
                cid=cid,
                shell=client_shell,
            )

            storage_object = StorageObjectInfo(cid, storage_object_id)
            storage_object.size = request.param
            storage_object.wallet_file_path = owner_wallet.path
            storage_object.file_path = file_path
            storage_objects.append(storage_object)

    wait_until_objects_available_on_all_nodes(hosting, storage_objects, client_shell)

    yield storage_objects

    # Teardown after all tests done with current param
@@ -109,7 +95,7 @@ def get_ranges(storage_object: StorageObjectInfo, shell: Shell) -> list[str]:
    object_size = storage_object.size

    if object_size == COMPLEX_OBJ_SIZE:
        net_info = get_netmap_netinfo(storage_object.wallet, shell)
        net_info = get_netmap_netinfo(storage_object.wallet_file_path, shell)
        max_object_size = net_info["maximum_object_size"]
        # make sure to test multiple parts of complex object
        assert object_size >= max_object_size + RANGE_OFFSET_FOR_COMPLEX_OBJECT
@@ -29,6 +29,7 @@ def get_link_object(
    shell: Shell,
    bearer: str = "",
    wallet_config: str = WALLET_CONFIG,
    is_direct: bool = True,
):
    """
    Args:
@@ -39,6 +40,8 @@ def get_link_object(
        shell: executor for cli command
        bearer (optional, str): path to Bearer token file
        wallet_config (optional, str): path to the neofs-cli config file
        is_direct: send request directly to the node or not; this flag
            turns into `--ttl 1` key
    Returns:
        (str): Link Object ID
        When no Link Object ID is found after all Storage Nodes polling,
@@ -53,7 +56,7 @@ def get_link_object(
            shell=shell,
            endpoint=node,
            is_raw=True,
            is_direct=True,
            is_direct=is_direct,
            bearer=bearer,
            wallet_config=wallet_config,
        )
@@ -21,6 +21,16 @@ from utility import parse_time
logger = logging.getLogger("NeoLogger")


@allure.step("Ensure fresh epoch")
def ensure_fresh_epoch(shell: Shell) -> int:
    # ensure new fresh epoch to avoid epoch switch during test session
    current_epoch = get_epoch(shell)
    tick_epoch(shell)
    epoch = get_epoch(shell)
    assert epoch > current_epoch, "Epoch wasn't ticked"
    return epoch


@allure.step("Get Epoch")
def get_epoch(shell: Shell):
    neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
@@ -272,6 +272,63 @@ def get_range(
    return range_file_path, content


@allure.step("Lock Object")
def lock_object(
    wallet: str,
    cid: str,
    oid: str,
    shell: Shell,
    lifetime: Optional[int] = None,
    expire_at: Optional[int] = None,
    endpoint: Optional[str] = None,
    address: Optional[str] = None,
    bearer: Optional[str] = None,
    session: Optional[str] = None,
    wallet_config: Optional[str] = None,
    ttl: Optional[int] = None,
    xhdr: Optional[dict] = None,
) -> str:
    """
    Lock object in container.

    Args:
        address: Address of wallet account.
        bearer: File with signed JSON or binary encoded bearer token.
        cid: Container ID.
        oid: Object ID.
        lifetime: Lock lifetime.
        expire_at: Lock expiration epoch.
        endpoint: Remote node address.
        session: Path to a JSON-encoded container session token.
        ttl: TTL value in request meta header (default 2).
        wallet: WIF (NEP-2) string or path to the wallet or binary key.
        xhdr: Dict with request X-Headers.

    Returns:
        Lock object ID
    """

    cli = NeofsCli(shell, NEOFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
    result = cli.object.lock(
        rpc_endpoint=endpoint or NEOFS_ENDPOINT,
        lifetime=lifetime,
        expire_at=expire_at,
        address=address,
        wallet=wallet,
        cid=cid,
        oid=oid,
        bearer=bearer,
        xhdr=xhdr,
        session=session,
        ttl=ttl,
    )

    # split CLI output into lines and take the first line, which carries the lock object ID
    id_str = result.stdout.strip().split("\n")[0]
    oid = id_str.split(":")[1]
    return oid.strip()


@allure.step("Search object")
def search_object(
    wallet: str,
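Returning to the new lock_object verb above: a hedged usage sketch with placeholder values (at least one of lifetime or expire_at must be provided, which is exactly what the LIFETIME_REQUIRED cases in the lock tests exercise):

# Sketch: lock an object for 5 epochs and keep the returned lock object ID.
lock_oid = lock_object(
    wallet="/path/to/wallet.json",  # placeholder wallet path
    cid=cid,
    oid=oid,
    shell=shell,
    lifetime=5,  # alternatively pass expire_at=<target epoch>
)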