sync master branch #1

Merged
dansingjulia merged 39 commits from master into master 2023-01-16 13:21:24 +00:00
54 changed files with 2566 additions and 1223 deletions

View file

@ -7,9 +7,11 @@ hosts:
container_name: s01
config_path: ../neofs-dev-env/services/storage/.storage.env
wallet_path: ../neofs-dev-env/services/storage/wallet01.json
local_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../neofs-dev-env/services/storage/wallet01.json
wallet_password: ""
volume_name: storage_storage_s01
rpc_endpoint: s01.neofs.devenv:8080
endpoint_data0: s01.neofs.devenv:8080
control_endpoint: s01.neofs.devenv:8081
un_locode: "RU MOW"
- name: s02
@ -17,9 +19,11 @@ hosts:
container_name: s02
config_path: ../neofs-dev-env/services/storage/.storage.env
wallet_path: ../neofs-dev-env/services/storage/wallet02.json
local_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../neofs-dev-env/services/storage/wallet02.json
wallet_password: ""
volume_name: storage_storage_s02
rpc_endpoint: s02.neofs.devenv:8080
endpoint_data0: s02.neofs.devenv:8080
control_endpoint: s02.neofs.devenv:8081
un_locode: "RU LED"
- name: s03
@ -27,9 +31,11 @@ hosts:
container_name: s03
config_path: ../neofs-dev-env/services/storage/.storage.env
wallet_path: ../neofs-dev-env/services/storage/wallet03.json
local_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../neofs-dev-env/services/storage/wallet03.json
wallet_password: ""
volume_name: storage_storage_s03
rpc_endpoint: s03.neofs.devenv:8080
endpoint_data0: s03.neofs.devenv:8080
control_endpoint: s03.neofs.devenv:8081
un_locode: "SE STO"
- name: s04
@ -37,9 +43,11 @@ hosts:
container_name: s04
config_path: ../neofs-dev-env/services/storage/.storage.env
wallet_path: ../neofs-dev-env/services/storage/wallet04.json
local_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../neofs-dev-env/services/storage/wallet04.json
wallet_password: ""
volume_name: storage_storage_s04
rpc_endpoint: s04.neofs.devenv:8080
endpoint_data0: s04.neofs.devenv:8080
control_endpoint: s04.neofs.devenv:8081
un_locode: "FI HEL"
- name: s3-gate01
@ -47,35 +55,45 @@ hosts:
container_name: s3_gate
config_path: ../neofs-dev-env/services/s3_gate/.s3.env
wallet_path: ../neofs-dev-env/services/s3_gate/wallet.json
local_config_path: ./TemporaryDir/password-s3.yml
local_wallet_path: ../neofs-dev-env/services/s3_gate/wallet.json
wallet_password: "s3"
endpoint: https://s3.neofs.devenv:8080
endpoint_data0: https://s3.neofs.devenv:8080
- name: http-gate01
attributes:
container_name: http_gate
config_path: ../neofs-dev-env/services/http_gate/.http.env
wallet_path: ../neofs-dev-env/services/http_gate/wallet.json
local_config_path: ./TemporaryDir/password-other.yml
local_wallet_path: ../neofs-dev-env/services/http_gate/wallet.json
wallet_password: "one"
endpoint: http://http.neofs.devenv
endpoint_data0: http://http.neofs.devenv
- name: ir01
attributes:
container_name: ir01
config_path: ../neofs-dev-env/services/ir/.ir.env
wallet_path: ../neofs-dev-env/services/ir/az.json
local_config_path: ./TemporaryDir/password-other.yml
local_wallet_path: ../neofs-dev-env/services/ir/az.json
wallet_password: "one"
- name: morph-chain01
attributes:
container_name: morph_chain
config_path: ../neofs-dev-env/services/morph_chain/protocol.privnet.yml
wallet_path: ../neofs-dev-env/services/morph_chain/node-wallet.json
local_config_path: ./TemporaryDir/password-other.yml
local_wallet_path: ../neofs-dev-env/services/morph_chain/node-wallet.json
wallet_password: "one"
endpoint: http://morph-chain.neofs.devenv:30333
endpoint_internal0: http://morph-chain.neofs.devenv:30333
- name: main-chain01
attributes:
container_name: main_chain
config_path: ../neofs-dev-env/services/chain/protocol.privnet.yml
wallet_path: ../neofs-dev-env/services/chain/node-wallet.json
local_config_path: ./TemporaryDir/password-other.yml
local_wallet_path: ../neofs-dev-env/services/chain/node-wallet.json
wallet_password: "one"
endpoint: http://main-chain.neofs.devenv:30333
endpoint_internal0: http://main-chain.neofs.devenv:30333
- name: coredns01
attributes:
container_name: coredns
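Each new local_config_path entry points to a plain YAML password file generated on the test runner. A minimal sketch of what the bootstrap writes for a storage node (mirroring Cluster._create_wallet_config further below; the empty password comes from this config):

import yaml

# ./TemporaryDir/empty-password.yml becomes a one-key document: {password: ""}
with open("./TemporaryDir/empty-password.yml", "w") as file:
    yaml.dump({"password": ""}, file)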

.github/CODEOWNERS
View file

@ -1 +1 @@
* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev
* @vdomnich-yadro @dansingjulia @yadro-vavdeev

View file

@ -4,6 +4,7 @@ from dataclasses import dataclass
from typing import Any
import data_formatters
import yaml
from neofs_testlib.blockchain import RPCClient
from neofs_testlib.hosting import Host, Hosting
from neofs_testlib.hosting.config import ServiceConfig
@ -62,6 +63,22 @@ class NodeBase:
_ConfigAttributes.WALLET_PATH,
)
def get_remote_wallet_path(self) -> str:
"""
Returns node wallet file path located on remote host
"""
return self._get_attribute(
_ConfigAttributes.WALLET_PATH,
)
def get_remote_config_path(self) -> str:
"""
Returns node config file path located on remote host
"""
return self._get_attribute(
_ConfigAttributes.CONFIG_PATH,
)
def get_wallet_config_path(self):
return self._get_attribute(
_ConfigAttributes.LOCAL_WALLET_CONFIG,
@ -105,7 +122,7 @@ class S3Gate(NodeBase):
"""
def get_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.ENDPOINT)
return self._get_attribute(_ConfigAttributes.ENDPOINT_DATA)
@property
def label(self) -> str:
@ -118,7 +135,7 @@ class HTTPGate(NodeBase):
"""
def get_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.ENDPOINT)
return self._get_attribute(_ConfigAttributes.ENDPOINT_DATA)
@property
def label(self) -> str:
@ -141,7 +158,7 @@ class MorphChain(NodeBase):
self.rpc_client = RPCClient(self.get_endpoint())
def get_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.ENDPOINT)
return self._get_attribute(_ConfigAttributes.ENDPOINT_INTERNAL)
@property
def label(self) -> str:
@ -164,7 +181,7 @@ class MainChain(NodeBase):
self.rpc_client = RPCClient(self.get_endpoint())
def get_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.ENDPOINT)
return self._get_attribute(_ConfigAttributes.ENDPOINT_INTERNAL)
@property
def label(self) -> str:
@ -182,7 +199,7 @@ class StorageNode(NodeBase):
"""
def get_rpc_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.RPC_ENDPOINT)
return self._get_attribute(_ConfigAttributes.ENDPOINT_DATA)
def get_control_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.CONTROL_ENDPOINT)
@ -202,6 +219,7 @@ class Cluster:
default_rpc_endpoint: str
default_s3_gate_endpoint: str
default_http_gate_endpoint: str
def __init__(self, hosting: Hosting) -> None:
self._hosting = hosting
@ -220,6 +238,25 @@ class Cluster:
def hosting(self) -> Hosting:
return self._hosting
def _create_wallet_config(self, service: ServiceConfig) -> None:
wallet_path = service.attributes[_ConfigAttributes.LOCAL_WALLET_CONFIG]
wallet_password = service.attributes[_ConfigAttributes.WALLET_PASSWORD]
with open(wallet_path, "w") as file:
yaml.dump({"password": wallet_password}, file)
def create_wallet_configs(self, hosting: Hosting) -> None:
configs = hosting.find_service_configs(".*")
for config in configs:
if _ConfigAttributes.LOCAL_WALLET_CONFIG in config.attributes:
self._create_wallet_config(config)
def is_local_devenv(self) -> bool:
if len(self.hosting.hosts) == 1:
host = self.hosting.hosts[0]
if host.config.address == "localhost" and host.config.plugin_name == "docker":
return True
return False
@property
def storage_nodes(self) -> list[StorageNode]:
"""
@ -294,10 +331,17 @@ class Cluster:
def get_random_storage_rpc_endpoint(self) -> str:
return random.choice(self.get_storage_rpc_endpoints())
def get_random_storage_rpc_endpoint_mgmt(self) -> str:
return random.choice(self.get_storage_rpc_endpoints_mgmt())
def get_storage_rpc_endpoints(self) -> list[str]:
nodes = self.storage_nodes
return [node.get_rpc_endpoint() for node in nodes]
def get_storage_rpc_endpoints_mgmt(self) -> list[str]:
nodes = self.storage_nodes
return [node.get_rpc_endpoint_mgmt() for node in nodes]
def get_morph_endpoints(self) -> list[str]:
nodes = self.morph_chain_nodes
return [node.get_endpoint() for node in nodes]
@ -316,9 +360,10 @@ class _ConfigAttributes:
WALLET_PASSWORD = "wallet_password"
WALLET_PATH = "wallet_path"
WALLET_CONFIG = "wallet_config"
CONFIG_PATH = "config_path"
LOCAL_WALLET_PATH = "local_wallet_path"
LOCAL_WALLET_CONFIG = "local_config_path"
RPC_ENDPOINT = "rpc_endpoint"
ENDPOINT = "endpoint"
ENDPOINT_DATA = "endpoint_data0"
ENDPOINT_INTERNAL = "endpoint_internal0"
CONTROL_ENDPOINT = "control_endpoint"
UN_LOCODE = "un_locode"
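With the endpoint attributes split, data-plane endpoints resolve through endpoint_data0 while chain services use endpoint_internal0. A hedged usage sketch, assuming a Cluster built from the hosting config above:

cluster = Cluster(hosting)
storage = cluster.storage_nodes[0]
print(storage.get_rpc_endpoint())                    # endpoint_data0, e.g. s01.neofs.devenv:8080
print(cluster.morph_chain_nodes[0].get_endpoint())   # endpoint_internal0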

View file

@ -5,7 +5,7 @@ import allure
from cluster import Cluster
from file_helper import generate_file, get_file_hash
from neofs_testlib.shell import Shell
from neofs_verbs import put_object_to_random_node
from neofs_verbs import put_object, put_object_to_random_node
from storage_object import StorageObjectInfo
from wallet import WalletFile
@ -33,16 +33,37 @@ class StorageContainer:
def get_wallet_path(self) -> str:
return self.storage_container_info.wallet_file.path
def get_wallet_config_path(self) -> str:
return self.storage_container_info.wallet_file.config_path
@allure.step("Generate new object and put in container")
def generate_object(self, size: int, expire_at: Optional[int] = None) -> StorageObjectInfo:
def generate_object(
self,
size: int,
expire_at: Optional[int] = None,
bearer_token: Optional[str] = None,
endpoint: Optional[str] = None,
) -> StorageObjectInfo:
with allure.step(f"Generate object with size {size}"):
file_path = generate_file(size)
file_hash = get_file_hash(file_path)
container_id = self.get_id()
wallet_path = self.get_wallet_path()
wallet_config = self.get_wallet_config_path()
with allure.step(f"Put object with size {size} to container {container_id}"):
if endpoint:
object_id = put_object(
wallet=wallet_path,
path=file_path,
cid=container_id,
expire_at=expire_at,
shell=self.shell,
endpoint=endpoint,
bearer=bearer_token,
wallet_config=wallet_config,
)
else:
object_id = put_object_to_random_node(
wallet=wallet_path,
path=file_path,
@ -50,6 +71,8 @@ class StorageContainer:
expire_at=expire_at,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_token,
wallet_config=wallet_config,
)
storage_object = StorageObjectInfo(
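A hedged sketch of the extended generate_object signature: with an explicit endpoint the object is put through put_object to that node; without one it keeps the previous random-node behavior. bearer_token and endpoint are the new optional parameters:

storage_object = container.generate_object(
    size=simple_object_size,
    bearer_token=bearer,                    # assumed prepared by the calling test
    endpoint=cluster.default_rpc_endpoint,  # omit to fall back to a random node
)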

View file

@ -5,12 +5,12 @@ import uuid
from typing import Any, Optional
import allure
from common import ASSETS_DIR, SIMPLE_OBJ_SIZE
from common import ASSETS_DIR
logger = logging.getLogger("NeoLogger")
def generate_file(size: int = SIMPLE_OBJ_SIZE) -> str:
def generate_file(size: int) -> str:
"""Generates a binary file with the specified size in bytes.
Args:
@ -28,6 +28,7 @@ def generate_file(size: int = SIMPLE_OBJ_SIZE) -> str:
def generate_file_with_content(
size: int,
file_path: Optional[str] = None,
content: Optional[str] = None,
) -> str:
@ -44,7 +45,7 @@ def generate_file_with_content(
"""
mode = "w+"
if content is None:
content = os.urandom(SIMPLE_OBJ_SIZE)
content = os.urandom(size)
mode = "wb"
if not file_path:
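Since generate_file no longer defaults to SIMPLE_OBJ_SIZE, callers now pass a size fixture explicitly, and generate_file_with_content fills in random bytes of that same size only when no content is given. A hedged sketch:

file_path = generate_file(simple_object_size)
seeded_path = generate_file_with_content(simple_object_size, content="hello")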

View file

@ -20,6 +20,10 @@ LOCK_NON_REGULAR_OBJECT = "code = 2051"
LIFETIME_REQUIRED = "either expiration epoch of a lifetime is required"
LOCK_OBJECT_REMOVAL = "lock object removal"
LOCK_OBJECT_EXPIRATION = "lock object expiration: {expiration_epoch}; current: {current_epoch}"
INVALID_RANGE_ZERO_LENGTH = "invalid '{range}' range: zero length"
INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"
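The new range-error constants are format templates keyed on the requested range. A hedged sketch of matching one against a captured CLI failure (err is assumed to be the exception raised by a zero-length range request):

expected = INVALID_RANGE_ZERO_LENGTH.format(range="0:0")
assert error_matches_status(err, expected)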
def error_matches_status(error: Exception, status_pattern: str) -> bool:

View file

@ -1,10 +1,9 @@
import os
import uuid
from dataclasses import dataclass
from typing import Optional
from cluster import Cluster
from common import FREE_STORAGE, WALLET_PASS
from cluster import Cluster, NodeBase
from common import FREE_STORAGE, WALLET_CONFIG, WALLET_PASS
from neofs_testlib.shell import Shell
from neofs_testlib.utils.wallet import get_last_address_from_wallet, init_wallet
from python_keywords.payment_neogo import deposit_gas, transfer_gas
@ -13,7 +12,14 @@ from python_keywords.payment_neogo import deposit_gas, transfer_gas
@dataclass
class WalletFile:
path: str
password: str
password: str = WALLET_PASS
config_path: str = WALLET_CONFIG
@staticmethod
def from_node(node: NodeBase):
return WalletFile(
node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path()
)
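A hedged sketch of the new from_node constructor: any NodeBase service can be wrapped into a WalletFile that carries its password and local wallet config along:

node_wallet = WalletFile.from_node(cluster.storage_nodes[0])
print(node_wallet.path, node_wallet.config_path)  # wallet path plus its local password config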
def get_address(self) -> str:
"""

View file

@ -6,8 +6,6 @@ log_format = %(asctime)s [%(levelname)4s] %(message)s
log_cli_date_format = %Y-%m-%d %H:%M:%S
log_date_format = %H:%M:%S
markers =
# controller markers
no_log_analyze: skip critical errors analyzer at the end of test
# special markers
staging: tests excluded from verifier/pr-validation/sanity jobs and run only in the staging job
sanity: test runs in sanity testrun
@ -15,6 +13,7 @@ markers =
# functional markers
container: tests for container creation
grpc_api: standard gRPC API tests
grpc_control: tests related to using neofs-cli control commands
grpc_object_lock: gRPC lock tests
http_gate: HTTP gate contract
s3_gate: All S3 gate tests
@ -29,6 +28,7 @@ markers =
node_mgmt: neofs control commands
session_token: tests for operations with session token
static_session: tests for operations with static session token
bearer: tests for bearer tokens
acl: All tests for ACL
acl_basic: tests for basic ACL
acl_bearer: tests for ACL with bearer

View file

@ -0,0 +1,27 @@
import os
# Load node parameters
LOAD_NODES = os.getenv("LOAD_NODES", "").split(",")
LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "root")
LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH")
BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 10)
BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 10)
BACKGROUND_OBJ_SIZE = os.getenv("BACKGROUND_OBJ_SIZE", 1024)
BACKGROUND_LOAD_MAX_TIME = os.getenv("BACKGROUND_LOAD_MAX_TIME", 600)
# Load run parameters
OBJ_SIZE = os.getenv("OBJ_SIZE", "1000").split(",")
CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "1").split(",")
OUT_FILE = os.getenv("OUT_FILE", "1mb_200.json").split(",")
OBJ_COUNT = os.getenv("OBJ_COUNT", "4").split(",")
WRITERS = os.getenv("WRITERS", "200").split(",")
READERS = os.getenv("READERS", "0").split(",")
DELETERS = os.getenv("DELETERS", "0").split(",")
LOAD_TIME = os.getenv("LOAD_TIME", "200").split(",")
LOAD_TYPE = os.getenv("LOAD_TYPE", "grpc").split(",")
LOAD_NODES_COUNT = os.getenv("LOAD_NODES_COUNT", "1").split(",")
STORAGE_NODE_COUNT = os.getenv("STORAGE_NODE_COUNT", "4").split(",")
CONTAINER_PLACEMENT_POLICY = os.getenv(
"CONTAINER_PLACEMENT_POLICY", "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
)
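Every load knob is read from the environment as a comma-separated list, so one run can describe several scenarios that the tests later zip positionally. A hedged override sketch with hypothetical values:

os.environ["OBJ_SIZE"] = "1000,8000"  # two object sizes...
os.environ["WRITERS"] = "200,150"     # ...paired index-by-index with these writer counts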

View file

@ -1,3 +1,4 @@
import allure
import epoch
import pytest
from cluster import Cluster
@ -15,9 +16,17 @@ class ClusterTestBase:
ClusterTestBase.cluster = cluster
yield
@allure.title("Tick {epochs_to_tick} epochs")
def tick_epochs(self, epochs_to_tick: int):
for _ in range(epochs_to_tick):
self.tick_epoch()
def tick_epoch(self):
epoch.tick_epoch(self.shell, self.cluster)
def wait_for_epochs_align(self):
epoch.wait_for_epochs_align(self.shell, self.cluster)
def get_epoch(self):
return epoch.get_epoch(self.shell, self.cluster)
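A hedged sketch of the new epoch helpers from a test class: tick several epochs as a single reported step, then wait until every node agrees on the current epoch:

class TestExpiration(ClusterTestBase):
    def test_object_expires(self):
        self.tick_epochs(2)           # reported as one "Tick 2 epochs" Allure step
        self.wait_for_epochs_align()  # block until all storage nodes report the same epoch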

View file

@ -12,6 +12,7 @@ from neofs_testlib.shell import CommandOptions, SSHShell
from neofs_testlib.shell.interfaces import InteractiveInput
NEOFS_AUTHMATE_PATH = "neofs-s3-authmate"
STOPPED_HOSTS = []
@allure.title("Get services endpoints")
@ -22,14 +23,31 @@ def get_services_endpoints(
return [service_config.attributes[endpoint_attribute] for service_config in service_configs]
@allure.title("Stop nodes")
def stop_unused_nodes(storage_nodes: list, used_nodes_count: int):
for node in storage_nodes[used_nodes_count:]:
host = node.host
STOPPED_HOSTS.append(host)
host.stop_host("hard")
@allure.title("Start nodes")
def start_stopped_nodes():
# Iterate over a copy: removing from STOPPED_HOSTS while iterating it would skip hosts
for host in list(STOPPED_HOSTS):
host.start_host()
STOPPED_HOSTS.remove(host)
@allure.title("Init s3 client")
def init_s3_client(load_nodes: list, login: str, pkey: str, hosting: Hosting):
def init_s3_client(
load_nodes: list, login: str, pkey: str, container_placement_policy: str, hosting: Hosting
):
service_configs = hosting.find_service_configs(STORAGE_NODE_SERVICE_NAME_REGEX)
host = hosting.get_host_by_service(service_configs[0].name)
wallet_path = service_configs[0].attributes["wallet_path"]
neogo_cli_config = host.get_cli_config("neo-go")
neogo_wallet = NeoGo(shell=host.get_shell(), neo_go_exec_path=neogo_cli_config.exec_path).wallet
dump_keys_output = neogo_wallet.dump_keys(wallet_config=wallet_path).stdout
dump_keys_output = neogo_wallet.dump_keys(wallet=wallet_path, wallet_config=None).stdout
public_key = str(re.search(r":\n(?P<public_key>.*)", dump_keys_output).group("public_key"))
node_endpoint = service_configs[0].attributes["rpc_endpoint"]
# prompt_pattern doesn't work at the moment
@ -44,7 +62,7 @@ def init_s3_client(load_nodes: list, login: str, pkey: str, hosting: Hosting):
peer=node_endpoint,
bearer_rules=f"{path}/scenarios/files/rules.json",
gate_public_key=public_key,
container_placement_policy="REP 1 IN X CBF 1 SELECT 1 FROM * AS X",
container_placement_policy=container_placement_policy,
container_policy=f"{path}/scenarios/files/policy.json",
wallet_password="",
).stdout
@ -88,7 +106,7 @@ def prepare_objects(k6_instance: K6):
@allure.title("Prepare K6 instances and objects")
def prepare_k6_instances(
load_nodes: list, login: str, pkey: str, load_params: LoadParams, prepare: bool = True
) -> list:
) -> list[K6]:
k6_load_objects = []
for load_node in load_nodes:
ssh_client = SSHShell(host=load_node, login=login, private_key_path=pkey)
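A hedged pairing sketch for the new node helpers: stop every storage host beyond the first N for a scaled-down run, then restore them in teardown (STOPPED_HOSTS tracks what to bring back):

stop_unused_nodes(cluster.storage_nodes, used_nodes_count=4)
try:
    pass  # run the load scenario here
finally:
    start_stopped_nodes()  # restarts and forgets every host stopped above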

View file

@ -46,13 +46,9 @@ class TestS3GateBase(ClusterTestBase):
wallet = default_wallet
s3_bearer_rules_file = f"{os.getcwd()}/robot/resources/files/s3_bearer_rules.json"
policy = None if isinstance(request.param, str) else request.param[1]
(
cid,
bucket,
access_key_id,
secret_access_key,
owner_private_key,
) = init_s3_credentials(wallet, cluster, s3_bearer_rules_file=s3_bearer_rules_file)
(cid, bucket, access_key_id, secret_access_key, owner_private_key,) = init_s3_credentials(
wallet, cluster, s3_bearer_rules_file=s3_bearer_rules_file, policy=policy
)
containers_list = list_containers(
wallet, shell=client_shell, endpoint=self.cluster.default_rpc_endpoint
)
@ -88,15 +84,32 @@ class TestS3GateBase(ClusterTestBase):
def delete_all_object_in_bucket(self, bucket):
versioning_status = s3_gate_bucket.get_bucket_versioning_status(self.s3_client, bucket)
if versioning_status == s3_gate_bucket.VersioningStatus.ENABLED.value:
# From versioned bucket we should delete all versions of all objects
# From versioned bucket we should delete all versions and delete markers of all objects
objects_versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
if objects_versions:
s3_gate_object.delete_object_versions_s3(self.s3_client, bucket, objects_versions)
s3_gate_object.delete_object_versions_s3_without_dm(
self.s3_client, bucket, objects_versions
)
objects_delete_markers = s3_gate_object.list_objects_delete_markers_s3(
self.s3_client, bucket
)
if objects_delete_markers:
s3_gate_object.delete_object_versions_s3_without_dm(
self.s3_client, bucket, objects_delete_markers
)
else:
# From non-versioned bucket it's sufficient to delete objects by key
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
if objects:
s3_gate_object.delete_objects_s3(self.s3_client, bucket, objects)
objects_delete_markers = s3_gate_object.list_objects_delete_markers_s3(
self.s3_client, bucket
)
if objects_delete_markers:
s3_gate_object.delete_object_versions_s3_without_dm(
self.s3_client, bucket, objects_delete_markers
)
# Delete the bucket itself
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket)

View file

@ -85,6 +85,21 @@ def list_objects_versions_s3(s3_client, bucket: str, full_output: bool = False)
) from err
@allure.step("List objects delete markers S3")
def list_objects_delete_markers_s3(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_object_versions(Bucket=bucket)
delete_markers = response.get("DeleteMarkers", [])
log_command_execution("S3 List objects delete markers result", response)
return response if full_output else delete_markers
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Put object S3")
def put_object_s3(s3_client, bucket: str, filepath: str, **kwargs):
filename = os.path.basename(filepath)
@ -185,6 +200,27 @@ def delete_object_versions_s3(s3_client, bucket: str, object_versions: list):
) from err
@allure.step("Delete object versions S3 without delete markers")
def delete_object_versions_s3_without_dm(s3_client, bucket: str, object_versions: list):
try:
# Delete objects without creating delete markers
for object_version in object_versions:
params = {
"Bucket": bucket,
"Key": object_version["Key"],
"VersionId": object_version["VersionId"],
}
response = s3_client.delete_object(**params)
log_command_execution("S3 Delete object result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
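The no-delete-marker helper leans on standard S3 versioning semantics: a DELETE that names an explicit VersionId permanently removes that version without creating a delete marker, which is what makes a full versioned-bucket cleanup possible. The core boto3 call, as used above:

response = s3_client.delete_object(
    Bucket=bucket,
    Key=object_version["Key"],
    VersionId=object_version["VersionId"],  # versioned delete: no delete marker is produced
)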
@allure.step("Put object ACL")
def put_object_acl_s3(
s3_client,

View file

@ -124,7 +124,7 @@ def generate_container_session_token(
session = {
"container": {
"verb": verb.value,
"wildcard": cid is not None,
"wildcard": cid is None,
**({"containerID": {"value": f"{encode_for_json(cid)}"}} if cid is not None else {}),
},
}
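The corrected wildcard flag encodes the intended session-token semantics: the token is a wildcard one, valid for any container, exactly when no container ID is supplied. A hedged sketch of the two resulting payload shapes (the verb value is illustrative):

# cid=None  -> {"container": {"verb": "PUT", "wildcard": True}}
# cid="..." -> {"container": {"verb": "PUT", "wildcard": False, "containerID": {"value": "..."}}}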

View file

@ -68,8 +68,8 @@ def wallets(default_wallet, temp_directory, cluster: Cluster) -> Wallets:
@pytest.fixture(scope="module")
def file_path():
yield generate_file()
def file_path(simple_object_size):
yield generate_file(simple_object_size)
@pytest.fixture(scope="function")

View file

@ -6,7 +6,7 @@ from typing import Optional
import allure
import pytest
from cluster_test_base import ClusterTestBase
from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, FREE_STORAGE, SIMPLE_OBJ_SIZE, WALLET_PASS
from common import ASSETS_DIR, FREE_STORAGE, WALLET_PASS
from file_helper import generate_file
from grpc_responses import OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
from neofs_testlib.utils.wallet import init_wallet
@ -37,7 +37,7 @@ deposit = 30
@pytest.mark.parametrize(
"object_size",
[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
@pytest.mark.sanity
@ -68,7 +68,7 @@ class TestStorageGroup(ClusterTestBase):
)
@allure.title("Test Storage Group in Private Container")
def test_storagegroup_basic_private_container(self, object_size):
def test_storagegroup_basic_private_container(self, object_size, max_object_size):
cid = create_container(
self.main_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
@ -88,6 +88,7 @@ class TestStorageGroup(ClusterTestBase):
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
)
self.expect_failure_for_storagegroup_operations(
wallet=self.other_wallet,
@ -100,10 +101,11 @@ class TestStorageGroup(ClusterTestBase):
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
)
@allure.title("Test Storage Group in Public Container")
def test_storagegroup_basic_public_container(self, object_size):
def test_storagegroup_basic_public_container(self, object_size, max_object_size):
cid = create_container(
self.main_wallet,
basic_acl="public-read-write",
@ -120,22 +122,25 @@ class TestStorageGroup(ClusterTestBase):
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
)
self.expect_success_for_storagegroup_operations(
wallet=self.other_wallet,
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
)
self.storagegroup_operations_by_system_ro_container(
wallet=self.main_wallet,
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
)
@allure.title("Test Storage Group in Read-Only Container")
def test_storagegroup_basic_ro_container(self, object_size):
def test_storagegroup_basic_ro_container(self, object_size, max_object_size):
cid = create_container(
self.main_wallet,
basic_acl="public-read",
@ -152,6 +157,7 @@ class TestStorageGroup(ClusterTestBase):
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
)
self.storagegroup_operations_by_other_ro_container(
owner_wallet=self.main_wallet,
@ -159,16 +165,18 @@ class TestStorageGroup(ClusterTestBase):
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
)
self.storagegroup_operations_by_system_ro_container(
wallet=self.main_wallet,
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
)
@allure.title("Test Storage Group with Bearer Allow")
def test_storagegroup_bearer_allow(self, object_size):
def test_storagegroup_bearer_allow(self, object_size, max_object_size):
cid = create_container(
self.main_wallet,
basic_acl="eacl-public-read-write",
@ -185,6 +193,7 @@ class TestStorageGroup(ClusterTestBase):
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
)
storage_group = put_storagegroup(
self.shell, self.cluster.default_rpc_endpoint, self.main_wallet, cid, objects
@ -219,6 +228,7 @@ class TestStorageGroup(ClusterTestBase):
cid=cid,
obj_list=objects,
object_size=object_size,
max_object_size=max_object_size,
bearer=bearer_file,
)
@ -243,6 +253,7 @@ class TestStorageGroup(ClusterTestBase):
with allure.step("Tick two epochs"):
for _ in range(2):
self.tick_epoch()
self.wait_for_epochs_align()
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_storagegroup(
shell=self.shell,
@ -259,6 +270,7 @@ class TestStorageGroup(ClusterTestBase):
cid: str,
obj_list: list,
object_size: int,
max_object_size: int,
bearer: Optional[str] = None,
):
"""
@ -285,6 +297,7 @@ class TestStorageGroup(ClusterTestBase):
gid=storage_group,
obj_list=obj_list,
object_size=object_size,
max_object_size=max_object_size,
bearer=bearer,
)
delete_storagegroup(
@ -342,6 +355,7 @@ class TestStorageGroup(ClusterTestBase):
cid: str,
obj_list: list,
object_size: int,
max_object_size: int,
):
storage_group = put_storagegroup(
self.shell, self.cluster.default_rpc_endpoint, owner_wallet, cid, obj_list
@ -369,6 +383,7 @@ class TestStorageGroup(ClusterTestBase):
gid=storage_group,
obj_list=obj_list,
object_size=object_size,
max_object_size=max_object_size,
)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
delete_storagegroup(
@ -381,7 +396,12 @@ class TestStorageGroup(ClusterTestBase):
@allure.step("Run Storage Group Operations On Systems's Behalf In RO Container")
def storagegroup_operations_by_system_ro_container(
self, wallet: str, cid: str, obj_list: list, object_size: int
self,
wallet: str,
cid: str,
obj_list: list,
object_size: int,
max_object_size: int,
):
"""
In this func we create a Storage Group on Inner Ring's key behalf
@ -438,6 +458,7 @@ class TestStorageGroup(ClusterTestBase):
gid=storage_group,
obj_list=obj_list,
object_size=object_size,
max_object_size=max_object_size,
wallet_config=ir_wallet_config,
)
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):

View file

@ -12,27 +12,32 @@ from binary_version_helper import get_local_binaries_versions, get_remote_binari
from cluster import Cluster
from common import (
ASSETS_DIR,
BACKGROUND_LOAD_MAX_TIME,
BACKGROUND_OBJ_SIZE,
BACKGROUND_READERS_COUNT,
BACKGROUND_WRITERS_COUNT,
COMPLEX_OBJECT_CHUNKS_COUNT,
COMPLEX_OBJECT_TAIL_SIZE,
FREE_STORAGE,
HOSTING_CONFIG_FILE,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
LOAD_NODES,
SIMPLE_OBJECT_SIZE,
STORAGE_NODE_SERVICE_NAME_REGEX,
WALLET_PASS,
)
from env_properties import save_env_properties
from k6 import LoadParams
from load import get_services_endpoints, prepare_k6_instances
from load_params import (
BACKGROUND_LOAD_MAX_TIME,
BACKGROUND_OBJ_SIZE,
BACKGROUND_READERS_COUNT,
BACKGROUND_WRITERS_COUNT,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
LOAD_NODES,
)
from neofs_testlib.hosting import Hosting
from neofs_testlib.reporter import AllureHandler, get_reporter
from neofs_testlib.shell import LocalShell, Shell
from neofs_testlib.utils.wallet import init_wallet
from payment_neogo import deposit_gas, transfer_gas
from pytest import FixtureRequest
from python_keywords.neofs_verbs import get_netmap_netinfo
from python_keywords.node_management import storage_node_healthcheck
from helpers.wallet import WalletFactory
@ -82,14 +87,39 @@ def require_multiple_hosts(hosting: Hosting):
yield
@pytest.fixture(scope="session")
def max_object_size(cluster: Cluster, client_shell: Shell) -> int:
storage_node = cluster.storage_nodes[0]
net_info = get_netmap_netinfo(
wallet=storage_node.get_wallet_path(),
wallet_config=storage_node.get_wallet_config_path(),
endpoint=storage_node.get_rpc_endpoint(),
shell=client_shell,
)
yield net_info["maximum_object_size"]
@pytest.fixture(scope="session")
def simple_object_size(max_object_size: int) -> int:
yield int(SIMPLE_OBJECT_SIZE) if int(SIMPLE_OBJECT_SIZE) < max_object_size else max_object_size
@pytest.fixture(scope="session")
def complex_object_size(max_object_size: int) -> int:
return max_object_size * int(COMPLEX_OBJECT_CHUNKS_COUNT) + int(COMPLEX_OBJECT_TAIL_SIZE)
@pytest.fixture(scope="session")
def wallet_factory(temp_directory: str, client_shell: Shell, cluster: Cluster) -> WalletFactory:
return WalletFactory(temp_directory, client_shell, cluster)
@pytest.fixture(scope="session")
def cluster(hosting: Hosting) -> Cluster:
yield Cluster(hosting)
def cluster(temp_directory: str, hosting: Hosting) -> Cluster:
cluster = Cluster(hosting)
if cluster.is_local_devenv():
cluster.create_wallet_configs(hosting)
yield cluster
@pytest.fixture(scope="session", autouse=True)
@ -116,25 +146,6 @@ def temp_directory():
shutil.rmtree(full_path)
@pytest.fixture(scope="function", autouse=True)
@allure.title("Analyze logs")
def analyze_logs(temp_directory: str, hosting: Hosting, request: FixtureRequest):
start_time = datetime.utcnow()
yield
end_time = datetime.utcnow()
# Skip tests where we expect failures in logs
if request.node.get_closest_marker("no_log_analyze"):
with allure.step("Skip analyze logs due to no_log_analyze mark"):
return
# Test name may exceed os NAME_MAX (255 bytes), so we use test start datetime instead
start_time_str = start_time.strftime("%Y_%m_%d_%H_%M_%S_%f")
logs_dir = os.path.join(temp_directory, f"logs_{start_time_str}")
dump_logs(hosting, logs_dir, start_time, end_time)
check_logs(logs_dir)
@pytest.fixture(scope="session", autouse=True)
@allure.title("Collect logs")
def collect_logs(temp_directory, hosting: Hosting):
@ -146,6 +157,7 @@ def collect_logs(temp_directory, hosting: Hosting):
logs_dir = os.path.join(temp_directory, "logs")
dump_logs(hosting, logs_dir, start_time, end_time)
attach_logs(logs_dir)
check_logs(logs_dir)
@pytest.fixture(scope="session", autouse=True)
@ -162,7 +174,7 @@ def run_health_check(collect_logs, cluster: Cluster):
@pytest.fixture(scope="session")
def background_grpc_load(client_shell, default_wallet):
def background_grpc_load(client_shell: Shell, hosting: Hosting):
registry_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.bolt")
prepare_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.json")
allure.dynamic.title(
@ -254,8 +266,9 @@ def default_wallet(client_shell: Shell, temp_directory: str, cluster: Cluster):
return wallet_path
@allure.title("Check logs for OOM and PANIC entries in {logs_dir}")
def check_logs(logs_dir: str):
problem_pattern = r"\Wpanic\W|\Woom\W"
problem_pattern = r"\Wpanic\W|\Woom\W|\Wtoo many open files\W"
log_file_paths = []
for directory_path, _, file_names in os.walk(logs_dir):
@ -267,9 +280,9 @@ def check_logs(logs_dir: str):
logs_with_problem = []
for file_path in log_file_paths:
with allure.step(f"Check log file {file_path}"):
with open(file_path, "r") as log_file:
if re.search(problem_pattern, log_file.read(), flags=re.IGNORECASE):
attach_logs(logs_dir)
logs_with_problem.append(file_path)
if logs_with_problem:
raise pytest.fail(f"System logs {', '.join(logs_with_problem)} contain critical errors")
@ -280,7 +293,11 @@ def dump_logs(hosting: Hosting, logs_dir: str, since: datetime, until: datetime)
os.makedirs(logs_dir)
for host in hosting.hosts:
with allure.step(f"Dump logs from host {host.config.address}"):
try:
host.dump_logs(logs_dir, since=since, until=until)
except Exception as ex:
logger.warning(f"Exception during logs collection: {ex}")
def attach_logs(logs_dir: str) -> None:
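The new size fixtures derive everything from the live network: simple_object_size is clamped to the node-reported maximum_object_size, and complex_object_size is sized so the object is guaranteed to split. A hedged arithmetic sketch with illustrative values:

max_object_size = 67108864                             # from netmap netinfo, e.g. 64 MiB
chunks, tail = 2, 1000                                 # assumed COMPLEX_OBJECT_CHUNKS_COUNT / _TAIL_SIZE
complex_object_size = max_object_size * chunks + tail  # exceeds one chunk, so splitting is forced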

View file

@ -81,7 +81,7 @@ class TestContainer(ClusterTestBase):
delete_container(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
tick_epoch(self.shell, self.cluster)
self.tick_epoch()
wait_for_container_deletion(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
@ -121,7 +121,7 @@ class TestContainer(ClusterTestBase):
delete_container(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
tick_epoch(self.shell, self.cluster)
self.tick_epoch()
wait_for_container_deletion(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)

View file

@ -38,7 +38,9 @@ class TestFailoverNetwork(ClusterTestBase):
wait_all_storage_nodes_returned(self.cluster)
@allure.title("Block Storage node traffic")
def test_block_storage_node_traffic(self, default_wallet, require_multiple_hosts):
def test_block_storage_node_traffic(
self, default_wallet, require_multiple_hosts, simple_object_size
):
"""
Block storage node traffic using iptables and wait for objects to be replicated.
"""
@ -47,7 +49,7 @@ class TestFailoverNetwork(ClusterTestBase):
wakeup_node_timeout = 10 # timeout to let nodes detect that traffic has blocked
nodes_to_block_count = 2
source_file_path = generate_file()
source_file_path = generate_file(simple_object_size)
cid = create_container(
wallet,
shell=self.shell,

View file

@ -47,14 +47,11 @@ class TestFailoverStorage(ClusterTestBase):
@pytest.mark.parametrize("hard_reboot", [True, False])
@pytest.mark.failover_reboot
def test_lose_storage_node_host(
self,
default_wallet,
hard_reboot: bool,
require_multiple_hosts,
self, default_wallet, hard_reboot: bool, require_multiple_hosts, simple_object_size
):
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file()
source_file_path = generate_file(simple_object_size)
cid = create_container(
wallet,
shell=self.shell,
@ -90,7 +87,7 @@ class TestFailoverStorage(ClusterTestBase):
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
with allure.step(f"Return all hosts"):
with allure.step("Return all hosts"):
return_stopped_hosts(self.cluster)
with allure.step("Check object data is not corrupted"):
@ -106,14 +103,11 @@ class TestFailoverStorage(ClusterTestBase):
@pytest.mark.parametrize("sequence", [True, False])
@pytest.mark.failover_panic
def test_panic_storage_node_host(
self,
default_wallet,
require_multiple_hosts,
sequence: bool,
self, default_wallet, require_multiple_hosts, sequence: bool, simple_object_size
):
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 2 SELECT 2 FROM * AS X"
source_file_path = generate_file()
source_file_path = generate_file(simple_object_size)
cid = create_container(
wallet,
shell=self.shell,
@ -129,7 +123,7 @@ class TestFailoverStorage(ClusterTestBase):
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
allure.attach(
"\n".join(nodes),
"\n".join([str(node) for node in nodes]),
"Current nodes with object",
allure.attachment_type.TEXT,
)
@ -157,7 +151,7 @@ class TestFailoverStorage(ClusterTestBase):
)
allure.attach(
"\n".join(new_nodes),
"\n".join([str(new_node) for new_node in new_nodes]),
f"Nodes with object after {node} fail",
allure.attachment_type.TEXT,
)
@ -167,7 +161,7 @@ class TestFailoverStorage(ClusterTestBase):
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
allure.attach(
"\n".join(new_nodes),
"\n".join([str(new_node) for new_node in new_nodes]),
"Nodes with object after nodes fail",
allure.attachment_type.TEXT,
)

View file

@ -1,12 +1,8 @@
from enum import Enum
import allure
import pytest
from cluster_test_base import ClusterTestBase
from common import (
HTTP_GATE_SERVICE_NAME_REGEX,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
LOAD_NODES,
S3_GATE_SERVICE_NAME_REGEX,
STORAGE_NODE_SERVICE_NAME_REGEX,
)
@ -17,34 +13,65 @@ from load import (
init_s3_client,
multi_node_k6_run,
prepare_k6_instances,
start_stopped_nodes,
stop_unused_nodes,
)
from load_params import (
CONTAINER_PLACEMENT_POLICY,
CONTAINERS_COUNT,
DELETERS,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
LOAD_NODES,
LOAD_NODES_COUNT,
LOAD_TIME,
LOAD_TYPE,
OBJ_COUNT,
OBJ_SIZE,
OUT_FILE,
READERS,
STORAGE_NODE_COUNT,
WRITERS,
)
from neofs_testlib.hosting import Hosting
class LoadTime(Enum):
EXPECTED_MAXIMUM = 200
PMI_EXPECTATION = 900
CONTAINERS_COUNT = 1
OBJ_COUNT = 3
ENDPOINTS_ATTRIBUTES = {
"http": {"regex": HTTP_GATE_SERVICE_NAME_REGEX, "endpoint_attribute": "endpoint"},
"grpc": {"regex": STORAGE_NODE_SERVICE_NAME_REGEX, "endpoint_attribute": "rpc_endpoint"},
"s3": {"regex": S3_GATE_SERVICE_NAME_REGEX, "endpoint_attribute": "endpoint"},
}
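ENDPOINTS_ATTRIBUTES maps each parametrized load_type to the service-name regex and config attribute used to discover endpoints. A hedged lookup sketch for the grpc case:

attrs = ENDPOINTS_ATTRIBUTES["grpc"]
endpoints_list = get_services_endpoints(
    hosting=hosting,
    service_name_regex=attrs["regex"],               # storage-node services
    endpoint_attribute=attrs["endpoint_attribute"],  # "rpc_endpoint"
)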
@pytest.mark.load
class TestLoad:
class TestLoad(ClusterTestBase):
@pytest.fixture(autouse=True)
def clear_cache_and_data(self, hosting: Hosting):
clear_cache_and_data(hosting=hosting)
yield
start_stopped_nodes()
@pytest.mark.parametrize("obj_size, out_file", [(1000, "1mb_200.json")])
@pytest.mark.parametrize("writers, readers, deleters", [(140, 60, 0), (200, 0, 0)])
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
@pytest.fixture(scope="session", autouse=True)
def init_s3_client(self, hosting: Hosting):
if "s3" in list(map(lambda x: x.lower(), LOAD_TYPE)):
init_s3_client(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
hosting=hosting,
container_placement_policy=CONTAINER_PLACEMENT_POLICY,
)
@pytest.mark.parametrize("node_count", [4])
@pytest.mark.parametrize("obj_size, out_file", list(zip(OBJ_SIZE, OUT_FILE)))
@pytest.mark.parametrize("writers, readers, deleters", list(zip(WRITERS, READERS, DELETERS)))
@pytest.mark.parametrize("load_time", LOAD_TIME)
@pytest.mark.parametrize("node_count", STORAGE_NODE_COUNT)
@pytest.mark.parametrize("containers_count", CONTAINERS_COUNT)
@pytest.mark.parametrize("load_type", LOAD_TYPE)
@pytest.mark.parametrize("obj_count", OBJ_COUNT)
@pytest.mark.parametrize("load_nodes_count", LOAD_NODES_COUNT)
@pytest.mark.benchmark
@pytest.mark.grpc
def test_grpc_benchmark(
def test_custom_load(
self,
obj_size,
out_file,
@ -53,604 +80,41 @@ class TestLoad:
deleters,
load_time,
node_count,
obj_count,
load_type,
load_nodes_count,
containers_count,
hosting: Hosting,
):
allure.dynamic.title(
f"Benchmark test - node_count = {node_count}, "
f"Load test - node_count = {node_count}, "
f"writers = {writers} readers = {readers}, "
f"deleters = {deleters}, obj_size = {obj_size}, "
f"load_time = {load_time}"
)
stop_unused_nodes(self.cluster.storage_nodes, node_count)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,
endpoint_attribute="rpc_endpoint",
service_name_regex=ENDPOINTS_ATTRIBUTES[load_type]["regex"],
endpoint_attribute=ENDPOINTS_ATTRIBUTES[load_type]["endpoint_attribute"],
)
endpoints = ",".join(endpoints_list[:node_count])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=CONTAINERS_COUNT,
containers_count=containers_count,
out_file=out_file,
obj_count=OBJ_COUNT,
obj_count=obj_count,
writers=writers,
readers=readers,
deleters=deleters,
load_time=load_time,
load_type="grpc",
load_type=load_type,
)
load_nodes_list = LOAD_NODES[:load_nodes_count]
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
@pytest.mark.parametrize(
"obj_size, out_file, writers",
[
(4, "4kb_300.json", 300),
(16, "16kb_250.json", 250),
(64, "64kb_250.json", 250),
(128, "128kb_250.json", 250),
(512, "512kb_200.json", 200),
(1000, "1mb_200.json", 200),
(8000, "8mb_150.json", 150),
(32000, "32mb_150.json", 150),
(128000, "128mb_100.json", 100),
(512000, "512mb_50.json", 50),
],
)
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
)
@pytest.mark.benchmark
@pytest.mark.grpc
def test_grpc_benchmark_write(
self,
obj_size,
out_file,
writers,
load_time,
hosting: Hosting,
):
allure.dynamic.title(
f"Single gate benchmark write test - "
f"writers = {writers}, "
f"obj_size = {obj_size}, "
f"load_time = {load_time}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,
endpoint_attribute="rpc_endpoint",
)
endpoints = ",".join(endpoints_list[:1])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=CONTAINERS_COUNT,
out_file=out_file,
obj_count=OBJ_COUNT,
writers=writers,
readers=0,
deleters=0,
load_time=load_time,
load_type="grpc",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
@pytest.mark.parametrize(
"obj_size, out_file, writers, readers",
[
(8000, "8mb_350.json", 245, 105),
(32000, "32mb_300.json", 210, 90),
(128000, "128mb_100.json", 70, 30),
(512000, "512mb_70.json", 49, 21),
],
)
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
)
@pytest.mark.benchmark
@pytest.mark.grpc
def test_grpc_benchmark_write_read_70_30(
self,
obj_size,
out_file,
writers,
readers,
load_time,
hosting: Hosting,
):
allure.dynamic.title(
f"Single gate benchmark write + read (70%/30%) test - "
f"writers = {writers}, "
f"readers = {readers}, "
f"obj_size = {obj_size}, "
f"load_time = {load_time}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,
endpoint_attribute="rpc_endpoint",
)
endpoints = ",".join(endpoints_list[:1])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=CONTAINERS_COUNT,
out_file=out_file,
obj_count=500,
writers=writers,
readers=readers,
deleters=0,
load_time=load_time,
load_type="grpc",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
@pytest.mark.parametrize(
"obj_size, out_file, readers",
[
(4, "4kb_300.json", 300),
(16, "16kb_300.json", 300),
(64, "64kb_300.json", 300),
(128, "128kb_250.json", 250),
(512, "512kb_150.json", 150),
(1000, "1mb_150.json", 150),
(8000, "8mb_150.json", 150),
(32000, "32mb_100.json", 100),
(128000, "128mb_25.json", 25),
(512000, "512mb_25.json", 25),
],
)
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
)
@pytest.mark.benchmark
@pytest.mark.grpc
def test_grpc_benchmark_read(
self,
obj_size,
out_file,
readers,
load_time,
hosting: Hosting,
):
allure.dynamic.title(
f"Single gate benchmark read test - "
f"readers = {readers}, "
f"obj_size = {obj_size}, "
f"load_time = {load_time}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,
endpoint_attribute="rpc_endpoint",
)
endpoints = ",".join(endpoints_list[:1])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=1,
out_file=out_file,
obj_count=500,
writers=0,
readers=readers,
deleters=0,
load_time=load_time,
load_type="grpc",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
@pytest.mark.parametrize(
"obj_size, out_file, writers",
[
(4, "4kb_300.json", 300),
(16, "16kb_250.json", 250),
(64, "64kb_250.json", 250),
(128, "128kb_250.json", 250),
(512, "512kb_200.json", 200),
(1000, "1mb_200.json", 200),
(8000, "8mb_150.json", 150),
(32000, "32mb_150.json", 150),
(128000, "128mb_100.json", 100),
(512000, "512mb_50.json", 50),
],
)
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
)
@pytest.mark.benchmark
@pytest.mark.http
def test_http_benchmark_write(
self,
obj_size,
out_file,
writers,
load_time,
hosting: Hosting,
):
allure.dynamic.title(
f"Single gate benchmark write test - "
f"writers = {writers}, "
f"obj_size = {obj_size}, "
f"load_time = {load_time}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=HTTP_GATE_SERVICE_NAME_REGEX,
endpoint_attribute="endpoint",
)
endpoints = ",".join(endpoints_list[:1])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=CONTAINERS_COUNT,
out_file=out_file,
obj_count=OBJ_COUNT,
writers=writers,
readers=0,
deleters=0,
load_time=load_time,
load_type="http",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
@pytest.mark.parametrize(
"obj_size, out_file, writers, readers",
[
(8000, "8mb_350.json", 245, 105),
(32000, "32mb_300.json", 210, 90),
(128000, "128mb_100.json", 70, 30),
(512000, "512mb_70.json", 49, 21),
],
)
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
)
@pytest.mark.benchmark
@pytest.mark.http
def test_http_benchmark_write_read_70_30(
self,
obj_size,
out_file,
writers,
readers,
load_time,
hosting: Hosting,
):
allure.dynamic.title(
f"Single gate benchmark write + read (70%/30%) test - "
f"writers = {writers}, "
f"readers = {readers}, "
f"obj_size = {obj_size}, "
f"load_time = {load_time}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=HTTP_GATE_SERVICE_NAME_REGEX,
endpoint_attribute="endpoint",
)
endpoints = ",".join(endpoints_list[:1])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=CONTAINERS_COUNT,
out_file=out_file,
obj_count=500,
writers=writers,
readers=readers,
deleters=0,
load_time=load_time,
load_type="http",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
@pytest.mark.parametrize(
"obj_size, out_file, readers",
[
(4, "4kb_300.json", 300),
(16, "16kb_300.json", 300),
(64, "64kb_300.json", 300),
(128, "128kb_250.json", 250),
(512, "512kb_150.json", 150),
(1000, "1mb_150.json", 150),
(8000, "8mb_150.json", 150),
(32000, "32mb_100.json", 100),
(128000, "128mb_25.json", 25),
(512000, "512mb_25.json", 25),
],
)
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
)
@pytest.mark.benchmark
@pytest.mark.http
def test_http_benchmark_read(
self,
obj_size,
out_file,
readers,
load_time,
hosting: Hosting,
):
allure.dynamic.title(
f"Single gate benchmark read test - "
f"readers = {readers}, "
f"obj_size = {obj_size}, "
f"load_time = {load_time}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=HTTP_GATE_SERVICE_NAME_REGEX,
endpoint_attribute="endpoint",
)
endpoints = ",".join(endpoints_list[:1])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=1,
out_file=out_file,
obj_count=500,
writers=0,
readers=readers,
deleters=0,
load_time=load_time,
load_type="http",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
@pytest.mark.load
@pytest.mark.s3
class TestS3Load:
@pytest.fixture(autouse=True)
def clear_cache_and_data(self, hosting: Hosting):
clear_cache_and_data(hosting=hosting)
@pytest.fixture(scope="session", autouse=True)
def init_s3_client(self, hosting: Hosting):
init_s3_client(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
hosting=hosting,
)
@pytest.mark.parametrize(
"obj_size, out_file, writers",
[
(4, "4kb_300.json", 400),
(16, "16kb_250.json", 350),
(64, "64kb_250.json", 350),
(128, "128kb_250.json", 300),
(512, "512kb_200.json", 250),
(1000, "1mb_200.json", 250),
(8000, "8mb_150.json", 200),
(32000, "32mb_150.json", 200),
(128000, "128mb_100.json", 150),
(512000, "512mb_50.json", 50),
],
)
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
)
@pytest.mark.benchmark
@pytest.mark.s3
def test_s3_benchmark_write(
self,
obj_size,
out_file,
writers,
load_time,
hosting: Hosting,
):
allure.dynamic.title(
f"Single gate benchmark write test - "
f"writers = {writers}, "
f"obj_size = {obj_size}, "
f"load_time = {load_time}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=S3_GATE_SERVICE_NAME_REGEX,
endpoint_attribute="endpoint",
)
endpoints = ",".join(endpoints_list[:1])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=CONTAINERS_COUNT,
out_file=out_file,
obj_count=OBJ_COUNT,
writers=writers,
readers=0,
deleters=0,
load_time=load_time,
load_type="s3",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
@pytest.mark.parametrize(
"obj_size, out_file, writers, readers",
[
(4, "4kb_350.json", 210, 90),
(16, "16kb_300.json", 210, 90),
(64, "64kb_300.json", 210, 90),
(128, "128kb_300.json", 210, 90),
(512, "512kb_300.json", 210, 90),
(1000, "1mb_300.json", 210, 90),
(8000, "8mb_250.json", 175, 75),
(32000, "32mb_200.json", 140, 60),
(128000, "128mb_100.json", 70, 30),
(512000, "512mb_50.json", 35, 15),
],
)
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
)
@pytest.mark.benchmark
@pytest.mark.s3
def test_s3_benchmark_write_read_70_30(
self,
obj_size,
out_file,
writers,
readers,
load_time,
hosting: Hosting,
):
allure.dynamic.title(
f"Single gate benchmark write + read (70%/30%) test - "
f"writers = {writers}, "
f"readers = {readers}, "
f"obj_size = {obj_size}, "
f"load_time = {load_time}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=S3_GATE_SERVICE_NAME_REGEX,
endpoint_attribute="endpoint",
)
endpoints = ",".join(endpoints_list[:1])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=CONTAINERS_COUNT,
out_file=out_file,
obj_count=500,
writers=writers,
readers=readers,
deleters=0,
load_time=load_time,
load_type="s3",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
@pytest.mark.parametrize(
"obj_size, out_file, readers",
[
(4, "4kb_400.json", 400),
(16, "16kb_400.json", 400),
(64, "64kb_350.json", 350),
(128, "128kb_300.json", 300),
(512, "512kb_300.json", 300),
(1000, "1mb_300.json", 300),
(8000, "8mb_300.json", 300),
(32000, "32mb_200.json", 200),
(128000, "128mb_150.json", 150),
(512000, "512mb_50.json", 50),
],
)
@pytest.mark.parametrize(
"load_time", [LoadTime.EXPECTED_MAXIMUM.value, LoadTime.PMI_EXPECTATION.value]
)
@pytest.mark.benchmark
@pytest.mark.s3
def test_s3_benchmark_read(
self,
obj_size,
out_file,
readers,
load_time,
hosting: Hosting,
):
allure.dynamic.title(
f"Single gate benchmark read test - "
f"readers = {readers}, "
f"obj_size = {obj_size}, "
f"load_time = {load_time}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=S3_GATE_SERVICE_NAME_REGEX,
endpoint_attribute="endpoint",
)
endpoints = ",".join(endpoints_list[:1])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=1,
out_file=out_file,
obj_count=500,
writers=0,
readers=readers,
deleters=0,
load_time=load_time,
load_type="s3",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
load_nodes=load_nodes_list,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,

View file

@ -7,7 +7,7 @@ import allure
import pytest
from cluster import StorageNode
from cluster_test_base import ClusterTestBase
from common import COMPLEX_OBJ_SIZE, MORPH_BLOCK_TIME, NEOFS_CONTRACT_CACHE_TIMEOUT
from common import MORPH_BLOCK_TIME, NEOFS_CONTRACT_CACHE_TIMEOUT
from epoch import tick_epoch
from file_helper import generate_file
from grpc_responses import OBJECT_NOT_FOUND, error_matches_status
@ -49,9 +49,10 @@ check_nodes: list[StorageNode] = []
class TestNodeManagement(ClusterTestBase):
@pytest.fixture
@allure.title("Create container and pick the node with data")
def create_container_and_pick_node(self, default_wallet: str) -> Tuple[str, StorageNode]:
default_wallet
file_path = generate_file()
def create_container_and_pick_node(
self, default_wallet: str, simple_object_size
) -> Tuple[str, StorageNode]:
file_path = generate_file(simple_object_size)
placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
endpoint = self.cluster.default_rpc_endpoint
@ -126,11 +127,15 @@ class TestNodeManagement(ClusterTestBase):
self,
default_wallet,
return_nodes_after_test_run,
simple_object_size,
):
"""
This test removes one node from the cluster, then adds it back. The test uses basic control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
"""
wallet = default_wallet
placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"
source_file_path = generate_file()
source_file_path = generate_file(simple_object_size)
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes[1:])
@ -202,62 +207,6 @@ class TestNodeManagement(ClusterTestBase):
)
wait_object_replication(cid, oid, 4, shell=self.shell, nodes=storage_nodes)
@allure.title("Control Operations with storage nodes")
@pytest.mark.node_mgmt
def test_nodes_management(self, temp_directory):
"""
This test checks base control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
"""
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes)
alive_node = random.choice(list(set(storage_nodes) - {random_node}))
# Calculate public key that identifies node in netmap
random_node_netmap_key = random_node.get_wallet_public_key()
with allure.step(f"Check node ({random_node}) is in netmap"):
snapshot = get_netmap_snapshot(node=alive_node, shell=self.shell)
assert (
random_node_netmap_key in snapshot
), f"Expected node {random_node} to be in netmap"
with allure.step("Run health check for all storage nodes"):
for node in self.cluster.storage_nodes:
health_check = storage_node_healthcheck(node)
assert (
health_check.health_status == "READY"
and health_check.network_status == "ONLINE"
)
with allure.step(f"Move node ({random_node}) to offline state"):
storage_node_set_status(random_node, status="offline")
sleep(parse_time(MORPH_BLOCK_TIME))
tick_epoch(self.shell, self.cluster)
with allure.step(f"Check node {random_node} went to offline"):
health_check = storage_node_healthcheck(random_node)
assert (
health_check.health_status == "READY" and health_check.network_status == "OFFLINE"
)
snapshot = get_netmap_snapshot(node=alive_node, shell=self.shell)
assert (
random_node_netmap_key not in snapshot
), f"Expected node {random_node} not in netmap"
with allure.step(f"Check node {random_node} went to online"):
storage_node_set_status(random_node, status="online")
sleep(parse_time(MORPH_BLOCK_TIME))
tick_epoch(self.shell, self.cluster)
with allure.step(f"Check node {random_node} went to online"):
health_check = storage_node_healthcheck(random_node)
assert health_check.health_status == "READY" and health_check.network_status == "ONLINE"
snapshot = get_netmap_snapshot(node=alive_node, shell=self.shell)
assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap"
@pytest.mark.parametrize(
"placement_rule,expected_copies",
[
@@ -272,12 +221,14 @@ class TestNodeManagement(ClusterTestBase):
)
@pytest.mark.node_mgmt
@allure.title("Test object copies based on placement policy")
def test_placement_policy(self, default_wallet, placement_rule, expected_copies):
def test_placement_policy(
self, default_wallet, placement_rule, expected_copies, simple_object_size
):
"""
This test checks the object's copies based on the container's placement policy.
"""
wallet = default_wallet
file_path = generate_file()
file_path = generate_file(simple_object_size)
self.validate_object_copies(wallet, placement_rule, file_path, expected_copies)
@pytest.mark.parametrize(
@@ -332,14 +283,19 @@ class TestNodeManagement(ClusterTestBase):
@pytest.mark.node_mgmt
@allure.title("Test object copies and storage nodes based on placement policy")
def test_placement_policy_with_nodes(
self, default_wallet, placement_rule, expected_copies, expected_nodes_id: set[int]
self,
default_wallet,
placement_rule,
expected_copies,
expected_nodes_id: set[int],
simple_object_size,
):
"""
Based on the container's placement policy, check that storage nodes are picked correctly and the
object has the correct number of copies.
"""
wallet = default_wallet
file_path = generate_file()
file_path = generate_file(simple_object_size)
cid, oid, found_nodes = self.validate_object_copies(
wallet, placement_rule, file_path, expected_copies
)
@@ -356,24 +312,28 @@ class TestNodeManagement(ClusterTestBase):
)
@pytest.mark.node_mgmt
@allure.title("Negative cases for placement policy")
def test_placement_policy_negative(self, default_wallet, placement_rule, expected_copies):
def test_placement_policy_negative(
self, default_wallet, placement_rule, expected_copies, simple_object_size
):
"""
Negative test for placement policy.
"""
wallet = default_wallet
file_path = generate_file()
file_path = generate_file(simple_object_size)
with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"):
self.validate_object_copies(wallet, placement_rule, file_path, expected_copies)
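The parametrized rule values are truncated by the hunk above; the usual trigger for this error is a rule that SELECTs more nodes than the netmap offers. A hypothetical example (the rule string is an assumption, the error pattern is the one matched in this test):
# Hypothetical over-selection: fails on a cluster with fewer than five storage nodes.
placement_rule = "REP 5 IN X CBF 1 SELECT 5 FROM * AS X"
with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"):
    self.validate_object_copies(wallet, placement_rule, file_path, 5)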
@pytest.mark.node_mgmt
@allure.title("NeoFS object could be dropped using control command")
def test_drop_object(self, default_wallet):
def test_drop_object(self, default_wallet, complex_object_size, simple_object_size):
"""
This test checks that an object can be dropped using the `neofs-cli control drop-objects` command.
"""
wallet = default_wallet
endpoint = self.cluster.default_rpc_endpoint
file_path_simple, file_path_complex = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_path_simple, file_path_complex = generate_file(simple_object_size), generate_file(
complex_object_size
)
locode = get_locode_from_random_node(self.cluster)
rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
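With REP 1 and this FILTER, the single replica can only land on a node whose UN-LOCODE netmap attribute equals the fetched value, so the test knows exactly which node holds the object before dropping it. Illustrative construction (the locode value here is hypothetical; the test reads it from a random node):
locode = "DE BER"  # hypothetical UN/LOCODE value
rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"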
@@ -411,9 +371,10 @@ class TestNodeManagement(ClusterTestBase):
self,
default_wallet,
create_container_and_pick_node,
simple_object_size,
):
wallet = default_wallet
file_path = generate_file()
file_path = generate_file(simple_object_size)
cid, node = create_container_and_pick_node
original_oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)

View file

@@ -5,14 +5,19 @@ import sys
import allure
import pytest
from cluster import Cluster
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from container import create_container
from complex_object_actions import get_complex_object_split_ranges
from file_helper import generate_file, get_file_content, get_file_hash
from grpc_responses import OUT_OF_RANGE
from grpc_responses import (
INVALID_LENGTH_SPECIFIER,
INVALID_OFFSET_SPECIFIER,
INVALID_RANGE_OVERFLOW,
INVALID_RANGE_ZERO_LENGTH,
OUT_OF_RANGE,
)
from neofs_testlib.shell import Shell
from pytest import FixtureRequest
from python_keywords.container import create_container
from python_keywords.neofs_verbs import (
get_netmap_netinfo,
get_object_from_random_node,
get_range,
get_range_hash,
@@ -42,48 +47,50 @@ RANGES_COUNT = 4 # by quarters
RANGE_MIN_LEN = 10
RANGE_MAX_LEN = 500
# Used for static ranges found with issues
STATIC_RANGES = {
SIMPLE_OBJ_SIZE: [],
COMPLEX_OBJ_SIZE: [],
}
STATIC_RANGES = {}
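STATIC_RANGES now keys on the actual object size instead of the removed size constants, so a regression range can be pinned per size; a hypothetical entry:
# Hypothetical: pin once-problematic (offset, length) pairs for 1 MiB objects.
STATIC_RANGES[1024 * 1024] = [(0, 10), (500, 500)]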
def generate_ranges(file_size: int, max_object_size: int) -> list[(int, int)]:
file_range_step = file_size / RANGES_COUNT
def generate_ranges(
storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, cluster: Cluster
) -> list[(int, int)]:
file_range_step = storage_object.size / RANGES_COUNT
file_ranges = []
file_ranges_to_test = []
for i in range(0, RANGES_COUNT):
file_ranges.append((int(file_range_step * i), int(file_range_step * (i + 1))))
file_ranges.append((int(file_range_step * i), int(file_range_step)))
# For a simple object we can read all file ranges without spending too much test time
if file_size == SIMPLE_OBJ_SIZE:
if storage_object.size < max_object_size:
file_ranges_to_test.extend(file_ranges)
# For a complex object we need to fetch multiple child objects from different nodes.
if file_size == COMPLEX_OBJ_SIZE:
else:
assert (
file_size >= RANGE_MAX_LEN + max_object_size
), f"Complex object size should be at least {max_object_size + RANGE_MAX_LEN}. Current: {file_size}"
file_ranges_to_test.append((RANGE_MAX_LEN, RANGE_MAX_LEN + max_object_size))
storage_object.size >= RANGE_MAX_LEN + max_object_size
), f"Complex object size should be at least {max_object_size + RANGE_MAX_LEN}. Current: {storage_object.size}"
file_ranges_to_test.append((RANGE_MAX_LEN, max_object_size - RANGE_MAX_LEN))
file_ranges_to_test.extend(get_complex_object_split_ranges(storage_object, shell, cluster))
# Special cases: read some bytes from the start and some from the end of the object
file_ranges_to_test.append((0, RANGE_MIN_LEN))
file_ranges_to_test.append((file_size - RANGE_MIN_LEN, file_size))
file_ranges_to_test.append((storage_object.size - RANGE_MIN_LEN, RANGE_MIN_LEN))
for start, end in file_ranges:
for offset, length in file_ranges:
range_length = random.randint(RANGE_MIN_LEN, RANGE_MAX_LEN)
range_start = random.randint(start, end)
range_start = random.randint(offset, offset + length)
file_ranges_to_test.append((range_start, min(range_start + range_length, file_size)))
file_ranges_to_test.append(
(range_start, min(range_length, storage_object.size - range_start))
)
file_ranges_to_test.extend(STATIC_RANGES[file_size])
file_ranges_to_test.extend(STATIC_RANGES.get(storage_object.size, []))
return file_ranges_to_test
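Note the convention change: every pair returned here is (offset, length), not (start, end), and the list now also includes the child-object boundaries from get_complex_object_split_ranges. Downstream the pairs are rendered as CLI range cuts; a minimal sketch:
# Sample output of generate_ranges; pairs are (offset, length).
file_ranges_to_test = [(0, 10), (500, 500)]
for range_start, range_len in file_ranges_to_test:
    range_cut = f"{range_start}:{range_len}"  # "0:10", "500:500" for get_range(_hash)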
@pytest.fixture(
params=[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
params=[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
# Scope session to upload/delete each files set only once
scope="module",
@@ -132,7 +139,7 @@ def storage_objects(
class TestObjectApi(ClusterTestBase):
@allure.title("Validate object storage policy by native API")
def test_object_storage_policies(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], simple_object_size
):
"""
Validate object storage policy
@@ -143,7 +150,7 @@ class TestObjectApi(ClusterTestBase):
with allure.step("Validate storage policy for objects"):
for storage_object in storage_objects:
if storage_object.size == SIMPLE_OBJ_SIZE:
if storage_object.size == simple_object_size:
copies = get_simple_object_copies(
storage_object.wallet_file_path,
storage_object.cid,
@@ -257,7 +264,9 @@ class TestObjectApi(ClusterTestBase):
@allure.title("Validate object search with removed items")
@pytest.mark.parametrize(
"object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"]
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_object_search_should_return_tombstone_items(
self, default_wallet: str, request: FixtureRequest, object_size: int
@@ -330,10 +339,10 @@ class TestObjectApi(ClusterTestBase):
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_object_get_range_hash(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], max_object_size
):
"""
Validate get_range_hash for object by common gRPC API
Validate get_range_hash for object by native gRPC API
"""
allure.dynamic.title(
f"Validate native get_range_hash object API for {request.node.callspec.id}"
@@ -343,16 +352,13 @@ class TestObjectApi(ClusterTestBase):
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_path = storage_objects[0].file_path
net_info = get_netmap_netinfo(
wallet, self.shell, endpoint=self.cluster.default_rpc_endpoint
)
max_object_size = net_info["maximum_object_size"]
file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size)
file_ranges_to_test = generate_ranges(
storage_objects[0], max_object_size, self.shell, self.cluster
)
logging.info(f"Ranges used in test {file_ranges_to_test}")
for range_start, range_end in file_ranges_to_test:
range_len = range_end - range_start
for range_start, range_len in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range hash ({range_cut})"):
for oid in oids:
@@ -372,10 +378,10 @@ class TestObjectApi(ClusterTestBase):
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_object_get_range(
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo]
self, request: FixtureRequest, storage_objects: list[StorageObjectInfo], max_object_size
):
"""
Validate get_range for object by common gRPC API
Validate get_range for object by native gRPC API
"""
allure.dynamic.title(f"Validate native get_range object API for {request.node.callspec.id}")
@@ -383,16 +389,13 @@ class TestObjectApi(ClusterTestBase):
cid = storage_objects[0].cid
oids = [storage_object.oid for storage_object in storage_objects[:2]]
file_path = storage_objects[0].file_path
net_info = get_netmap_netinfo(
wallet, self.shell, endpoint=self.cluster.default_rpc_endpoint
)
max_object_size = net_info["maximum_object_size"]
file_ranges_to_test = generate_ranges(storage_objects[0].size, max_object_size)
file_ranges_to_test = generate_ranges(
storage_objects[0], max_object_size, self.shell, self.cluster
)
logging.info(f"Ranges used in test {file_ranges_to_test}")
for range_start, range_end in file_ranges_to_test:
range_len = range_end - range_start
for range_start, range_len in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range ({range_cut})"):
for oid in oids:
@@ -420,7 +423,7 @@ class TestObjectApi(ClusterTestBase):
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range negative for object by common gRPC API
Validate get_range negative for object by native gRPC API
"""
allure.dynamic.title(
f"Validate native get_range negative object API for {request.node.callspec.id}"
@@ -435,20 +438,30 @@ class TestObjectApi(ClusterTestBase):
RANGE_MIN_LEN < file_size
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
file_ranges_to_test = [
file_ranges_to_test: list[tuple(int, int, str)] = [
# Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN),
(file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2),
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
# Offset is ok, but the length is enormous (e.g. MaxUint64) so that offset+length wraps around and still looks "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1),
(RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
# Length is zero
(10, 0, INVALID_RANGE_ZERO_LENGTH),
# Negative values
(-1, 1, INVALID_OFFSET_SPECIFIER),
(10, -5, INVALID_LENGTH_SPECIFIER),
]
for range_start, range_len in file_ranges_to_test:
for range_start, range_len, expected_error in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
expected_error = (
expected_error.format(range=range_cut)
if "{range}" in expected_error
else expected_error
)
with allure.step(f"Get range ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=OUT_OF_RANGE):
with pytest.raises(Exception, match=expected_error):
get_range(
wallet,
cid,
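The templating step above lets a single expected-error constant embed the offending range. A sketch of how such a constant could look; the real message strings live in grpc_responses, so the text below is an assumption:
# Hypothetical template; the actual string is defined in grpc_responses.
INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
range_cut = "-1:1"
expected_error = (
    INVALID_OFFSET_SPECIFIER.format(range=range_cut)
    if "{range}" in INVALID_OFFSET_SPECIFIER
    else INVALID_OFFSET_SPECIFIER
)  # -> "invalid '-1:1' range offset specifier"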
@@ -465,7 +478,7 @@ class TestObjectApi(ClusterTestBase):
storage_objects: list[StorageObjectInfo],
):
"""
Validate get_range_hash negative for object by common gRPC API
Validate get_range_hash negative for object by native gRPC API
"""
allure.dynamic.title(
f"Validate native get_range_hash negative object API for {request.node.callspec.id}"
@@ -480,20 +493,30 @@ class TestObjectApi(ClusterTestBase):
RANGE_MIN_LEN < file_size
), f"Incorrect test setup. File size ({file_size}) is less than RANGE_MIN_LEN ({RANGE_MIN_LEN})"
file_ranges_to_test = [
file_ranges_to_test: list[tuple(int, int, str)] = [
# Offset is bigger than the file size, the length is small.
(file_size + 1, RANGE_MIN_LEN),
(file_size + 1, RANGE_MIN_LEN, OUT_OF_RANGE),
# Offset is ok, but offset+length is too big.
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2),
(file_size - RANGE_MIN_LEN, RANGE_MIN_LEN * 2, OUT_OF_RANGE),
# Offset is ok, but the length is enormous (e.g. MaxUint64) so that offset+length wraps around and still looks "valid".
(RANGE_MIN_LEN, sys.maxsize * 2 + 1),
(RANGE_MIN_LEN, sys.maxsize * 2 + 1, INVALID_RANGE_OVERFLOW),
# Length is zero
(10, 0, INVALID_RANGE_ZERO_LENGTH),
# Negative values
(-1, 1, INVALID_OFFSET_SPECIFIER),
(10, -5, INVALID_LENGTH_SPECIFIER),
]
for range_start, range_len in file_ranges_to_test:
for range_start, range_len, expected_error in file_ranges_to_test:
range_cut = f"{range_start}:{range_len}"
with allure.step(f"Get range ({range_cut})"):
expected_error = (
expected_error.format(range=range_cut)
if "{range}" in expected_error
else expected_error
)
with allure.step(f"Get range hash ({range_cut})"):
for oid in oids:
with pytest.raises(Exception, match=OUT_OF_RANGE):
with pytest.raises(Exception, match=expected_error):
get_range_hash(
wallet,
cid,

View file

@@ -0,0 +1,158 @@
import allure
import pytest
from cluster import Cluster
from container import REP_2_FOR_3_NODES_PLACEMENT_RULE, SINGLE_PLACEMENT_RULE, create_container
from epoch import get_epoch
from neofs_testlib.shell import Shell
from neofs_verbs import delete_object, get_object
from pytest import FixtureRequest
from python_keywords.acl import EACLAccess, EACLOperation, EACLRole, EACLRule, form_bearertoken_file
from wellknown_acl import EACL_PUBLIC_READ_WRITE
from helpers.container import StorageContainer, StorageContainerInfo
from helpers.test_control import expect_not_raises
from helpers.wallet import WalletFile
from steps.cluster_test_base import ClusterTestBase
from steps.storage_object import StorageObjectInfo
@pytest.fixture(scope="module")
@allure.title("Create bearer token for OTHERS with all operations allowed for all containers")
def bearer_token_file_all_allow(default_wallet: str, client_shell: Shell, cluster: Cluster) -> str:
bearer = form_bearertoken_file(
default_wallet,
"",
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in EACLOperation
],
shell=client_shell,
endpoint=cluster.default_rpc_endpoint,
)
return bearer
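The empty container ID matches the "all containers" scope from the fixture title, which is why the token can be module-scoped and reused. Tests hand the resulting file to object verbs via the `bearer=` argument; a sketch (wallet/cid/oid/shell/endpoint stand in for the values used below):
# Sketch: any object verb accepts the signed token file via `bearer=`.
delete_object(
    wallet, cid, oid, shell,
    endpoint=endpoint,
    bearer=bearer_token_file_all_allow,
)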
@pytest.fixture(scope="module")
@allure.title("Create user container for bearer token usage")
def user_container(
default_wallet: str, client_shell: Shell, cluster: Cluster, request: FixtureRequest
) -> StorageContainer:
container_id = create_container(
default_wallet,
shell=client_shell,
rule=request.param,
basic_acl=EACL_PUBLIC_READ_WRITE,
endpoint=cluster.default_rpc_endpoint,
)
# Deliberately using s3gate wallet here to test bearer token
s3gate = cluster.s3gates[0]
return StorageContainer(
StorageContainerInfo(container_id, WalletFile.from_node(s3gate)),
client_shell,
cluster,
)
@pytest.fixture()
def storage_objects(
user_container: StorageContainer,
bearer_token_file_all_allow: str,
request: FixtureRequest,
client_shell: Shell,
cluster: Cluster,
) -> list[StorageObjectInfo]:
epoch = get_epoch(client_shell, cluster)
storage_objects: list[StorageObjectInfo] = []
for node in cluster.storage_nodes:
storage_objects.append(
user_container.generate_object(
request.param,
epoch + 3,
bearer_token=bearer_token_file_all_allow,
endpoint=node.get_rpc_endpoint(),
)
)
return storage_objects
@pytest.mark.smoke
@pytest.mark.bearer
class TestObjectApiWithBearerToken(ClusterTestBase):
@pytest.mark.parametrize(
"user_container",
[SINGLE_PLACEMENT_RULE],
ids=["single replica for all nodes placement rule"],
indirect=True,
)
@pytest.mark.parametrize(
"storage_objects",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
indirect=True,
)
def test_delete_object_with_s3_wallet_bearer(
self,
storage_objects: list[StorageObjectInfo],
bearer_token_file_all_allow: str,
request: FixtureRequest,
):
allure.dynamic.title(
f"Object can be deleted from any node using s3gate wallet with bearer token for {request.node.callspec.id}"
)
s3_gate_wallet = self.cluster.s3gates[0]
with allure.step("Try to delete each object from first storage node"):
for storage_object in storage_objects:
with expect_not_raises():
delete_object(
s3_gate_wallet.get_wallet_path(),
storage_object.cid,
storage_object.oid,
self.shell,
endpoint=self.cluster.default_rpc_endpoint,
bearer=bearer_token_file_all_allow,
wallet_config=s3_gate_wallet.get_wallet_config_path(),
)
@pytest.mark.parametrize(
"user_container",
[REP_2_FOR_3_NODES_PLACEMENT_RULE],
ids=["2 replicas for 3 nodes placement rule"],
indirect=True,
)
@pytest.mark.parametrize(
"file_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_get_object_with_s3_wallet_bearer_from_all_nodes(
self,
user_container: StorageContainer,
file_size: int,
bearer_token_file_all_allow: str,
request: FixtureRequest,
):
allure.dynamic.title(
f"Object can be fetched from any node using s3gate wallet with bearer token for {request.node.callspec.id}"
)
s3_gate_wallet = self.cluster.s3gates[0]
with allure.step("Put one object to container"):
epoch = self.get_epoch()
storage_object = user_container.generate_object(
file_size, epoch + 3, bearer_token=bearer_token_file_all_allow
)
with allure.step("Try to fetch object from each storage node"):
for node in self.cluster.storage_nodes:
with expect_not_raises():
get_object(
s3_gate_wallet.get_wallet_path(),
storage_object.cid,
storage_object.oid,
self.shell,
endpoint=node.get_rpc_endpoint(),
bearer=bearer_token_file_all_allow,
wallet_config=s3_gate_wallet.get_wallet_config_path(),
)

View file

@@ -2,12 +2,11 @@ import logging
import allure
import pytest
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from container import create_container
from epoch import get_epoch, tick_epoch
from file_helper import generate_file, get_file_hash
from grpc_responses import OBJECT_NOT_FOUND
from pytest import FixtureRequest
from python_keywords.container import create_container
from python_keywords.neofs_verbs import get_object_from_random_node, put_object_to_random_node
from utility import wait_for_gc_pass_on_storage_nodes
@@ -18,10 +17,12 @@ logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.grpc_api
class ObjectApiLifetimeTest(ClusterTestBase):
class TestObjectApiLifetime(ClusterTestBase):
@allure.title("Test object lifetime")
@pytest.mark.parametrize(
"object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"]
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_object_api_lifetime(
self, default_wallet: str, request: FixtureRequest, object_size: int
@@ -48,7 +49,7 @@ class ObjectApiLifetimeTest(ClusterTestBase):
with allure.step("Tick two epochs"):
for _ in range(2):
tick_epoch(self.shell, self.cluster)
self.tick_epoch()
# Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes()

View file

@@ -5,9 +5,8 @@ import allure
import pytest
from cluster import Cluster
from cluster_test_base import ClusterTestBase
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE, STORAGE_GC_TIME
from complex_object_actions import get_link_object
from container import create_container
from common import STORAGE_GC_TIME
from complex_object_actions import get_link_object, get_storage_object_chunks
from epoch import ensure_fresh_epoch, get_epoch, tick_epoch
from grpc_responses import (
LIFETIME_REQUIRED,
@@ -19,16 +18,17 @@ from grpc_responses import (
OBJECT_NOT_FOUND,
)
from neofs_testlib.shell import Shell
from node_management import drop_object
from pytest import FixtureRequest
from python_keywords.container import create_container
from python_keywords.neofs_verbs import delete_object, head_object, lock_object
from storage_policy import get_nodes_with_object
from test_control import expect_not_raises, wait_for_success
from utility import parse_time, wait_for_gc_pass_on_storage_nodes
import steps
from helpers.container import StorageContainer, StorageContainerInfo
from helpers.storage_object_info import LockObjectInfo, StorageObjectInfo
from helpers.wallet import WalletFactory, WalletFile
from steps.cluster_test_base import ClusterTestBase
from steps.storage_object import delete_objects
logger = logging.getLogger("NeoLogger")
@@ -65,6 +65,9 @@ def locked_storage_object(
cluster: Cluster,
request: FixtureRequest,
):
"""
The intention of this fixture is to provide a storage object which is NOT expected to be deleted during the test's act phase.
"""
with allure.step("Creating locked object"):
current_epoch = ensure_fresh_epoch(client_shell, cluster)
expiration_epoch = current_epoch + FIXTURE_LOCK_LIFETIME
@@ -117,33 +120,35 @@ def locked_storage_object(
@pytest.mark.sanity
@pytest.mark.grpc_object_lock
class TestObjectLockWithGrpc(ClusterTestBase):
def get_storage_object_chunks(self, storage_object: StorageObjectInfo):
with allure.step(f"Get complex object chunks (f{storage_object.oid})"):
split_object_id = get_link_object(
@pytest.fixture()
def new_locked_storage_object(
self, user_container: StorageContainer, request: FixtureRequest
) -> StorageObjectInfo:
"""
The intention of this fixture is to provide a new storage object for tests which may delete or corrupt the object or its complementary objects,
so we need a fresh one each time it is requested.
"""
with allure.step("Creating locked object"):
current_epoch = self.get_epoch()
storage_object = user_container.generate_object(
request.param, expire_at=current_epoch + FIXTURE_OBJECT_LIFETIME
)
lock_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
self.shell,
self.cluster.storage_nodes,
is_direct=False,
)
head = head_object(
storage_object.wallet_file_path,
storage_object.cid,
split_object_id,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=FIXTURE_LOCK_LIFETIME,
)
chunks_object_ids = []
if "split" in head["header"] and "children" in head["header"]["split"]:
chunks_object_ids = head["header"]["split"]["children"]
return chunks_object_ids
return storage_object
@allure.title("Locked object should be protected from deletion")
@pytest.mark.parametrize(
"locked_storage_object",
[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
indirect=True,
)
@@ -170,7 +175,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@allure.title("Lock object itself should be protected from deletion")
# We operate only with the lock object here, so no complex object is needed in this test
@pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True)
@pytest.mark.parametrize(
"locked_storage_object", [pytest.lazy_fixture("simple_object_size")], indirect=True
)
def test_lock_object_itself_cannot_be_deleted(
self,
locked_storage_object: StorageObjectInfo,
@@ -193,7 +200,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@allure.title("Lock object itself cannot be locked")
# We operate only with the lock object here, so no complex object is needed in this test
@pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True)
@pytest.mark.parametrize(
"locked_storage_object", [pytest.lazy_fixture("simple_object_size")], indirect=True
)
def test_lock_object_cannot_be_locked(
self,
locked_storage_object: StorageObjectInfo,
@@ -217,7 +226,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@allure.title("Cannot lock object without lifetime and expire_at fields")
# We operate only with the lock object here, so no complex object is needed in this test
@pytest.mark.parametrize("locked_storage_object", [SIMPLE_OBJ_SIZE], indirect=True)
@pytest.mark.parametrize(
"locked_storage_object", [pytest.lazy_fixture("simple_object_size")], indirect=True
)
@pytest.mark.parametrize(
"wrong_lifetime,wrong_expire_at,expected_error",
[
@@ -259,7 +270,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@allure.title("Expired object should be deleted after locks are expired")
@pytest.mark.parametrize(
"object_size", [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=["simple object", "complex object"]
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_expired_object_should_be_deleted_after_locks_are_expired(
self,
@@ -284,7 +297,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
lifetime=3,
lifetime=2,
)
lock_object(
storage_object.wallet_file_path,
@@ -292,12 +305,11 @@ class TestObjectLockWithGrpc(ClusterTestBase):
storage_object.oid,
self.shell,
self.cluster.default_rpc_endpoint,
expire_at=current_epoch + 3,
expire_at=current_epoch + 2,
)
with allure.step("Check object is not deleted at expiration time"):
self.tick_epoch()
self.tick_epoch()
self.tick_epochs(2)
# Must wait to ensure object is not deleted
wait_for_gc_pass_on_storage_nodes()
with expect_not_raises():
@@ -327,7 +339,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@allure.title("Should be possible to lock multiple objects at once")
@pytest.mark.parametrize(
"object_size",
[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_should_be_possible_to_lock_multiple_objects_at_once(
@@ -382,7 +394,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@allure.title("Already outdated lock should not be applied")
@pytest.mark.parametrize(
"object_size",
[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_already_outdated_lock_should_not_be_applied(
@@ -421,7 +433,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@allure.title("After lock expiration with lifetime user should be able to delete object")
@pytest.mark.parametrize(
"object_size",
[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
@expect_not_raises()
@@ -439,7 +451,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
)
current_epoch = self.ensure_fresh_epoch()
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 1)
storage_object = user_container.generate_object(object_size, expire_at=current_epoch + 5)
lock_object(
storage_object.wallet_file_path,
@@ -450,7 +462,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
lifetime=1,
)
self.tick_epoch()
self.tick_epochs(2)
with expect_not_raises():
delete_object(
storage_object.wallet_file_path,
@@ -463,7 +475,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@allure.title("After lock expiration with expire_at user should be able to delete object")
@pytest.mark.parametrize(
"object_size",
[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
@expect_not_raises()
@@ -493,7 +505,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
expire_at=current_epoch + 1,
)
self.tick_epoch()
self.tick_epochs(2)
with expect_not_raises():
delete_object(
@@ -508,7 +520,7 @@ class TestObjectLockWithGrpc(ClusterTestBase):
@pytest.mark.parametrize(
# Only complex objects are required for this test
"locked_storage_object",
[COMPLEX_OBJ_SIZE],
[pytest.lazy_fixture("complex_object_size")],
indirect=True,
)
def test_complex_object_chunks_should_also_be_protected_from_deletion(
@@ -519,7 +531,9 @@ class TestObjectLockWithGrpc(ClusterTestBase):
Complex object chunks should also be protected from deletion
"""
chunk_object_ids = self.get_storage_object_chunks(locked_storage_object)
chunk_object_ids = get_storage_object_chunks(
locked_storage_object, self.shell, self.cluster
)
for chunk_object_id in chunk_object_ids:
with allure.step(f"Try to delete chunk object {chunk_object_id}"):
with pytest.raises(Exception, match=OBJECT_IS_LOCKED):
@@ -531,11 +545,90 @@ class TestObjectLockWithGrpc(ClusterTestBase):
self.cluster.default_rpc_endpoint,
)
@allure.title("Link object of locked complex object can be dropped")
@pytest.mark.grpc_control
@pytest.mark.parametrize(
"new_locked_storage_object",
# Only complex object is required
[pytest.lazy_fixture("complex_object_size")],
indirect=True,
)
def test_link_object_of_locked_complex_object_can_be_dropped(
self, new_locked_storage_object: StorageObjectInfo
):
link_object_id = get_link_object(
new_locked_storage_object.wallet_file_path,
new_locked_storage_object.cid,
new_locked_storage_object.oid,
self.shell,
self.cluster.storage_nodes,
)
with allure.step(f"Drop link object with id {link_object_id} from nodes"):
nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid,
link_object_id,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, link_object_id)
@allure.title("Chunks of locked complex object can be dropped")
@pytest.mark.grpc_control
@pytest.mark.parametrize(
"new_locked_storage_object",
# Only complex object is required
[pytest.lazy_fixture("complex_object_size")],
indirect=True,
)
def test_chunks_of_locked_complex_object_can_be_dropped(
self, new_locked_storage_object: StorageObjectInfo
):
chunk_objects = get_storage_object_chunks(
new_locked_storage_object, self.shell, self.cluster
)
for chunk_object_id in chunk_objects:
with allure.step(f"Drop chunk object with id {chunk_object_id} from nodes"):
nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid,
chunk_object_id,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, chunk_object_id)
@pytest.mark.grpc_control
@pytest.mark.parametrize(
"new_locked_storage_object",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
indirect=True,
)
def test_locked_object_can_be_dropped(
self, new_locked_storage_object: StorageObjectInfo, request: FixtureRequest
):
allure.dynamic.title(f"Locked {request.node.callspec.id} can be dropped")
nodes_with_object = get_nodes_with_object(
new_locked_storage_object.cid,
new_locked_storage_object.oid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
)
for node in nodes_with_object:
with expect_not_raises():
drop_object(node, new_locked_storage_object.cid, new_locked_storage_object.oid)
@allure.title("Link object of complex object should also be protected from deletion")
@pytest.mark.parametrize(
# Only complex objects are required for this test
"locked_storage_object",
[COMPLEX_OBJ_SIZE],
[pytest.lazy_fixture("complex_object_size")],
indirect=True,
)
def test_link_object_of_complex_object_should_also_be_protected_from_deletion(

View file

@@ -0,0 +1,131 @@
import logging
import allure
import pytest
from container import create_container
from file_helper import generate_file
from http_gate import get_object_and_verify_hashes, upload_via_http_gate_curl
from python_keywords.acl import (
EACLAccess,
EACLOperation,
EACLRole,
EACLRule,
bearer_token_base64_from_file,
create_eacl,
form_bearertoken_file,
set_eacl,
sign_bearer,
wait_for_cache_expired,
)
from wellknown_acl import PUBLIC_ACL
from steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_bearer(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_bearer.wallet = default_wallet
@pytest.fixture(scope="class")
def user_container(self) -> str:
return create_container(
wallet=self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
@pytest.fixture(scope="class")
def eacl_deny_for_others(self, user_container: str) -> None:
with allure.step(f"Set deny all operations for {EACLRole.OTHERS} via eACL"):
eacl = EACLRule(
access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=EACLOperation.PUT
)
set_eacl(
self.wallet,
user_container,
create_eacl(user_container, eacl, shell=self.shell),
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
wait_for_cache_expired()
@pytest.fixture(scope="class")
def bearer_token_no_limit_for_others(self, user_container: str) -> str:
with allure.step(f"Create bearer token for {EACLRole.OTHERS} with all operations allowed"):
bearer = form_bearertoken_file(
self.wallet,
user_container,
[
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.OTHERS)
for op in EACLOperation
],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
sign=False,
)
bearer_signed = f"{bearer}_signed"
sign_bearer(
shell=self.shell,
wallet_path=self.wallet,
eacl_rules_file_from=bearer,
eacl_rules_file_to=bearer_signed,
json=False,
)
return bearer_token_base64_from_file(bearer_signed)
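The base64 string returned here is what the HTTP gate expects in a standard bearer Authorization header; the restricted-put test below builds the curl fragment like this:
bearer = "..."  # base64 token from this fixture
headers = [f" -H 'Authorization: Bearer {bearer}'"]  # curl header fragment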
@allure.title(f"[negative] Put object without bearer token for {EACLRole.OTHERS}")
def test_unable_put_without_bearer_token(
self, simple_object_size: int, user_container: str, eacl_deny_for_others
):
eacl_deny_for_others
upload_via_http_gate_curl(
cid=user_container,
filepath=generate_file(simple_object_size),
endpoint=self.cluster.default_http_gate_endpoint,
error_pattern="access to object operation denied",
)
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_put_with_bearer_when_eacl_restrict(
self,
object_size: int,
user_container: str,
eacl_deny_for_others,
bearer_token_no_limit_for_others: str,
):
eacl_deny_for_others
bearer = bearer_token_no_limit_for_others
file_path = generate_file(object_size)
with allure.step(
f"Put object with bearer token for {EACLRole.OTHERS}, then get and verify hashes"
):
headers = [f" -H 'Authorization: Bearer {bearer}'"]
oid = upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
)
get_object_and_verify_hashes(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=user_container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
)

View file

@@ -1,24 +1,23 @@ import logging
import logging
import os
import random
from time import sleep
import allure
import pytest
from common import COMPLEX_OBJ_SIZE
from container import create_container
from epoch import get_epoch, tick_epoch
from file_helper import generate_file, get_file_hash
from python_keywords.container import create_container
from python_keywords.http_gate import (
attr_into_header,
get_object_and_verify_hashes,
get_object_by_attr_and_verify_hashes,
get_via_http_curl,
get_via_http_gate,
get_via_http_gate_by_attribute,
get_via_zip_http_gate,
try_to_get_object_and_expect_error,
upload_via_http_gate,
upload_via_http_gate_curl,
)
from python_keywords.neofs_verbs import get_object, put_object_to_random_node
from python_keywords.storage_policy import get_nodes_without_object
from python_keywords.neofs_verbs import put_object_to_random_node
from utility import wait_for_gc_pass_on_storage_nodes
from wellknown_acl import PUBLIC_ACL
@@ -27,11 +26,6 @@ from steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger")
OBJECT_NOT_FOUND_ERROR = "not found"
# For some reason object uploaded via http gateway is not immediately available for downloading
# Until this issue is resolved we are waiting for some time before attempting to read an object
# TODO: remove after https://github.com/nspcc-dev/neofs-http-gw/issues/176 is fixed
OBJECT_UPLOAD_DELAY = 10
@allure.link(
"https://github.com/nspcc-dev/neofs-http-gw#neofs-http-gateway", name="neofs-http-gateway"
@@ -50,7 +44,7 @@ class TestHttpGate(ClusterTestBase):
TestHttpGate.wallet = default_wallet
@allure.title("Test Put over gRPC, Get over HTTP")
def test_put_grpc_get_http(self):
def test_put_grpc_get_http(self, complex_object_size, simple_object_size):
"""
Test that an object can be put using the gRPC interface and fetched using HTTP.
@@ -72,7 +66,9 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_1,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
with allure.step("Put objects using gRPC"):
oid_simple = put_object_to_random_node(
@@ -91,13 +87,21 @@ class TestHttpGate(ClusterTestBase):
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid)
get_object_and_verify_hashes(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
)
@allure.link("https://github.com/nspcc-dev/neofs-http-gw#uploading", name="uploading")
@allure.link("https://github.com/nspcc-dev/neofs-http-gw#downloading", name="downloading")
@allure.title("Test Put over HTTP, Get over HTTP")
@pytest.mark.smoke
def test_put_http_get_http(self):
def test_put_http_get_http(self, complex_object_size, simple_object_size):
"""
Test that an object can be put and fetched using the HTTP interface.
@@ -117,7 +121,9 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
with allure.step("Put objects using HTTP"):
oid_simple = upload_via_http_gate(
@@ -128,7 +134,15 @@ class TestHttpGate(ClusterTestBase):
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
self.get_object_and_verify_hashes(oid, file_path, self.wallet, cid)
get_object_and_verify_hashes(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
)
@allure.link(
"https://github.com/nspcc-dev/neofs-http-gw#by-attributes", name="download by attributes"
@@ -143,7 +157,7 @@ class TestHttpGate(ClusterTestBase):
],
ids=["simple", "hyphen", "percent"],
)
def test_put_http_get_http_with_headers(self, attributes: dict):
def test_put_http_get_http_with_headers(self, attributes: dict, simple_object_size):
"""
Test that an object can be downloaded using different attributes in the HTTP header.
@@ -163,10 +177,10 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path = generate_file()
file_path = generate_file(simple_object_size)
with allure.step("Put objects using HTTP with attribute"):
headers = self._attr_into_header(attributes)
headers = attr_into_header(attributes)
oid = upload_via_http_gate(
cid=cid,
path=file_path,
@@ -174,12 +188,16 @@ class TestHttpGate(ClusterTestBase):
endpoint=self.cluster.default_http_gate_endpoint,
)
sleep(OBJECT_UPLOAD_DELAY)
self.get_object_by_attr_and_verify_hashes(oid, file_path, cid, attributes)
get_object_by_attr_and_verify_hashes(
oid=oid,
file_name=file_path,
cid=cid,
attrs=attributes,
endpoint=self.cluster.default_http_gate_endpoint,
)
@allure.title("Test Expiration-Epoch in HTTP header")
def test_expiration_epoch_in_http(self):
def test_expiration_epoch_in_http(self, simple_object_size):
endpoint = self.cluster.default_rpc_endpoint
http_endpoint = self.cluster.default_http_gate_endpoint
@@ -190,7 +208,7 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path = generate_file()
file_path = generate_file(simple_object_size)
oids = []
curr_epoch = get_epoch(self.shell, self.cluster)
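The uploads elided by this hunk attach each object's expiration epoch as an attribute header at upload time; a sketch, assuming the gateway's X-Attribute-Neofs-Expiration-Epoch header (the exact header name is not shown in this diff):
# Sketch: objects expiring at successive epochs, matching the oid slices below.
for delta in range(4):
    headers = {"X-Attribute-Neofs-Expiration-Epoch": str(curr_epoch + delta)}
    oids.append(
        upload_via_http_gate(
            cid=cid, path=file_path, headers=headers, endpoint=http_endpoint
        )
    )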
@@ -213,14 +231,17 @@ class TestHttpGate(ClusterTestBase):
get_via_http_gate(cid=cid, oid=oid, endpoint=http_endpoint)
for expired_objects, not_expired_objects in [(oids[:1], oids[1:]), (oids[:2], oids[2:])]:
tick_epoch(self.shell, self.cluster)
self.tick_epoch()
# Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes()
for oid in expired_objects:
self.try_to_get_object_and_expect_error(
cid=cid, oid=oid, error_pattern=OBJECT_NOT_FOUND_ERROR
try_to_get_object_and_expect_error(
cid=cid,
oid=oid,
error_pattern=OBJECT_NOT_FOUND_ERROR,
endpoint=self.cluster.default_http_gate_endpoint,
)
with allure.step("Other objects can still be fetched"):
@@ -228,7 +249,7 @@ class TestHttpGate(ClusterTestBase):
get_via_http_gate(cid=cid, oid=oid, endpoint=http_endpoint)
@allure.title("Test Zip in HTTP header")
def test_zip_in_http(self):
def test_zip_in_http(self, complex_object_size, simple_object_size):
cid = create_container(
self.wallet,
shell=self.shell,
@@ -236,7 +257,9 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
common_prefix = "my_files"
headers1 = {"X-Attribute-FilePath": f"{common_prefix}/file1"}
@@ -255,8 +278,6 @@ class TestHttpGate(ClusterTestBase):
endpoint=self.cluster.default_http_gate_endpoint,
)
sleep(OBJECT_UPLOAD_DELAY)
dir_path = get_via_zip_http_gate(
cid=cid, prefix=common_prefix, endpoint=self.cluster.default_http_gate_endpoint
)
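get_via_zip_http_gate downloads the archive for the given prefix and returns the directory it was unpacked into; with the two FilePath attributes above, the verification that follows would look roughly like this (a sketch):
import os

# Both uploaded files should reappear under the common prefix.
for name in ("file1", "file2"):
    assert os.path.exists(os.path.join(dir_path, common_prefix, name))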
@@ -267,7 +288,7 @@ class TestHttpGate(ClusterTestBase):
@pytest.mark.long
@allure.title("Test Put over HTTP/Curl, Get over HTTP/Curl for large object")
def test_put_http_get_http_large_file(self):
def test_put_http_get_http_large_file(self, complex_object_size):
"""
This test checks upload and download using curl with a 'large' object.
Here 'large' means an object up to 20 MB in size.
@@ -280,7 +301,7 @@ class TestHttpGate(ClusterTestBase):
basic_acl=PUBLIC_ACL,
)
obj_size = int(os.getenv("BIG_OBJ_SIZE", COMPLEX_OBJ_SIZE))
obj_size = int(os.getenv("BIG_OBJ_SIZE", complex_object_size))
file_path = generate_file(obj_size)
with allure.step("Put objects using HTTP"):
@@ -290,21 +311,31 @@ class TestHttpGate(ClusterTestBase):
oid_curl = upload_via_http_gate_curl(
cid=cid,
filepath=file_path,
large_object=True,
endpoint=self.cluster.default_http_gate_endpoint,
)
self.get_object_and_verify_hashes(oid_gate, file_path, self.wallet, cid)
self.get_object_and_verify_hashes(
oid_curl,
file_path,
self.wallet,
cid,
get_object_and_verify_hashes(
oid=oid_gate,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
)
get_object_and_verify_hashes(
oid=oid_curl,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
object_getter=get_via_http_curl,
)
@allure.title("Test Put/Get over HTTP using Curl utility")
def test_put_http_get_http_curl(self):
def test_put_http_get_http_curl(self, complex_object_size, simple_object_size):
"""
This test checks upload and download over HTTP using the curl utility.
"""
@@ -315,87 +346,28 @@ class TestHttpGate(ClusterTestBase):
rule=self.PLACEMENT_RULE_2,
basic_acl=PUBLIC_ACL,
)
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
with allure.step("Put objects using curl utility"):
oid_simple = upload_via_http_gate_curl(
cid=cid, filepath=file_path_simple, endpoint=self.cluster.default_http_gate_endpoint
)
oid_large = upload_via_http_gate_curl(
cid=cid, filepath=file_path_large, endpoint=self.cluster.default_http_gate_endpoint
cid=cid,
filepath=file_path_large,
endpoint=self.cluster.default_http_gate_endpoint,
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
self.get_object_and_verify_hashes(
oid,
file_path,
self.wallet,
cid,
object_getter=get_via_http_curl,
)
@allure.step("Try to get object and expect error")
def try_to_get_object_and_expect_error(self, cid: str, oid: str, error_pattern: str) -> None:
try:
get_via_http_gate(cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint)
raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err:
match = error_pattern.casefold() in str(err).casefold()
assert match, f"Expected {err} to match {error_pattern}"
@allure.step("Verify object can be get using HTTP header attribute")
def get_object_by_attr_and_verify_hashes(
self, oid: str, file_name: str, cid: str, attrs: dict
) -> None:
got_file_path_http = get_via_http_gate(
cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint
)
got_file_path_http_attr = get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=self.cluster.default_http_gate_endpoint
)
TestHttpGate._assert_hashes_are_equal(
file_name, got_file_path_http, got_file_path_http_attr
)
@allure.step("Verify object can be get using HTTP")
def get_object_and_verify_hashes(
self, oid: str, file_name: str, wallet: str, cid: str, object_getter=None
) -> None:
nodes = get_nodes_without_object(
wallet=wallet,
cid=cid,
get_object_and_verify_hashes(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
object_getter=get_via_http_curl,
)
random_node = random.choice(nodes)
object_getter = object_getter or get_via_http_gate
got_file_path = get_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=self.shell,
endpoint=random_node.get_rpc_endpoint(),
)
got_file_path_http = object_getter(
cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint
)
TestHttpGate._assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
@staticmethod
def _assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: str) -> None:
msg = "Expected hashes are equal for files {f1} and {f2}"
got_file_hash_http = get_file_hash(got_file_1)
assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(
f1=got_file_2, f2=got_file_1
)
assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(
f1=orig_file_name, f2=got_file_1
)
@staticmethod
def _attr_into_header(attrs: dict) -> dict:
return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}

View file

@@ -0,0 +1,229 @@
import logging
import os
import allure
import pytest
from container import (
create_container,
delete_container,
list_containers,
wait_for_container_deletion,
)
from epoch import tick_epoch
from file_helper import generate_file
from http_gate import (
attr_into_str_header_curl,
get_object_by_attr_and_verify_hashes,
try_to_get_object_and_expect_error,
try_to_get_object_via_passed_request_and_expect_error,
upload_via_http_gate_curl,
)
from pytest import FixtureRequest
from python_keywords.neofs_verbs import delete_object
from wellknown_acl import PUBLIC_ACL
from helpers.storage_object_info import StorageObjectInfo
from steps.cluster_test_base import ClusterTestBase
OBJECT_ALREADY_REMOVED_ERROR = "object already removed"
logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_headers(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
obj1_keys = ["Writer", "Chapter1", "Chapter2"]
obj2_keys = ["Writer", "Ch@pter1", "chapter2"]
values = ["Leo Tolstoy", "peace", "w@r"]
OBJECT_ATTRIBUTES = [
{obj1_keys[0]: values[0], obj1_keys[1]: values[1], obj1_keys[2]: values[2]},
{obj2_keys[0]: values[0], obj2_keys[1]: values[1], obj2_keys[2]: values[2]},
]
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_headers.wallet = default_wallet
@pytest.fixture(
params=[
pytest.lazy_fixture("simple_object_size"),
pytest.lazy_fixture("complex_object_size"),
],
ids=["simple object", "complex object"],
scope="class",
)
def storage_objects_with_attributes(self, request: FixtureRequest) -> list[StorageObjectInfo]:
storage_objects = []
wallet = self.wallet
cid = create_container(
wallet=self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
file_path = generate_file(request.param)
for attributes in self.OBJECT_ATTRIBUTES:
storage_object_id = upload_via_http_gate_curl(
cid=cid,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=attr_into_str_header_curl(attributes),
)
storage_object = StorageObjectInfo(cid, storage_object_id)
storage_object.size = os.path.getsize(file_path)
storage_object.wallet_file_path = wallet
storage_object.file_path = file_path
storage_object.attributes = attributes
storage_objects.append(storage_object)
yield storage_objects
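attr_into_str_header_curl turns each attribute into a curl header fragment carrying the X-Attribute- prefix used throughout this suite; the exact formatting below is illustrative:
headers = attr_into_str_header_curl({"Writer": "Leo Tolstoy"})
# Expected shape (sketch): [" -H 'X-Attribute-Writer: Leo Tolstoy'"]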
@allure.title("Get object1 by attribute")
def test_object1_can_be_get_by_attr(
self, storage_objects_with_attributes: list[StorageObjectInfo]
):
"""
Test to get object#1 by attribute and compare hashes
Steps:
1. Download object#1 with attributes [Chapter2=w@r] and compare hashes
"""
storage_object_1 = storage_objects_with_attributes[0]
with allure.step(
f'Download object#1 via wget with attributes Chapter2: {storage_object_1.attributes["Chapter2"]} and compare hashes'
):
get_object_by_attr_and_verify_hashes(
oid=storage_object_1.oid,
file_name=storage_object_1.file_path,
cid=storage_object_1.cid,
attrs={"Chapter2": storage_object_1.attributes["Chapter2"]},
endpoint=self.cluster.default_http_gate_endpoint,
)
@allure.title("Test get object2 with different attributes, then delete object2 and get object1")
def test_object2_can_be_get_by_attr(
self, storage_objects_with_attributes: list[StorageObjectInfo]
):
"""
Test to get object#2 with different attributes, then delete object#2 and get object#1 using the 1st attribute. Note: obj1 and obj2 share attribute#1,
so once obj2 is deleted, obj1 can still be fetched by that attribute.
Steps:
1. Download object#2 with attributes [chapter2=w@r] and compare hashes
2. Download object#2 with attributes [Ch@pter1=peace] and compare hashes
3. Delete object#2
4. Download object#1 with attributes [Writer=Leo Tolstoy] and compare hashes
"""
storage_object_1 = storage_objects_with_attributes[0]
storage_object_2 = storage_objects_with_attributes[1]
with allure.step(
f'Download object#2 via wget with attributes [chapter2={storage_object_2.attributes["chapter2"]}] / [Ch@pter1={storage_object_2.attributes["Ch@pter1"]}] and compare hashes'
):
selected_attributes_object2 = [
{"chapter2": storage_object_2.attributes["chapter2"]},
{"Ch@pter1": storage_object_2.attributes["Ch@pter1"]},
]
for attributes in selected_attributes_object2:
get_object_by_attr_and_verify_hashes(
oid=storage_object_2.oid,
file_name=storage_object_2.file_path,
cid=storage_object_2.cid,
attrs=attributes,
endpoint=self.cluster.default_http_gate_endpoint,
)
with allure.step("Delete object#2 and verify it is removed"):
delete_object(
wallet=self.wallet,
cid=storage_object_2.cid,
oid=storage_object_2.oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
try_to_get_object_and_expect_error(
cid=storage_object_2.cid,
oid=storage_object_2.oid,
error_pattern=OBJECT_ALREADY_REMOVED_ERROR,
endpoint=self.cluster.default_http_gate_endpoint,
)
storage_objects_with_attributes.remove(storage_object_2)
with allure.step(
f'Download object#1 with attributes [Writer={storage_object_1.attributes["Writer"]}] and compare hashes'
):
key_value_pair = {"Writer": storage_object_1.attributes["Writer"]}
get_object_by_attr_and_verify_hashes(
oid=storage_object_1.oid,
file_name=storage_object_1.file_path,
cid=storage_object_1.cid,
attrs=key_value_pair,
endpoint=self.cluster.default_http_gate_endpoint,
)
@allure.title("[Negative] Try to put object and get right after container is deleted")
def test_negative_put_and_get_object3(
self, storage_objects_with_attributes: list[StorageObjectInfo]
):
"""
Test attempting to put an object and to download it right after the container has been deleted
Steps:
1. [Negative] Allocate and attempt to put object#3 via http with attributes: [Writer=Leo Tolstoy, Writer=peace, peace=peace]
Expected: "Error duplication of attributes detected"
2. Delete container
3. [Negative] Try to download object with attributes [peace=peace]
Expected: "HTTP request sent, awaiting response... 404 Not Found"
"""
storage_object_1 = storage_objects_with_attributes[0]
with allure.step(
"[Negative] Allocate and attempt to put object#3 via http with attributes: [Writer=Leo Tolstoy, Writer=peace, peace=peace]"
):
file_path_3 = generate_file(storage_object_1.size)
attrs_obj3 = {"Writer": "Leo Tolstoy", "peace": "peace"}
headers = attr_into_str_header_curl(attrs_obj3)
headers.append(" ".join(attr_into_str_header_curl({"Writer": "peace"})))
error_pattern = "key duplication error: X-Attribute-Writer"
upload_via_http_gate_curl(
cid=storage_object_1.cid,
filepath=file_path_3,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=error_pattern,
)
with allure.step("Delete container and verify container deletion"):
delete_container(
wallet=self.wallet,
cid=storage_object_1.cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
self.tick_epoch()
wait_for_container_deletion(
self.wallet,
storage_object_1.cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
assert storage_object_1.cid not in list_containers(
self.wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
with allure.step(
"[Negative] Try to download (wget) object via wget with attributes [peace=peace]"
):
request = f"/get/{storage_object_1.cid}/peace/peace"
error_pattern = "404 Not Found"
try_to_get_object_via_passed_request_and_expect_error(
cid=storage_object_1.cid,
oid="",
error_pattern=error_pattern,
attrs=attrs_obj3,
http_request_path=request,
endpoint=self.cluster.default_http_gate_endpoint,
)

View file

@@ -0,0 +1,127 @@
import logging
import os
import allure
import pytest
from container import create_container
from file_helper import generate_file
from http_gate import (
get_object_and_verify_hashes,
get_object_by_attr_and_verify_hashes,
try_to_get_object_via_passed_request_and_expect_error,
)
from python_keywords.neofs_verbs import put_object_to_random_node
from wellknown_acl import PUBLIC_ACL
from steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_object(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_object.wallet = default_wallet
@allure.title("Test Put over gRPC, Get over HTTP")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_object_put_get_attributes(self, object_size: int):
"""
Test that an object can be put using the gRPC interface and fetched using HTTP.
Steps:
1. Create object;
2. Put objects using gRPC (neofs-cli) with attributes [--attributes chapter1=peace,chapter2=war];
3. Download object using HTTP gate (https://github.com/nspcc-dev/neofs-http-gw#downloading);
4. Compare hashes between original and downloaded object;
5. [Negative] Try to get the object with specified attributes and `get` request: [get/$CID/chapter1/peace];
6. Download the object with specified attributes and `get_by_attribute` request: [get_by_attribute/$CID/chapter1/peace];
7. Compare hashes between original and downloaded object;
8. [Negative] Try to get the object via `get_by_attribute` request: [get_by_attribute/$CID/$OID];
Expected result:
Hashes must be the same.
"""
with allure.step("Create public container"):
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
# Generate file
file_path = generate_file(object_size)
# List of Key=Value attributes
obj_key1 = "chapter1"
obj_value1 = "peace"
obj_key2 = "chapter2"
obj_value2 = "war"
# Prepare for grpc PUT request
key_value1 = obj_key1 + "=" + obj_value1
key_value2 = obj_key2 + "=" + obj_value2
with allure.step("Put objects using gRPC [--attributes chapter1=peace,chapter2=war]"):
oid = put_object_to_random_node(
wallet=self.wallet,
path=file_path,
cid=cid,
shell=self.shell,
cluster=self.cluster,
attributes=f"{key_value1},{key_value2}",
)
with allure.step("Get object and verify hashes [ get/$CID/$OID ]"):
get_object_and_verify_hashes(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
)
with allure.step("[Negative] try to get object: [get/$CID/chapter1/peace]"):
attrs = {obj_key1: obj_value1, obj_key2: obj_value2}
request = f"/get/{cid}/{obj_key1}/{obj_value1}"
expected_err_msg = "Failed to get object via HTTP gate:"
try_to_get_object_via_passed_request_and_expect_error(
cid=cid,
oid=oid,
error_pattern=expected_err_msg,
http_request_path=request,
attrs=attrs,
endpoint=self.cluster.default_http_gate_endpoint,
)
with allure.step(
"Download the object with attribute [get_by_attribute/$CID/chapter1/peace]"
):
get_object_by_attr_and_verify_hashes(
oid=oid,
file_name=file_path,
cid=cid,
attrs=attrs,
endpoint=self.cluster.default_http_gate_endpoint,
)
with allure.step("[Negative] try to get object: get_by_attribute/$CID/$OID"):
request = f"/get_by_attribute/{cid}/{oid}"
try_to_get_object_via_passed_request_and_expect_error(
cid=cid,
oid=oid,
error_pattern=expected_err_msg,
http_request_path=request,
endpoint=self.cluster.default_http_gate_endpoint,
)
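
As a hedged illustration of the two download routes exercised above, a sketch using requests against the gate paths named in the docstring (endpoint paths follow the neofs-http-gw README; the requests usage is illustrative, not the repo's helper):

import requests

GATE = "http://http.neofs.devenv"  # devenv HTTP gate, per the cluster config

def get_by_oid(cid: str, oid: str) -> bytes:
    # Address the object directly: GET /get/{cid}/{oid}.
    resp = requests.get(f"{GATE}/get/{cid}/{oid}")
    resp.raise_for_status()
    return resp.content

def get_by_attribute(cid: str, key: str, value: str) -> bytes:
    # Address the object by attribute: GET /get_by_attribute/{cid}/{key}/{value}.
    # Note that /get/{cid}/{key}/{value} is expected to fail, as the negative
    # step above asserts.
    resp = requests.get(f"{GATE}/get_by_attribute/{cid}/{key}/{value}")
    resp.raise_for_status()
    return resp.content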

View file

@ -0,0 +1,70 @@
import logging
import allure
import pytest
from container import create_container
from file_helper import generate_file
from http_gate import get_object_and_verify_hashes, upload_via_http_gate_curl
from wellknown_acl import PUBLIC_ACL
from steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_streaming(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_streaming.wallet = default_wallet
@allure.title("Test Put via pipe (steaming), Get over HTTP and verify hashes")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("complex_object_size")],
ids=["complex object"],
)
def test_object_can_be_put_get_by_streaming(self, object_size: int):
"""
Test that a big object can be put via the HTTP gate using curl streaming and retrieved over HTTP.
Steps:
1. Create big object;
2. Put object using curl with pipe (streaming);
3. Download object using HTTP gate (https://github.com/nspcc-dev/neofs-http-gw#downloading);
4. Compare hashes between original and downloaded object;
Expected result:
Hashes must be the same (a hedged streaming-upload sketch follows this test).
"""
with allure.step("Create public container and verify container creation"):
cid = create_container(
self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
with allure.step("Allocate big object"):
# Generate file
file_path = generate_file(object_size)
with allure.step(
"Put objects using curl utility and Get object and verify hashes [ get/$CID/$OID ]"
):
oid = upload_via_http_gate_curl(
cid=cid, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint
)
get_object_and_verify_hashes(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
)
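
A hedged sketch of the piped (streaming) upload, assuming the gate's documented multipart endpoint POST /upload/{cid} and curl's "@-" read-from-stdin form syntax (the repo's upload_via_http_gate_curl is the authoritative helper):

import subprocess

def upload_streaming(cid: str, file_path: str, gate: str = "http://http.neofs.devenv") -> str:
    # Pipe the file into curl so the request body is streamed from stdin
    # ('file=@-') instead of being read from disk by curl itself.
    cmd = f"cat {file_path} | curl -s -F 'file=@-' {gate}/upload/{cid}"
    result = subprocess.run(cmd, shell=True, check=True, capture_output=True, text=True)
    return result.stdout  # the gate is expected to answer with JSON containing the object ID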

View file

@ -0,0 +1,406 @@
import calendar
import datetime
import logging
from typing import Optional
import allure
import pytest
from container import create_container
from epoch import get_epoch, wait_for_epochs_align
from file_helper import generate_file
from grpc_responses import OBJECT_NOT_FOUND
from http_gate import (
attr_into_str_header_curl,
get_object_and_verify_hashes,
try_to_get_object_and_expect_error,
upload_via_http_gate_curl,
)
from python_keywords.neofs_verbs import get_netmap_netinfo, get_object_from_random_node, head_object
from wellknown_acl import PUBLIC_ACL
from steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger")
EXPIRATION_TIMESTAMP_HEADER = "__NEOFS__EXPIRATION_TIMESTAMP"
EXPIRATION_EPOCH_HEADER = "__NEOFS__EXPIRATION_EPOCH"
EXPIRATION_DURATION_HEADER = "__NEOFS__EXPIRATION_DURATION"
EXPIRATION_EXPIRATION_RFC = "__NEOFS__EXPIRATION_RFC3339"
NEOFS_EXPIRATION_EPOCH = "Neofs-Expiration-Epoch"
NEOFS_EXPIRATION_DURATION = "Neofs-Expiration-Duration"
NEOFS_EXPIRATION_TIMESTAMP = "Neofs-Expiration-Timestamp"
NEOFS_EXPIRATION_RFC3339 = "Neofs-Expiration-RFC3339"
@pytest.mark.sanity
@pytest.mark.http_gate
class Test_http_system_header(ClusterTestBase):
PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Prepare wallet and deposit")
def prepare_wallet(self, default_wallet):
Test_http_system_header.wallet = default_wallet
@pytest.fixture(scope="class")
@allure.title("Create container")
def user_container(self):
return create_container(
wallet=self.wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=self.PLACEMENT_RULE,
basic_acl=PUBLIC_ACL,
)
@pytest.fixture(scope="class")
@allure.title("epoch_duration in seconds")
def epoch_duration(self) -> int:
net_info = get_netmap_netinfo(
wallet=self.wallet,
endpoint=self.cluster.default_rpc_endpoint,
shell=self.shell,
)
epoch_duration_in_blocks = net_info["epoch_duration"]
time_per_block = net_info["time_per_block"]
return int(epoch_duration_in_blocks * time_per_block)
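# Hedged numeric illustration of the fixture above (devenv values are
# assumptions, not read from this repo): with epoch_duration = 240 blocks
# and time_per_block = 1 second, one epoch lasts 240 * 1 = 240 seconds,
# so epoch_count_into_mins(epoch_duration=240, epoch=2) yields "8.0m".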
@allure.title("Return N-epoch count in minutes")
def epoch_count_into_mins(self, epoch_duration: int, epoch: int) -> str:
mins = epoch_duration * epoch / 60
return f"{mins}m"
@allure.title("Return future timestamp after N epochs are passed")
def epoch_count_into_timestamp(
self, epoch_duration: int, epoch: int, rfc3339: Optional[bool] = False
) -> str:
current_datetime = datetime.datetime.utcnow()
epoch_count_in_seconds = epoch_duration * epoch
future_datetime = current_datetime + datetime.timedelta(seconds=epoch_count_in_seconds)
if rfc3339:
return future_datetime.isoformat("T") + "Z"
else:
return str(calendar.timegm(future_datetime.timetuple()))
@allure.title("Check is (header_output) Key=Value exists and equal in passed (header_to_find)")
def check_key_value_presented_header(self, header_output: dict, header_to_find: dict) -> bool:
header_att = header_output["header"]["attributes"]
for key_to_check, val_to_check in header_to_find.items():
if key_to_check not in header_att or val_to_check != header_att[key_to_check]:
logger.info(f"Unable to find {key_to_check}: '{val_to_check}' in {header_att}")
return False
return True
@allure.title(
f"Validate that only {EXPIRATION_EPOCH_HEADER} exists in header and other headers are abesent"
)
def validation_for_http_header_attr(self, head_info: dict, expected_epoch: int) -> None:
# check that __NEOFS__EXPIRATION_EPOCH attribute has corresponding epoch
assert self.check_key_value_presented_header(
head_info, {EXPIRATION_EPOCH_HEADER: str(expected_epoch)}
), f'Expected to find {EXPIRATION_EPOCH_HEADER}: {expected_epoch} in: {head_info["header"]["attributes"]}'
# check that {EXPIRATION_DURATION_HEADER} is absent from the header output
assert not (
self.check_key_value_presented_header(head_info, {EXPIRATION_DURATION_HEADER: ""})
), f"Only {EXPIRATION_EPOCH_HEADER} can be displayed in header attributes"
# check that {EXPIRATION_TIMESTAMP_HEADER} is absent from the header output
assert not (
self.check_key_value_presented_header(head_info, {EXPIRATION_TIMESTAMP_HEADER: ""})
), f"Only {EXPIRATION_EPOCH_HEADER} can be displayed in header attributes"
# check that {EXPIRATION_EXPIRATION_RFC} is absent from the header output
assert not (
self.check_key_value_presented_header(head_info, {EXPIRATION_EXPIRATION_RFC: ""})
), f"Only {EXPIRATION_EPOCH_HEADER} can be displayed in header attributes"
@allure.title("Put / get / verify object and return head command result to invoker")
def oid_header_info_for_object(self, file_path: str, attributes: dict, user_container: str):
oid = upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=attr_into_str_header_curl(attributes),
)
get_object_and_verify_hashes(
oid=oid,
file_name=file_path,
wallet=self.wallet,
cid=user_container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
)
head = head_object(
wallet=self.wallet,
cid=user_container,
oid=oid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
return oid, head
@allure.title("[negative] attempt to put object with expired epoch")
def test_unable_put_expired_epoch(self, user_container: str, simple_object_size: int):
headers = attr_into_str_header_curl(
{"Neofs-Expiration-Epoch": str(get_epoch(self.shell, self.cluster) - 1)}
)
file_path = generate_file(simple_object_size)
with allure.step(
"Put object using HTTP with attribute Expiration-Epoch where epoch is expired"
):
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern="object has expired",
)
@allure.title("[negative] attempt to put object with negative Neofs-Expiration-Duration")
def test_unable_put_negative_duration(self, user_container: str, simple_object_size: int):
headers = attr_into_str_header_curl({"Neofs-Expiration-Duration": "-1h"})
file_path = generate_file(simple_object_size)
with allure.step(
"Put object using HTTP with attribute Neofs-Expiration-Duration where duration is negative"
):
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=f"{EXPIRATION_DURATION_HEADER} must be positive",
)
@allure.title(
"[negative] attempt to put object with Neofs-Expiration-Timestamp value in the past"
)
def test_unable_put_expired_timestamp(self, user_container: str, simple_object_size: int):
headers = attr_into_str_header_curl({"Neofs-Expiration-Timestamp": "1635075727"})
file_path = generate_file(simple_object_size)
with allure.step(
"Put object using HTTP with attribute Neofs-Expiration-Timestamp where duration is in the past"
):
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=f"{EXPIRATION_TIMESTAMP_HEADER} must be in the future",
)
@allure.title(
"[negative] Put object using HTTP with attribute Neofs-Expiration-RFC3339 where duration is in the past"
)
def test_unable_put_expired_rfc(self, user_container: str, simple_object_size: int):
headers = attr_into_str_header_curl({"Neofs-Expiration-RFC3339": "2021-11-22T09:55:49Z"})
file_path = generate_file(simple_object_size)
upload_via_http_gate_curl(
cid=user_container,
filepath=file_path,
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
error_pattern=f"{EXPIRATION_EXPIRATION_RFC} must be in the future",
)
@allure.title("priority of attributes epoch>duration")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_http_attr_priority_epoch_duration(
self, user_container: str, object_size: int, epoch_duration: int
):
self.tick_epoch()
epoch_count = 1
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch={get_epoch(self.shell, self.cluster)}, expected_epoch={expected_epoch}"
)
attributes = {NEOFS_EXPIRATION_EPOCH: expected_epoch, NEOFS_EXPIRATION_DURATION: "1m"}
file_path = generate_file(object_size)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(
file_path=file_path, attributes=attributes, user_container=user_container
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with allure.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with allure.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(
self.wallet, user_container, oid, self.shell, self.cluster
)
@allure.title(
f"Priority of attributes: duration > timestamp; duration has higher priority and should be converted to {EXPIRATION_EPOCH_HEADER}"
)
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_http_attr_priority_dur_timestamp(
self, user_container: str, object_size: int, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch={get_epoch(self.shell, self.cluster)}, expected_epoch={expected_epoch}"
)
attributes = {
NEOFS_EXPIRATION_DURATION: self.epoch_count_into_mins(
epoch_duration=epoch_duration, epoch=2
),
NEOFS_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=1
),
}
file_path = generate_file(object_size)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(
file_path=file_path, attributes=attributes, user_container=user_container
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with allure.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with allure.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(
self.wallet, user_container, oid, self.shell, self.cluster
)
@allure.title(
f"Priority of attributes: timestamp > RFC3339; timestamp has higher priority and should be converted to {EXPIRATION_EPOCH_HEADER}"
)
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_http_attr_priority_timestamp_rfc(
self, user_container: str, object_size: int, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch={get_epoch(self.shell, self.cluster)}, expected_epoch={expected_epoch}"
)
attributes = {
NEOFS_EXPIRATION_TIMESTAMP: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=2
),
NEOFS_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=1, rfc3339=True
),
}
file_path = generate_file(object_size)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(
file_path=file_path, attributes=attributes, user_container=user_container
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with allure.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with allure.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(
self.wallet, user_container, oid, self.shell, self.cluster
)
@allure.title("Test that object is automatically delete when expiration passed")
@pytest.mark.parametrize(
"object_size",
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_http_rfc_object_unavailable_after_expir(
self, user_container: str, object_size: int, epoch_duration: int
):
self.tick_epoch()
epoch_count = 2
expected_epoch = get_epoch(self.shell, self.cluster) + epoch_count
logger.info(
f"epoch duration={epoch_duration}, current_epoch={get_epoch(self.shell, self.cluster)}, expected_epoch={expected_epoch}"
)
attributes = {
NEOFS_EXPIRATION_RFC3339: self.epoch_count_into_timestamp(
epoch_duration=epoch_duration, epoch=2, rfc3339=True
)
}
file_path = generate_file(object_size)
with allure.step(
f"Put objects using HTTP with attributes and head command should display {EXPIRATION_EPOCH_HEADER}: {expected_epoch} attr"
):
oid, head_info = self.oid_header_info_for_object(
file_path=file_path,
attributes=attributes,
user_container=user_container,
)
self.validation_for_http_header_attr(head_info=head_info, expected_epoch=expected_epoch)
with allure.step("Check that object becomes unavailable when epoch is expired"):
for _ in range(0, epoch_count + 1):
self.tick_epoch()
assert (
get_epoch(self.shell, self.cluster) == expected_epoch + 1
), f"Epochs should be equal: {get_epoch(self.shell, self.cluster)} != {expected_epoch + 1}"
with allure.step("Check object deleted because it expires-on epoch"):
wait_for_epochs_align(self.shell, self.cluster)
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(
self.wallet, user_container, oid, self.shell, self.cluster
)
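
To make the conversion these priority tests rely on explicit, a hedged sketch (the real resolution happens inside neofs-http-gw; the priority order epoch > duration > timestamp > RFC3339 and the ceil rounding are assumptions consistent with what the tests above assert):

import calendar
import datetime
import math
from typing import Optional

def expected_expiration_epoch(
    current_epoch: int,
    epoch_duration_sec: int,
    duration_sec: Optional[int] = None,
    timestamp: Optional[int] = None,
) -> int:
    # The highest-priority attribute present wins, and everything is reduced
    # to a __NEOFS__EXPIRATION_EPOCH value.
    if duration_sec is not None:
        return current_epoch + math.ceil(duration_sec / epoch_duration_sec)
    if timestamp is not None:
        now = calendar.timegm(datetime.datetime.utcnow().timetuple())
        return current_epoch + math.ceil((timestamp - now) / epoch_duration_sec)
    raise ValueError("no expiration attribute supplied")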

View file

@ -17,8 +17,8 @@ def pytest_generate_tests(metafunc):
@pytest.mark.s3_gate
class TestS3GateACL(TestS3GateBase):
@allure.title("Test S3: Object ACL")
def test_s3_object_ACL(self, bucket):
file_path = generate_file()
def test_s3_object_ACL(self, bucket, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
with allure.step("Put object into bucket, Check ACL is empty"):

View file

@ -103,8 +103,8 @@ class TestS3GateBucket(TestS3GateBase):
], "Permission for CanonicalUser is FULL_CONTROL"
@allure.title("Test S3: create bucket with object lock")
def test_s3_bucket_object_lock(self):
file_path = generate_file()
def test_s3_bucket_object_lock(self, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
with allure.step("Create bucket with --no-object-lock-enabled-for-bucket"):
@ -138,10 +138,10 @@ class TestS3GateBucket(TestS3GateBase):
)
@allure.title("Test S3: delete bucket")
def test_s3_delete_bucket(self):
file_path_1 = generate_file()
def test_s3_delete_bucket(self, simple_object_size):
file_path_1 = generate_file(simple_object_size)
file_name_1 = object_key_from_file_path(file_path_1)
file_path_2 = generate_file()
file_path_2 = generate_file(simple_object_size)
file_name_2 = object_key_from_file_path(file_path_2)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)

View file

@ -5,7 +5,7 @@ from random import choice, choices
import allure
import pytest
from aws_cli_client import AwsCliClient
from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from common import ASSETS_DIR
from epoch import tick_epoch
from file_helper import (
generate_file,
@ -39,12 +39,12 @@ def pytest_generate_tests(metafunc):
@pytest.mark.s3_gate_base
class TestS3Gate(TestS3GateBase):
@allure.title("Test S3 Bucket API")
def test_s3_buckets(self):
def test_s3_buckets(self, simple_object_size):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file()
file_path = generate_file(simple_object_size)
file_name = self.object_key_from_file_path(file_path)
with allure.step("Create buckets"):
@ -99,7 +99,7 @@ class TestS3Gate(TestS3GateBase):
with allure.step(f"Delete bucket {bucket_1}"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_1)
tick_epoch(self.shell, self.cluster)
self.tick_epoch()
with allure.step(f"Check bucket {bucket_1} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
@ -109,11 +109,13 @@ class TestS3Gate(TestS3GateBase):
@pytest.mark.parametrize(
"file_type", ["simple", "large"], ids=["Simple object", "Large object"]
)
def test_s3_api_object(self, file_type, two_buckets):
def test_s3_api_object(self, file_type, two_buckets, simple_object_size, complex_object_size):
"""
Test base S3 Object API (Put/Head/List) for simple and large objects.
"""
file_path = generate_file(SIMPLE_OBJ_SIZE if file_type == "simple" else COMPLEX_OBJ_SIZE)
file_path = generate_file(
simple_object_size if file_type == "simple" else complex_object_size
)
file_name = self.object_key_from_file_path(file_path)
bucket_1, bucket_2 = two_buckets
@ -136,7 +138,7 @@ class TestS3Gate(TestS3GateBase):
s3_gate_object.get_object_attributes(self.s3_client, bucket, file_name, *attrs)
@allure.title("Test S3 Sync directory")
def test_s3_sync_dir(self, bucket):
def test_s3_sync_dir(self, bucket, simple_object_size):
"""
Test checks sync directory with AWS CLI utility.
"""
@ -147,8 +149,8 @@ class TestS3Gate(TestS3GateBase):
if not isinstance(self.s3_client, AwsCliClient):
pytest.skip("This test is not supported with boto3 client")
generate_file_with_content(file_path=file_path_1)
generate_file_with_content(file_path=file_path_2)
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size, file_path=file_path_2)
self.s3_client.sync(bucket_name=bucket, dir_path=os.path.dirname(file_path_1))
@ -166,19 +168,21 @@ class TestS3Gate(TestS3GateBase):
), "Expected hashes are the same"
@allure.title("Test S3 Object versioning")
def test_s3_api_versioning(self, bucket):
def test_s3_api_versioning(self, bucket, simple_object_size):
"""
Test checks basic versioning functionality for S3 bucket.
"""
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(content=version_1_content)
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
generate_file_with_content(file_path=file_name_simple, content=version_2_content)
generate_file_with_content(
simple_object_size, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
with allure.step("Check bucket shows all versions"):
@ -246,13 +250,15 @@ class TestS3Gate(TestS3GateBase):
@pytest.mark.s3_gate_multipart
@allure.title("Test S3 Object Multipart API")
def test_s3_api_multipart(self, bucket):
def test_s3_api_multipart(self, bucket, simple_object_size):
"""
Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
Upload part/List parts/Complete multipart upload).
"""
parts_count = 3
file_name_large = generate_file(SIMPLE_OBJ_SIZE * 1024 * 6 * parts_count) # 5Mb - min part
file_name_large = generate_file(
simple_object_size * 1024 * 6 * parts_count
) # each part must exceed the 5 MB S3 minimum (see the sizing sketch below)
object_key = self.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
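
A hedged illustration of the sizing arithmetic above, assuming the simple_object_size fixture is on the order of 1 KiB (its real value is configured elsewhere in the repo):

# Hypothetical numbers for illustration only.
simple_object_size = 1024  # assumed ~1 KiB fixture value
parts_count = 3
total_size = simple_object_size * 1024 * 6 * parts_count  # ~18 MiB overall
part_size = total_size // parts_count  # ~6 MiB per part, above the 5 MiB S3 minimum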
@ -320,7 +326,7 @@ class TestS3Gate(TestS3GateBase):
check_tags_by_bucket(self.s3_client, bucket, [])
@allure.title("Test S3 Object tagging API")
def test_s3_api_object_tagging(self, bucket):
def test_s3_api_object_tagging(self, bucket, simple_object_size):
"""
Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
"""
@ -330,7 +336,7 @@ class TestS3Gate(TestS3GateBase):
("some-key--obj2", "some-value--obj2"),
]
key_value_pair_obj_new = [("some-key-obj-new", "some-value-obj-new")]
file_name_simple = generate_file(SIMPLE_OBJ_SIZE)
file_name_simple = generate_file(simple_object_size)
obj_key = self.object_key_from_file_path(file_name_simple)
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, key_value_pair_bucket)
@ -350,7 +356,7 @@ class TestS3Gate(TestS3GateBase):
check_tags_by_object(self.s3_client, bucket, obj_key, [])
@allure.title("Test S3: Delete object & delete objects S3 API")
def test_s3_api_delete(self, two_buckets):
def test_s3_api_delete(self, two_buckets, simple_object_size, complex_object_size):
"""
Check delete_object and delete_objects S3 API operations. Objects in the first bucket are deleted one by one;
objects in the second bucket are deleted all at once.
@ -359,7 +365,7 @@ class TestS3Gate(TestS3GateBase):
max_delete_objects = 17
put_objects = []
file_paths = []
obj_sizes = [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE]
obj_sizes = [simple_object_size, complex_object_size]
bucket_1, bucket_2 = two_buckets
@ -406,12 +412,14 @@ class TestS3Gate(TestS3GateBase):
try_to_get_objects_and_expect_error(self.s3_client, bucket_2, objects_to_delete_b2)
@allure.title("Test S3: Copy object to the same bucket")
def test_s3_copy_same_bucket(self, bucket):
def test_s3_copy_same_bucket(self, bucket, complex_object_size, simple_object_size):
"""
Test object can be copied to the same bucket.
# TODO: delete after test_s3_copy_object is merged
"""
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
bucket_objects = [file_name_simple, file_name_large]
@ -448,12 +456,14 @@ class TestS3Gate(TestS3GateBase):
)
@allure.title("Test S3: Copy object to another bucket")
def test_s3_copy_to_another_bucket(self, two_buckets):
def test_s3_copy_to_another_bucket(self, two_buckets, complex_object_size, simple_object_size):
"""
Test object can be copied to another bucket.
# TODO: delete after test_s3_copy_object is merged
"""
file_path_simple, file_path_large = generate_file(), generate_file(COMPLEX_OBJ_SIZE)
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
bucket_1_objects = [file_name_simple, file_name_large]

View file

@ -21,8 +21,8 @@ def pytest_generate_tests(metafunc):
@pytest.mark.parametrize("version_id", [None, "second"])
class TestS3GateLocking(TestS3GateBase):
@allure.title("Test S3: Checking the operation of retention period & legal lock on the object")
def test_s3_object_locking(self, version_id):
file_path = generate_file()
def test_s3_object_locking(self, version_id, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
retention_period = 2
@ -30,7 +30,7 @@ class TestS3GateLocking(TestS3GateBase):
with allure.step("Put several versions of object into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
file_name_1 = generate_file_with_content(file_path=file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
check_objects_in_bucket(self.s3_client, bucket, [file_name])
if version_id:
@ -74,8 +74,8 @@ class TestS3GateLocking(TestS3GateBase):
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name, version_id)
@allure.title("Test S3: Checking the impossibility to change the retention mode COMPLIANCE")
def test_s3_mode_compliance(self, version_id):
file_path = generate_file()
def test_s3_mode_compliance(self, version_id, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
retention_period = 2
retention_period_1 = 1
@ -115,8 +115,8 @@ class TestS3GateLocking(TestS3GateBase):
)
@allure.title("Test S3: Checking the ability to change retention mode GOVERNANCE")
def test_s3_mode_governance(self, version_id):
file_path = generate_file()
def test_s3_mode_governance(self, version_id, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
retention_period = 3
retention_period_1 = 2
@ -183,8 +183,8 @@ class TestS3GateLocking(TestS3GateBase):
)
@allure.title("Test S3: Checking if an Object Cannot Be Locked")
def test_s3_legal_hold(self, version_id):
file_path = generate_file()
def test_s3_legal_hold(self, version_id, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, False)
@ -205,8 +205,8 @@ class TestS3GateLocking(TestS3GateBase):
@pytest.mark.s3_gate
class TestS3GateLockingBucket(TestS3GateBase):
@allure.title("Test S3: Bucket Lock")
def test_s3_bucket_lock(self):
file_path = generate_file()
def test_s3_bucket_lock(self, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
configuration = {"Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}}}

View file

@ -7,7 +7,7 @@ from random import choices, sample
import allure
import pytest
from aws_cli_client import AwsCliClient
from common import ASSETS_DIR, COMPLEX_OBJ_SIZE, FREE_STORAGE, SIMPLE_OBJ_SIZE, WALLET_PASS
from common import ASSETS_DIR, FREE_STORAGE, WALLET_PASS
from data_formatters import get_wallet_public_key
from file_helper import concat_files, generate_file, generate_file_with_content, get_file_hash
from neofs_testlib.utils.wallet import init_wallet
@ -32,8 +32,8 @@ class TestS3GateObject(TestS3GateBase):
return os.path.basename(full_path)
@allure.title("Test S3: Copy object")
def test_s3_copy_object(self, two_buckets):
file_path = generate_file()
def test_s3_copy_object(self, two_buckets, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = self.object_key_from_file_path(file_path)
bucket_1_objects = [file_name]
@ -79,9 +79,9 @@ class TestS3GateObject(TestS3GateBase):
s3_gate_object.copy_object_s3(self.s3_client, bucket_1, file_name)
@allure.title("Test S3: Copy version of object")
def test_s3_copy_version_object(self, two_buckets):
def test_s3_copy_version_object(self, two_buckets, simple_object_size):
version_1_content = "Version 1"
file_name_simple = generate_file_with_content(content=version_1_content)
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
bucket_1, bucket_2 = two_buckets
@ -115,9 +115,9 @@ class TestS3GateObject(TestS3GateBase):
s3_gate_object.copy_object_s3(self.s3_client, bucket_1, obj_key)
@allure.title("Test S3: Checking copy with acl")
def test_s3_copy_acl(self, bucket):
def test_s3_copy_acl(self, bucket, simple_object_size):
version_1_content = "Version 1"
file_name_simple = generate_file_with_content(content=version_1_content)
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
@ -137,9 +137,9 @@ class TestS3GateObject(TestS3GateBase):
), "Permission for all groups is FULL_CONTROL"
@allure.title("Test S3: Copy object with metadata")
def test_s3_copy_metadate(self, bucket):
def test_s3_copy_metadate(self, bucket, simple_object_size):
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
file_path = generate_file()
file_path = generate_file(simple_object_size)
file_name = self.object_key_from_file_path(file_path)
bucket_1_objects = [file_name]
@ -187,9 +187,9 @@ class TestS3GateObject(TestS3GateBase):
), f"Metadata must be {object_metadata_1}"
@allure.title("Test S3: Copy object with tagging")
def test_s3_copy_tagging(self, bucket):
def test_s3_copy_tagging(self, bucket, simple_object_size):
object_tagging = [(f"{uuid.uuid4()}", f"{uuid.uuid4()}")]
file_path = generate_file()
file_path = generate_file(simple_object_size)
file_name_simple = self.object_key_from_file_path(file_path)
bucket_1_objects = [file_name_simple]
@ -239,10 +239,10 @@ class TestS3GateObject(TestS3GateBase):
assert tag in got_tags, f"Expected tag {tag} in {got_tags}"
@allure.title("Test S3: Delete version of object")
def test_s3_delete_versioning(self, bucket):
def test_s3_delete_versioning(self, bucket, complex_object_size, simple_object_size):
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(content=version_1_content)
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
@ -250,7 +250,7 @@ class TestS3GateObject(TestS3GateBase):
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
file_name_1 = generate_file_with_content(
file_path=file_name_simple, content=version_2_content
simple_object_size, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
@ -287,7 +287,7 @@ class TestS3GateObject(TestS3GateBase):
assert not "DeleteMarkers" in delete_obj.keys(), "Delete markes not found"
with allure.step("Put new object into bucket"):
file_name_simple = generate_file(COMPLEX_OBJ_SIZE)
file_name_simple = generate_file(complex_object_size)
obj_key = os.path.basename(file_name_simple)
version_id = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
@ -298,12 +298,12 @@ class TestS3GateObject(TestS3GateBase):
assert "DeleteMarker" in delete_obj.keys(), f"Expected delete Marker"
@allure.title("Test S3: bulk delete version of object")
def test_s3_bulk_delete_versioning(self, bucket):
def test_s3_bulk_delete_versioning(self, bucket, simple_object_size):
version_1_content = "Version 1"
version_2_content = "Version 2"
version_3_content = "Version 3"
version_4_content = "Version 4"
file_name_1 = generate_file_with_content(content=version_1_content)
file_name_1 = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_1)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
@ -311,15 +311,15 @@ class TestS3GateObject(TestS3GateBase):
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
file_name_2 = generate_file_with_content(
file_path=file_name_1, content=version_2_content
simple_object_size, file_path=file_name_1, content=version_2_content
)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_2)
file_name_3 = generate_file_with_content(
file_path=file_name_1, content=version_3_content
simple_object_size, file_path=file_name_1, content=version_3_content
)
version_id_3 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_3)
file_name_4 = generate_file_with_content(
file_path=file_name_1, content=version_4_content
simple_object_size, file_path=file_name_1, content=version_4_content
)
version_id_4 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_4)
version_ids = {version_id_1, version_id_2, version_id_3, version_id_4}
@ -349,17 +349,17 @@ class TestS3GateObject(TestS3GateBase):
), f"Expected object has versions: {version_to_save}"
@allure.title("Test S3: Get versions of object")
def test_s3_get_versioning(self, bucket):
def test_s3_get_versioning(self, bucket, simple_object_size):
version_1_content = "Version 1"
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(content=version_1_content)
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
file_name_1 = generate_file_with_content(
file_path=file_name_simple, content=version_2_content
simple_object_size, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
@ -388,14 +388,14 @@ class TestS3GateObject(TestS3GateBase):
), f"Get object with version {version_id_2}"
@allure.title("Test S3: Get range")
def test_s3_get_range(self, bucket):
file_path = generate_file(COMPLEX_OBJ_SIZE)
def test_s3_get_range(self, bucket, complex_object_size: int, simple_object_size: int):
file_path = generate_file(complex_object_size)
file_name = self.object_key_from_file_path(file_path)
file_hash = get_file_hash(file_path)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
file_name_1 = generate_file_with_content(file_path=file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
with allure.step("Get first version of object"):
@ -404,42 +404,46 @@ class TestS3GateObject(TestS3GateBase):
bucket,
file_name,
version_id_1,
range=[0, int(COMPLEX_OBJ_SIZE / 3)],
range=[0, int(complex_object_size / 3)],
)
object_1_part_2 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
version_id_1,
range=[int(COMPLEX_OBJ_SIZE / 3) + 1, 2 * int(COMPLEX_OBJ_SIZE / 3)],
range=[int(complex_object_size / 3) + 1, 2 * int(complex_object_size / 3)],
)
object_1_part_3 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
version_id_1,
range=[2 * int(COMPLEX_OBJ_SIZE / 3) + 1, COMPLEX_OBJ_SIZE],
range=[2 * int(complex_object_size / 3) + 1, complex_object_size],
)
con_file = concat_files([object_1_part_1, object_1_part_2, object_1_part_3])
assert get_file_hash(con_file) == file_hash, "Hashes must be the same"
with allure.step("Get second version of object"):
object_2_part_1 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, version_id_2, range=[0, int(SIMPLE_OBJ_SIZE / 3)]
self.s3_client,
bucket,
file_name,
version_id_2,
range=[0, int(simple_object_size / 3)],
)
object_2_part_2 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
version_id_2,
range=[int(SIMPLE_OBJ_SIZE / 3) + 1, 2 * int(SIMPLE_OBJ_SIZE / 3)],
range=[int(simple_object_size / 3) + 1, 2 * int(simple_object_size / 3)],
)
object_2_part_3 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
version_id_2,
range=[2 * int(SIMPLE_OBJ_SIZE / 3) + 1, COMPLEX_OBJ_SIZE],
range=[2 * int(simple_object_size / 3) + 1, simple_object_size],
)
con_file_1 = concat_files([object_2_part_1, object_2_part_2, object_2_part_3])
assert get_file_hash(con_file_1) == get_file_hash(
@ -448,28 +452,28 @@ class TestS3GateObject(TestS3GateBase):
with allure.step("Get object"):
object_3_part_1 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, range=[0, int(SIMPLE_OBJ_SIZE / 3)]
self.s3_client, bucket, file_name, range=[0, int(simple_object_size / 3)]
)
object_3_part_2 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
range=[int(SIMPLE_OBJ_SIZE / 3) + 1, 2 * int(SIMPLE_OBJ_SIZE / 3)],
range=[int(simple_object_size / 3) + 1, 2 * int(simple_object_size / 3)],
)
object_3_part_3 = s3_gate_object.get_object_s3(
self.s3_client,
bucket,
file_name,
range=[2 * int(SIMPLE_OBJ_SIZE / 3) + 1, COMPLEX_OBJ_SIZE],
range=[2 * int(simple_object_size / 3) + 1, simple_object_size],
)
con_file = concat_files([object_3_part_1, object_3_part_2, object_3_part_3])
assert get_file_hash(con_file) == get_file_hash(file_name_1), "Hashes must be the same"
@allure.title("Test S3: Copy object with metadata")
@pytest.mark.smoke
def test_s3_head_object(self, bucket):
def test_s3_head_object(self, bucket, complex_object_size, simple_object_size):
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
file_path = generate_file(COMPLEX_OBJ_SIZE)
file_path = generate_file(complex_object_size)
file_name = self.object_key_from_file_path(file_path)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
@ -477,7 +481,7 @@ class TestS3GateObject(TestS3GateBase):
version_id_1 = s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path, Metadata=object_metadata
)
file_name_1 = generate_file_with_content(file_path=file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
with allure.step("Get head of first version of object"):
@ -506,10 +510,10 @@ class TestS3GateObject(TestS3GateBase):
@allure.title("Test S3: list of object with versions")
@pytest.mark.parametrize("list_type", ["v1", "v2"])
def test_s3_list_object(self, list_type: str, bucket):
file_path_1 = generate_file(COMPLEX_OBJ_SIZE)
def test_s3_list_object(self, list_type: str, bucket, complex_object_size):
file_path_1 = generate_file(complex_object_size)
file_name = self.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(COMPLEX_OBJ_SIZE)
file_path_2 = generate_file(complex_object_size)
file_name_2 = self.object_key_from_file_path(file_path_2)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
@ -543,8 +547,8 @@ class TestS3GateObject(TestS3GateBase):
assert "DeleteMarker" in delete_obj.keys(), f"Expected delete Marker"
@allure.title("Test S3: put object")
def test_s3_put_object(self, bucket):
file_path_1 = generate_file(COMPLEX_OBJ_SIZE)
def test_s3_put_object(self, bucket, complex_object_size, simple_object_size):
file_path_1 = generate_file(complex_object_size)
file_name = self.object_key_from_file_path(file_path_1)
object_1_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
tag_key_1 = "tag1"
@ -569,7 +573,7 @@ class TestS3GateObject(TestS3GateBase):
], "Tags must be the same"
with allure.step("Rewrite file into bucket"):
file_path_2 = generate_file_with_content(file_path=file_path_1)
file_path_2 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path_2, Metadata=object_2_metadata, Tagging=tag_2
)
@ -583,7 +587,7 @@ class TestS3GateObject(TestS3GateBase):
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
file_path_3 = generate_file(COMPLEX_OBJ_SIZE)
file_path_3 = generate_file(complex_object_size)
file_hash = get_file_hash(file_path_3)
file_name_3 = self.object_key_from_file_path(file_path_3)
object_3_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
@ -604,7 +608,7 @@ class TestS3GateObject(TestS3GateBase):
], "Tags must be the same"
with allure.step("Put new version of file into bucket"):
file_path_4 = generate_file_with_content(file_path=file_path_3)
file_path_4 = generate_file_with_content(simple_object_size, file_path=file_path_3)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_4)
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
obj_versions = {
@ -680,8 +684,15 @@ class TestS3GateObject(TestS3GateBase):
@allure.title("Test S3: put object with ACL")
@pytest.mark.parametrize("bucket_versioning", ["ENABLED", "SUSPENDED"])
def test_s3_put_object_acl(self, prepare_two_wallets, bucket_versioning, bucket):
file_path_1 = generate_file(COMPLEX_OBJ_SIZE)
def test_s3_put_object_acl(
self,
prepare_two_wallets,
bucket_versioning,
bucket,
complex_object_size,
simple_object_size,
):
file_path_1 = generate_file(complex_object_size)
file_name = self.object_key_from_file_path(file_path_1)
if bucket_versioning == "ENABLED":
status = s3_gate_bucket.VersioningStatus.ENABLED
@ -698,7 +709,7 @@ class TestS3GateObject(TestS3GateBase):
assert get_file_hash(file_path_1) == get_file_hash(object_1), "Hashes must be the same"
with allure.step("Put object with acl public-read"):
file_path_2 = generate_file_with_content(file_path=file_path_1)
file_path_2 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_2, ACL="public-read")
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name)
obj_permission = [permission.get("Permission") for permission in obj_acl]
@ -710,7 +721,7 @@ class TestS3GateObject(TestS3GateBase):
assert get_file_hash(file_path_2) == get_file_hash(object_2), "Hashes must be the same"
with allure.step("Put object with acl public-read-write"):
file_path_3 = generate_file_with_content(file_path=file_path_1)
file_path_3 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path_3, ACL="public-read-write"
)
@ -724,7 +735,7 @@ class TestS3GateObject(TestS3GateBase):
assert get_file_hash(file_path_3) == get_file_hash(object_3), "Hashes must be the same"
with allure.step("Put object with acl authenticated-read"):
file_path_4 = generate_file_with_content(file_path=file_path_1)
file_path_4 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client, bucket, file_path_4, ACL="authenticated-read"
)
@ -737,11 +748,11 @@ class TestS3GateObject(TestS3GateBase):
object_4 = s3_gate_object.get_object_s3(self.s3_client, bucket, file_name)
assert get_file_hash(file_path_4) == get_file_hash(object_4), "Hashes must be the same"
file_path_5 = generate_file(COMPLEX_OBJ_SIZE)
file_path_5 = generate_file(complex_object_size)
file_name_5 = self.object_key_from_file_path(file_path_5)
with allure.step("Put object with --grant-full-control id=mycanonicaluserid"):
file_path_6 = generate_file_with_content(file_path=file_path_5)
file_path_6 = generate_file_with_content(simple_object_size, file_path=file_path_5)
s3_gate_object.put_object_s3(
self.s3_client,
bucket,
@ -760,7 +771,7 @@ class TestS3GateObject(TestS3GateBase):
with allure.step(
"Put object with --grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"
):
file_path_7 = generate_file_with_content(file_path=file_path_5)
file_path_7 = generate_file_with_content(simple_object_size, file_path=file_path_5)
s3_gate_object.put_object_s3(
self.s3_client,
bucket,
@ -777,10 +788,11 @@ class TestS3GateObject(TestS3GateBase):
assert get_file_hash(file_path_7) == get_file_hash(object_7), "Hashes must be the same"
@allure.title("Test S3: put object with lock-mode")
def test_s3_put_object_lock_mode(self, bucket):
def test_s3_put_object_lock_mode(self, complex_object_size, simple_object_size):
file_path_1 = generate_file(COMPLEX_OBJ_SIZE)
file_path_1 = generate_file(complex_object_size)
file_name = self.object_key_from_file_path(file_path_1)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
with allure.step(
@ -803,7 +815,7 @@ class TestS3GateObject(TestS3GateBase):
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +3days]"
):
date_obj = datetime.utcnow() + timedelta(days=2)
file_name_1 = generate_file_with_content(file_path=file_path_1)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client,
bucket,
@ -819,7 +831,7 @@ class TestS3GateObject(TestS3GateBase):
"Put new version of object with [--object-lock-mode COMPLIANCE] и [--object-lock-retain-until-date +2days]"
):
date_obj = datetime.utcnow() + timedelta(days=3)
file_name_1 = generate_file_with_content(file_path=file_path_1)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path_1)
s3_gate_object.put_object_s3(
self.s3_client,
bucket,
@ -857,7 +869,7 @@ class TestS3GateObject(TestS3GateBase):
@allure.title("Test S3 Sync directory")
@pytest.mark.parametrize("sync_type", ["sync", "cp"])
def test_s3_sync_dir(self, sync_type, bucket):
def test_s3_sync_dir(self, sync_type, bucket, simple_object_size):
file_path_1 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_1")
file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
object_metadata = {f"{uuid.uuid4()}": f"{uuid.uuid4()}"}
@ -866,8 +878,8 @@ class TestS3GateObject(TestS3GateBase):
if not isinstance(self.s3_client, AwsCliClient):
pytest.skip("This test is not supported with boto3 client")
generate_file_with_content(file_path=file_path_1)
generate_file_with_content(file_path=file_path_2)
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size, file_path=file_path_2)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
# TODO: return ACL, when https://github.com/nspcc-dev/neofs-s3-gw/issues/685 will be closed
if sync_type == "sync":
@ -909,10 +921,10 @@ class TestS3GateObject(TestS3GateBase):
# ], "Permission for all groups is FULL_CONTROL"
@allure.title("Test S3 Put 10 nested level object")
def test_s3_put_10_folder(self, bucket, temp_directory):
def test_s3_put_10_folder(self, bucket, temp_directory, simple_object_size):
path = "/".join(["".join(choices(string.ascii_letters, k=3)) for _ in range(10)])
file_path_1 = os.path.join(temp_directory, path, "test_file_1")
generate_file_with_content(file_path=file_path_1)
generate_file_with_content(simple_object_size, file_path=file_path_1)
file_name = self.object_key_from_file_path(file_path_1)
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"

View file

@ -1,21 +1,11 @@
import os
import time
from datetime import datetime, timedelta
from random import choice
from string import ascii_letters
from typing import Tuple
import allure
import pytest
from file_helper import generate_file, generate_file_with_content
from file_helper import generate_file
from python_keywords.container import search_container_by_name
from python_keywords.storage_policy import get_simple_object_copies
from s3_helper import (
assert_object_lock_mode,
check_objects_in_bucket,
object_key_from_file_path,
set_bucket_versioning,
)
from s3_helper import check_objects_in_bucket, object_key_from_file_path, set_bucket_versioning
from steps import s3_gate_bucket, s3_gate_object
from steps.s3_gate_base import TestS3GateBase
@ -35,10 +25,10 @@ def pytest_generate_tests(metafunc):
@pytest.mark.s3_gate
class TestS3GatePolicy(TestS3GateBase):
@allure.title("Test S3: Verify bucket creation with retention policy applied")
def test_s3_bucket_location(self):
file_path_1 = generate_file()
def test_s3_bucket_location(self, simple_object_size):
file_path_1 = generate_file(simple_object_size)
file_name_1 = object_key_from_file_path(file_path_1)
file_path_2 = generate_file()
file_path_2 = generate_file(simple_object_size)
file_name_2 = object_key_from_file_path(file_path_2)
with allure.step("Create two buckets with different bucket configuration"):

View file

@ -32,8 +32,8 @@ class TestS3GateTagging(TestS3GateBase):
return tags
@allure.title("Test S3: Object tagging")
def test_s3_object_tagging(self, bucket):
file_path = generate_file()
def test_s3_object_tagging(self, bucket, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
with allure.step("Put with 3 tags object into bucket"):

View file

@ -30,8 +30,8 @@ class TestS3GateVersioning(TestS3GateBase):
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.SUSPENDED)
@allure.title("Test S3: Enable and disable versioning")
def test_s3_version(self):
file_path = generate_file()
def test_s3_version(self, simple_object_size):
file_path = generate_file(simple_object_size)
file_name = self.object_key_from_file_path(file_path)
bucket_objects = [file_name]
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, False)
@ -61,7 +61,7 @@ class TestS3GateVersioning(TestS3GateBase):
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
file_name_1 = generate_file_with_content(file_path=file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
with allure.step("Check bucket shows all versions"):

View file

@ -3,7 +3,7 @@ import random
import allure
import pytest
from cluster_test_base import ClusterTestBase
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE, WALLET_PASS
from common import WALLET_PASS
from file_helper import generate_file
from grpc_responses import SESSION_NOT_FOUND
from neofs_testlib.utils.wallet import get_last_address_from_wallet
@ -19,7 +19,7 @@ class TestDynamicObjectSession(ClusterTestBase):
@allure.title("Test Object Operations with Session Token")
@pytest.mark.parametrize(
"object_size",
[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
)
def test_object_session_token(self, default_wallet, object_size):

View file

@ -4,7 +4,6 @@ import allure
import pytest
from cluster import Cluster
from cluster_test_base import ClusterTestBase
from common import COMPLEX_OBJ_SIZE, SIMPLE_OBJ_SIZE
from epoch import ensure_fresh_epoch
from file_helper import generate_file
from grpc_responses import MALFORMED_REQUEST, OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
@ -13,7 +12,6 @@ from pytest import FixtureRequest
from python_keywords.container import create_container
from python_keywords.neofs_verbs import (
delete_object,
get_netmap_netinfo,
get_object,
get_object_from_random_node,
get_range,
@ -22,6 +20,7 @@ from python_keywords.neofs_verbs import (
put_object_to_random_node,
search_object,
)
from test_control import expect_not_raises
from wallet import WalletFile
from helpers.storage_object_info import StorageObjectInfo
@ -58,7 +57,7 @@ def storage_containers(
@pytest.fixture(
params=[SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE],
params=[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
ids=["simple object", "complex object"],
# Scope module to upload/delete each files set only once
scope="module",
@ -98,16 +97,15 @@ def storage_objects(
@allure.step("Get ranges for test")
def get_ranges(storage_object: StorageObjectInfo, shell: Shell, endpoint: str) -> list[str]:
def get_ranges(
storage_object: StorageObjectInfo, max_object_size: int, shell: Shell, endpoint: str
) -> list[str]:
"""
Returns ranges to test range/hash methods via static session
"""
object_size = storage_object.size
if object_size == COMPLEX_OBJ_SIZE:
net_info = get_netmap_netinfo(storage_object.wallet_file_path, shell, endpoint)
max_object_size = net_info["maximum_object_size"]
# make sure to test multiple parts of complex object
if object_size > max_object_size:
assert object_size >= max_object_size + RANGE_OFFSET_FOR_COMPLEX_OBJECT
return [
"0:10",
@ -160,9 +158,9 @@ class TestObjectStaticSession(ClusterTestBase):
self,
user_wallet: WalletFile,
storage_objects: list[StorageObjectInfo],
static_sessions: list[str],
static_sessions: dict[ObjectVerb, str],
method_under_test,
verb: str,
verb: ObjectVerb,
request: FixtureRequest,
):
"""
@ -175,9 +173,9 @@ class TestObjectStaticSession(ClusterTestBase):
for node in self.cluster.storage_nodes:
for storage_object in storage_objects[0:2]:
method_under_test(
user_wallet.path,
storage_object.cid,
storage_object.oid,
wallet=user_wallet.path,
cid=storage_object.cid,
oid=storage_object.oid,
shell=self.shell,
endpoint=node.get_rpc_endpoint(),
session=static_sessions[verb],
@ -193,10 +191,11 @@ class TestObjectStaticSession(ClusterTestBase):
self,
user_wallet: WalletFile,
storage_objects: list[StorageObjectInfo],
static_sessions: list[str],
static_sessions: dict[ObjectVerb, str],
method_under_test,
verb: str,
verb: ObjectVerb,
request: FixtureRequest,
max_object_size,
):
"""
Validate static session with range operations
@ -205,10 +204,13 @@ class TestObjectStaticSession(ClusterTestBase):
f"Validate static session with range operations for {request.node.callspec.id}"
)
storage_object = storage_objects[0]
ranges_to_test = get_ranges(storage_object, self.shell, self.cluster.default_rpc_endpoint)
ranges_to_test = get_ranges(
storage_object, max_object_size, self.shell, self.cluster.default_rpc_endpoint
)
for range_to_test in ranges_to_test:
with allure.step(f"Check range {range_to_test}"):
with expect_not_raises():
method_under_test(
user_wallet.path,
storage_object.cid,
@ -227,7 +229,7 @@ class TestObjectStaticSession(ClusterTestBase):
self,
user_wallet: WalletFile,
storage_objects: list[StorageObjectInfo],
static_sessions: list[str],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
@ -253,7 +255,7 @@ class TestObjectStaticSession(ClusterTestBase):
self,
user_wallet: WalletFile,
storage_objects: list[StorageObjectInfo],
static_sessions: list[str],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
@ -278,7 +280,7 @@ class TestObjectStaticSession(ClusterTestBase):
self,
stranger_wallet: WalletFile,
storage_objects: list[StorageObjectInfo],
static_sessions: list[str],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
@ -305,7 +307,7 @@ class TestObjectStaticSession(ClusterTestBase):
self,
user_wallet: WalletFile,
storage_objects: list[StorageObjectInfo],
static_sessions: list[str],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
@ -333,7 +335,7 @@ class TestObjectStaticSession(ClusterTestBase):
user_wallet: WalletFile,
storage_objects: list[StorageObjectInfo],
storage_containers: list[str],
static_sessions: list[str],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
@ -361,7 +363,7 @@ class TestObjectStaticSession(ClusterTestBase):
owner_wallet: WalletFile,
user_wallet: WalletFile,
stranger_wallet: WalletFile,
storage_containers: list[int],
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
request: FixtureRequest,
@ -638,7 +640,7 @@ class TestObjectStaticSession(ClusterTestBase):
self,
user_wallet: WalletFile,
storage_objects: list[StorageObjectInfo],
static_sessions: list[str],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""
@ -663,7 +665,7 @@ class TestObjectStaticSession(ClusterTestBase):
self,
user_wallet: WalletFile,
storage_objects: list[StorageObjectInfo],
static_sessions: list[str],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
):
"""

View file

@ -25,6 +25,7 @@ from steps.cluster_test_base import ClusterTestBase
from steps.session_token import ContainerVerb, get_container_signed_token
@pytest.mark.static_session_container
class TestSessionTokenContainer(ClusterTestBase):
@pytest.fixture(scope="module")
def static_sessions(
@ -74,7 +75,6 @@ class TestSessionTokenContainer(ClusterTestBase):
owner_wallet.path, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
@pytest.mark.skip("Failed with timeout")
def test_static_session_token_container_create_with_other_verb(
self,
user_wallet: WalletFile,
@ -94,7 +94,6 @@ class TestSessionTokenContainer(ClusterTestBase):
wait_for_creation=False,
)
@pytest.mark.skip("Failed with timeout")
def test_static_session_token_container_create_with_other_wallet(
self,
stranger_wallet: WalletFile,
@ -136,6 +135,7 @@ class TestSessionTokenContainer(ClusterTestBase):
session_token=static_sessions[ContainerVerb.DELETE],
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
await_mode=True,
)
assert cid not in list_containers(
@ -148,6 +148,7 @@ class TestSessionTokenContainer(ClusterTestBase):
user_wallet: WalletFile,
stranger_wallet: WalletFile,
static_sessions: dict[ContainerVerb, str],
simple_object_size,
):
"""
Validate static session with set eacl operation
@ -159,7 +160,7 @@ class TestSessionTokenContainer(ClusterTestBase):
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
file_path = generate_file()
file_path = generate_file(simple_object_size)
assert can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster)
with allure.step(f"Deny all operations for other via eACL"):

View file

@ -0,0 +1,153 @@
import json
import pathlib
import re
from dataclasses import dataclass
from io import StringIO
import allure
import pytest
import yaml
from cluster import Cluster, StorageNode
from common import WALLET_CONFIG
from configobj import ConfigObj
from neofs_testlib.cli import NeofsCli
SHARD_PREFIX = "NEOFS_STORAGE_SHARD_"
BLOBSTOR_PREFIX = "_BLOBSTOR_"
@dataclass
class Blobstor:
path: str
path_type: str
def __eq__(self, other) -> bool:
if not isinstance(other, self.__class__):
raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared")
return self.path == other.path and self.path_type == other.path_type
def __hash__(self):
return hash((self.path, self.path_type))
@staticmethod
def from_config_object(section: ConfigObj, shard_id: str, blobstor_id: str):
var_prefix = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}{blobstor_id}"
return Blobstor(section.get(f"{var_prefix}_PATH"), section.get(f"{var_prefix}_TYPE"))
@dataclass
class Shard:
blobstor: list[Blobstor]
metabase: str
writecache: str
def __eq__(self, other) -> bool:
if not isinstance(other, self.__class__):
raise RuntimeError(f"Only two {self.__class__.__name__} instances can be compared")
return (
set(self.blobstor) == set(other.blobstor)
and self.metabase == other.metabase
and self.writecache == other.writecache
)
def __hash__(self):
return hash((self.metabase, self.writecache))
@staticmethod
def _get_blobstor_count_from_section(config_object: ConfigObj, shard_id: int):
pattern = f"{SHARD_PREFIX}{shard_id}{BLOBSTOR_PREFIX}"
blobstors = {key[: len(pattern) + 2] for key in config_object.keys() if pattern in key}
return len(blobstors)
@staticmethod
def from_config_object(config_object: ConfigObj, shard_id: int):
var_prefix = f"{SHARD_PREFIX}{shard_id}"
blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id)
blobstors = [
Blobstor.from_config_object(config_object, shard_id, blobstor_id)
for blobstor_id in range(blobstor_count)
]
write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED")
return Shard(
blobstors,
config_object.get(f"{var_prefix}_METABASE_PATH"),
config_object.get(f"{var_prefix}_WRITECACHE_PATH") if write_cache_enabled else "",
)
@staticmethod
def from_object(shard):
metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"]
writecache = (
shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"]
)
return Shard(
blobstor=[
Blobstor(path=blobstor["path"], path_type=blobstor["type"])
for blobstor in shard["blobstor"]
],
metabase=metabase,
writecache=writecache,
)
def shards_from_yaml(contents: str) -> list[Shard]:
config = yaml.safe_load(contents)
config["storage"]["shard"].pop("default")
return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()]
def shards_from_env(contents: str) -> list[Shard]:
configObj = ConfigObj(StringIO(contents))
pattern = f"{SHARD_PREFIX}\d*"
num_shards = len(set(re.findall(pattern, contents)))
return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)]
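For illustration, a hedged sketch of the parsers above applied to a made-up two-blobstor .env snippet (values invented; assumes Shard, Blobstor and shards_from_env are importable from this module):

env_snippet = "\n".join(
    [
        "NEOFS_STORAGE_SHARD_0_METABASE_PATH=/storage/meta0",
        "NEOFS_STORAGE_SHARD_0_WRITECACHE_ENABLED=false",
        "NEOFS_STORAGE_SHARD_0_BLOBSTOR_0_PATH=/storage/blobovnicza0",
        "NEOFS_STORAGE_SHARD_0_BLOBSTOR_0_TYPE=blobovnicza",
        "NEOFS_STORAGE_SHARD_0_BLOBSTOR_1_PATH=/storage/fstree0",
        "NEOFS_STORAGE_SHARD_0_BLOBSTOR_1_TYPE=fstree",
    ]
)
# one shard with two blobstors and write-cache disabled is expected
assert shards_from_env(env_snippet) == [
    Shard(
        blobstor=[
            Blobstor(path="/storage/blobovnicza0", path_type="blobovnicza"),
            Blobstor(path="/storage/fstree0", path_type="fstree"),
        ],
        metabase="/storage/meta0",
        writecache="",
    )
]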
@pytest.mark.sanity
@pytest.mark.shard
class TestControlShard:
@staticmethod
def get_shards_from_config(node: StorageNode) -> list[Shard]:
config_file = node.get_remote_config_path()
file_type = pathlib.Path(config_file).suffix
contents = node.host.get_shell().exec(f"cat {config_file}").stdout
parser_method = {
".env": shards_from_env,
".yaml": shards_from_yaml,
".yml": shards_from_yaml,
}
shards = parser_method[file_type](contents)
return shards
@staticmethod
def get_shards_from_cli(node: StorageNode) -> list[Shard]:
wallet_path = node.get_remote_wallet_path()
wallet_password = node.get_wallet_password()
control_endpoint = node.get_control_endpoint()
cli_config = node.host.get_cli_config("neofs-cli")
cli = NeofsCli(node.host.get_shell(), cli_config.exec_path, WALLET_CONFIG)
result = cli.shards.list(
endpoint=control_endpoint,
wallet=wallet_path,
wallet_password=wallet_password,
json_mode=True,
)
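# assumption: stdout carries a prompt/echo prefix, so only the JSON after the first '>' is parsed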
return [Shard.from_object(shard) for shard in json.loads(result.stdout.split(">", 1)[1])]
@allure.title("All shards are available")
def test_control_shard(self, cluster: Cluster):
for storage_node in cluster.storage_nodes:
shards_from_config = self.get_shards_from_config(storage_node)
shards_from_cli = self.get_shards_from_cli(storage_node)
assert set(shards_from_config) == set(shards_from_cli)

View file

@ -16,6 +16,7 @@ cffi==1.15.0
chardet==4.0.0
charset-normalizer==2.0.12
coverage==6.3.3
configobj==5.0.6
docker==4.4.0
docutils==0.17.1
Events==0.4
@ -35,7 +36,7 @@ neo-mamba==0.10.0
neo3crypto==0.2.1
neo3vm==0.9.0
neo3vm-stubs==0.9.0
neofs-testlib==0.7.0
neofs-testlib==0.8.1
netaddr==0.8.0
orjson==3.6.8
packaging==21.3
@ -55,6 +56,7 @@ pyflakes==2.4.0
pyparsing==3.0.9
pyrsistent==0.18.1
pytest==7.1.2
pytest-lazy-fixture==0.6.3
python-dateutil==2.8.2
pyyaml==6.0
requests==2.28.0

View file

@ -172,13 +172,14 @@ def form_bearertoken_file(
eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
shell: Shell,
endpoint: str,
sign: Optional[bool] = True,
) -> str:
"""
This function fetches the eACL for the given <cid> on behalf of <wif>,
extends it with filters taken from <eacl_rules>, optionally signs the
result as a bearer token, and writes it to a file
"""
enc_cid = _encode_cid_for_eacl(cid)
enc_cid = _encode_cid_for_eacl(cid) if cid else None
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
eacl = get_eacl(wif, cid, shell, endpoint)
@ -189,7 +190,7 @@ def form_bearertoken_file(
logger.info(json_eacl)
eacl_result = {
"body": {
"eaclTable": {"containerID": {"value": enc_cid}, "records": []},
"eaclTable": {"containerID": {"value": enc_cid} if cid else enc_cid, "records": []},
"lifetime": {"exp": EACL_LIFETIME, "nbf": "1", "iat": "0"},
}
}
@ -219,7 +220,14 @@ def form_bearertoken_file(
json.dump(eacl_result, eacl_file, ensure_ascii=False, indent=4)
logger.info(f"Got these extended ACL records: {eacl_result}")
sign_bearer(shell, wif, file_path)
if sign:
sign_bearer(
shell=shell,
wallet_path=wif,
eacl_rules_file_from=file_path,
eacl_rules_file_to=file_path,
json=True,
)
return file_path
@ -246,10 +254,12 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
return rules
def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file: str) -> None:
def sign_bearer(
shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool
) -> None:
neofscli = NeofsCli(shell=shell, neofs_cli_exec_path=NEOFS_CLI_EXEC, config_file=WALLET_CONFIG)
neofscli.util.sign_bearer_token(
wallet=wallet_path, from_file=eacl_rules_file, to_file=eacl_rules_file, json=True
wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
)
@ -257,3 +267,12 @@ def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file: str) -> None:
def wait_for_cache_expired():
sleep(NEOFS_CONTRACT_CACHE_TIMEOUT)
return
@allure.step("Return bearer token in base64 to caller")
def bearer_token_base64_from_file(
bearer_path: str,
) -> str:
with open(bearer_path, "rb") as file:
signed = file.read()
return base64.b64encode(signed).decode("utf-8")
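A hedged usage sketch chaining the two helpers above; the wallet path, container id and endpoint are placeholders, and shell is assumed to be an existing Shell instance:

token_path = form_bearertoken_file(
    wif="/path/to/wallet.json",
    cid="SomeContainerId",
    eacl_rule_list=[],
    shell=shell,
    endpoint="node.example:8080",
    sign=True,  # pass sign=False to keep the token unsigned
)
token_b64 = bearer_token_base64_from_file(token_path)
# token_b64 can now be handed to gates that expect a base64-encoded bearer token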

View file

@ -11,17 +11,97 @@
"""
import logging
from typing import Optional
from typing import Optional, Tuple
import allure
import neofs_verbs
from cluster import StorageNode
from cluster import Cluster, StorageNode
from common import WALLET_CONFIG
from neofs_testlib.shell import Shell
from neofs_verbs import head_object
from storage_object import StorageObjectInfo
logger = logging.getLogger("NeoLogger")
def get_storage_object_chunks(
storage_object: StorageObjectInfo, shell: Shell, cluster: Cluster
) -> list[str]:
"""
Get the object IDs of a complex object's split chunks (excluding the link object)
Args:
storage_object: storage object to get chunks from
shell: client shell to do cmd requests
cluster: cluster object under test
Returns:
list of object ids of complex object chunks
"""
with allure.step(f"Get complex object chunks (f{storage_object.oid})"):
split_object_id = get_link_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell,
cluster.storage_nodes,
is_direct=False,
)
head = head_object(
storage_object.wallet_file_path,
storage_object.cid,
split_object_id,
shell,
cluster.default_rpc_endpoint,
)
chunks_object_ids = []
if "split" in head["header"] and "children" in head["header"]["split"]:
chunks_object_ids = head["header"]["split"]["children"]
return chunks_object_ids
def get_complex_object_split_ranges(
storage_object: StorageObjectInfo, shell: Shell, cluster: Cluster
) -> list[Tuple[int, int]]:
"""
Get the list of split range tuples (offset, length) of a complex object
For example, if the object size is 100 and the max object size in the system is 30,
the returned list should be
[(0, 30), (30, 30), (60, 30), (90, 10)]
Args:
storage_object: storage object to get chunks from
shell: client shell to do cmd requests
cluster: cluster object under test
Returns:
list of (offset, length) tuples for the complex object's chunks
"""
ranges: list = []
offset = 0
chunks_ids = get_storage_object_chunks(storage_object, shell, cluster)
for chunk_id in chunks_ids:
head = head_object(
storage_object.wallet_file_path,
storage_object.cid,
chunk_id,
shell,
cluster.default_rpc_endpoint,
)
length = int(head["header"]["payloadLength"])
ranges.append((offset, length))
offset = offset + length
return ranges
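A standalone sketch of the range arithmetic above: offsets accumulate over the chunk payload lengths; the lengths here are the illustrative values from the docstring:

def ranges_from_lengths(lengths: list[int]) -> list[tuple[int, int]]:
    ranges, offset = [], 0
    for length in lengths:
        ranges.append((offset, length))
        offset += length
    return ranges

assert ranges_from_lengths([30, 30, 30, 10]) == [(0, 30), (30, 30), (60, 30), (90, 10)]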
@allure.step("Get Link Object")
def get_link_object(
wallet: str,

View file

@ -18,6 +18,8 @@ from neofs_testlib.shell import Shell
logger = logging.getLogger("NeoLogger")
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
@allure.step("Create Container")
@ -176,6 +178,7 @@ def delete_container(
endpoint: str,
force: bool = False,
session_token: Optional[str] = None,
await_mode: bool = False,
) -> None:
"""
A wrapper for `neofs-cli container delete` call.
@ -191,7 +194,12 @@ def delete_container(
cli = NeofsCli(shell, NEOFS_CLI_EXEC, WALLET_CONFIG)
cli.container.delete(
wallet=wallet, cid=cid, rpc_endpoint=endpoint, force=force, session=session_token
wallet=wallet,
cid=cid,
rpc_endpoint=endpoint,
force=force,
session=session_token,
await_mode=await_mode,
)
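A hedged call sketch for the extended wrapper above; the wallet path, cid, endpoint and token are placeholders, and shell is assumed to be an existing Shell instance (the parameter names before endpoint are not shown in this hunk):

delete_container(
    wallet="/path/to/wallet.json",
    cid="SomeContainerId",
    shell=shell,
    endpoint="node.example:8080",
    session_token=token,  # e.g. a static session token for ContainerVerb.DELETE
    await_mode=True,  # block until the removal is observable
)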

View file

@ -1,59 +1,91 @@
import json
import logging
from time import sleep
from typing import Optional
import allure
from cluster import Cluster
from common import MAINNET_BLOCK_TIME, NEOFS_ADM_CONFIG_PATH, NEOFS_ADM_EXEC, NEOGO_EXECUTABLE
from neofs_testlib.cli import NeofsAdm, NeoGo
from cluster import Cluster, StorageNode
from common import (
MAINNET_BLOCK_TIME,
NEOFS_ADM_CONFIG_PATH,
NEOFS_ADM_EXEC,
NEOFS_CLI_EXEC,
NEOGO_EXECUTABLE,
)
from neofs_testlib.cli import NeofsAdm, NeofsCli, NeoGo
from neofs_testlib.shell import Shell
from neofs_testlib.utils.wallet import get_last_address_from_wallet
from payment_neogo import get_contract_hash
from test_control import wait_for_success
from utility import parse_time
logger = logging.getLogger("NeoLogger")
@allure.step("Ensure fresh epoch")
def ensure_fresh_epoch(shell: Shell, cluster: Cluster) -> int:
def ensure_fresh_epoch(
shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None
) -> int:
# ensure a fresh epoch to avoid an epoch switch during the test session
current_epoch = get_epoch(shell, cluster)
tick_epoch(shell, cluster)
epoch = get_epoch(shell, cluster)
alive_node = alive_node if alive_node else cluster.storage_nodes[0]
current_epoch = get_epoch(shell, cluster, alive_node)
tick_epoch(shell, cluster, alive_node)
epoch = get_epoch(shell, cluster, alive_node)
assert epoch > current_epoch, "Epoch wasn't ticked"
return epoch
@allure.step("Get Epoch")
def get_epoch(shell: Shell, cluster: Cluster):
morph_chain = cluster.morph_chain_nodes[0]
morph_endpoint = morph_chain.get_endpoint()
@allure.step("Wait for epochs align in whole cluster")
@wait_for_success(60, 5)
def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> bool:
epochs = []
for node in cluster.storage_nodes:
epochs.append(get_epoch(shell, cluster, node))
unique_epochs = list(set(epochs))
assert (
len(unique_epochs) == 1
), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}"
neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.contract.testinvokefunction(
scripthash=get_contract_hash(morph_chain, "netmap.neofs", shell=shell),
method="epoch",
rpc_endpoint=morph_endpoint,
)
return int(json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"])
@allure.step("Get Epoch")
def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
alive_node = alive_node if alive_node else cluster.storage_nodes[0]
endpoint = alive_node.get_rpc_endpoint()
wallet_path = alive_node.get_wallet_path()
wallet_config = alive_node.get_wallet_config_path()
cli = NeofsCli(shell=shell, neofs_cli_exec_path=NEOFS_CLI_EXEC, config_file=wallet_config)
epoch = cli.netmap.epoch(endpoint, wallet_path)
return int(epoch.stdout)
@allure.step("Tick Epoch")
def tick_epoch(shell: Shell, cluster: Cluster):
def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
"""
Tick epoch using neofs-adm or NeoGo if neofs-adm is not available (DevEnv)
Args:
shell: local shell used to query the current epoch; a remote shell is used to tick the new one
cluster: cluster instance under test
alive_node: node to send requests to (first node in cluster by default)
"""
alive_node = alive_node if alive_node else cluster.storage_nodes[0]
remote_shell = alive_node.host.get_shell()
if NEOFS_ADM_EXEC and NEOFS_ADM_CONFIG_PATH:
# If neofs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
neofsadm = NeofsAdm(
shell=shell, neofs_adm_exec_path=NEOFS_ADM_EXEC, config_file=NEOFS_ADM_CONFIG_PATH
shell=remote_shell,
neofs_adm_exec_path=NEOFS_ADM_EXEC,
config_file=NEOFS_ADM_CONFIG_PATH,
)
neofsadm.morph.force_new_epoch()
return
# Use first node by default
# Otherwise we tick epoch using transaction
cur_epoch = get_epoch(shell, cluster)
# Use first node by default
ir_node = cluster.ir_nodes[0]
# If no local_wallet_path is provided, fall back to wallet_path
ir_wallet_path = ir_node.get_wallet_path()

View file

@ -1,14 +1,24 @@
import base64
import logging
import os
import random
import re
import shutil
import uuid
import zipfile
from typing import Optional
from urllib.parse import quote_plus
import allure
import requests
from aws_cli_client import LONG_TIMEOUT
from cli_helpers import _cmd_run
from cluster import StorageNode
from common import SIMPLE_OBJECT_SIZE
from file_helper import get_file_hash
from neofs_testlib.shell import Shell
from python_keywords.neofs_verbs import get_object
from python_keywords.storage_policy import get_nodes_without_object
logger = logging.getLogger("NeoLogger")
@ -16,14 +26,21 @@ ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
@allure.step("Get via HTTP Gate")
def get_via_http_gate(cid: str, oid: str, endpoint: str):
def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[str] = None):
"""
This function gets the given object from the HTTP gate
cid: container id to get object from
oid: object ID
endpoint: http gate endpoint
request_path: (optional) HTTP request path; if omitted, the default [{endpoint}/get/{cid}/{oid}] is used
"""
# if the `request_path` parameter is omitted, use the default
if request_path is None:
request = f"{endpoint}/get/{cid}/{oid}"
else:
request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True)
if not resp.ok:
@ -76,16 +93,24 @@ def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str):
@allure.step("Get via HTTP Gate by attribute")
def get_via_http_gate_by_attribute(cid: str, attribute: dict, endpoint: str):
def get_via_http_gate_by_attribute(
cid: str, attribute: dict, endpoint: str, request_path: Optional[str] = None
):
"""
This function gets the given object from the HTTP gate
cid: CID to get object from
attribute: attribute {name: attribute} value pair
endpoint: http gate endpoint
request_path: (optional) HTTP request path; if omitted, the default [{endpoint}/get_by_attribute/{Key}/{Value}] is used
"""
attr_name = list(attribute.keys())[0]
attr_value = quote_plus(str(attribute.get(attr_name)))
# if the `request_path` parameter is omitted, use the default
if request_path is None:
request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
else:
request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True)
if not resp.ok:
@ -135,9 +160,27 @@ def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: dict = Non
return resp.json().get("object_id")
@allure.step("Check is the passed object large")
def is_object_large(filepath: str) -> bool:
"""
This function checks the passed file's size and returns True if file_size > SIMPLE_OBJECT_SIZE
filepath: File path to check
"""
file_size = os.path.getsize(filepath)
logger.info(f"Size= {file_size}")
return file_size > int(SIMPLE_OBJECT_SIZE)
@allure.step("Upload via HTTP Gate using Curl")
def upload_via_http_gate_curl(
cid: str, filepath: str, endpoint: str, large_object=False, headers: dict = None
cid: str,
filepath: str,
endpoint: str,
headers: list = None,
error_pattern: Optional[str] = None,
) -> str:
"""
This function uploads the given object through the HTTP gate using the curl utility.
@ -145,14 +188,33 @@ def upload_via_http_gate_curl(
filepath: File path to upload
headers: Object header
endpoint: http gate endpoint
error_pattern: [optional] expected error message from the command
"""
request = f"{endpoint}/upload/{cid}"
files = f"file=@{filepath};filename={os.path.basename(filepath)}"
cmd = f"curl -F '{files}' {request}"
attributes = ""
if headers:
# join the attribute header flags for curl
attributes = " ".join(headers)
large_object = is_object_large(filepath)
if large_object:
# pre-clean
_cmd_run("rm pipe -f")
files = f"file=@pipe;filename={os.path.basename(filepath)}"
cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {request}"
cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {attributes} {request}"
output = _cmd_run(cmd, LONG_TIMEOUT)
# clean up pipe
_cmd_run("rm pipe")
else:
files = f"file=@{filepath};filename={os.path.basename(filepath)}"
cmd = f"curl -F '{files}' {attributes} {request}"
output = _cmd_run(cmd)
if error_pattern:
match = error_pattern.casefold() in str(output).casefold()
assert match, f"Expected {output} to match {error_pattern}"
return ""
oid_re = re.search(r'"object_id": "(.*)"', output)
if not oid_re:
raise AssertionError(f'Could not find "object_id" in {output}')
@ -180,3 +242,112 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
command_attachment = f"REQUEST: '{request}'\n" f"RESPONSE:\n {status_code}\n"
with allure.step(f"{req_type} Request"):
allure.attach(command_attachment, f"{req_type} Request", allure.attachment_type.TEXT)
@allure.step("Try to get object and expect error")
def try_to_get_object_and_expect_error(
cid: str, oid: str, error_pattern: str, endpoint: str
) -> None:
try:
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err:
match = error_pattern.casefold() in str(err).casefold()
assert match, f"Expected {err} to match {error_pattern}"
@allure.step("Verify object can be get using HTTP header attribute")
def get_object_by_attr_and_verify_hashes(
oid: str, file_name: str, cid: str, attrs: dict, endpoint: str
) -> None:
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
got_file_path_http_attr = get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=endpoint
)
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
def get_object_and_verify_hashes(
oid: str,
file_name: str,
wallet: str,
cid: str,
shell: Shell,
nodes: list[StorageNode],
endpoint: str,
object_getter=None,
) -> None:
nodes_list = get_nodes_without_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=shell,
nodes=nodes,
)
# nodes_list can be empty when the object resides on all nodes
if nodes_list:
random_node = random.choice(nodes_list)
else:
random_node = random.choice(nodes)
object_getter = object_getter or get_via_http_gate
got_file_path = get_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=shell,
endpoint=random_node.get_rpc_endpoint(),
)
got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint)
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: str) -> None:
msg = "Expected hashes are equal for files {f1} and {f2}"
got_file_hash_http = get_file_hash(got_file_1)
assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(
f1=orig_file_name, f2=got_file_1
)
def attr_into_header(attrs: dict) -> dict:
return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}
@allure.step(
"Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'"
)
def attr_into_str_header_curl(attrs: dict) -> list:
headers = []
for k, v in attrs.items():
headers.append(f"-H 'X-Attribute-{k}: {v}'")
logger.info(f"[List of Attrs for curl:] {headers}")
return headers
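A quick illustration of the two attribute-conversion helpers above (pure functions, safe to run standalone):

attrs = {"FileName": "cat.jpg", "Chapter": "one"}
assert attr_into_header(attrs) == {
    "X-Attribute-FileName": "cat.jpg",
    "X-Attribute-Chapter": "one",
}
assert attr_into_str_header_curl(attrs) == [
    "-H 'X-Attribute-FileName: cat.jpg'",
    "-H 'X-Attribute-Chapter: one'",
]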
@allure.step(
"Try to get object via http (pass http_request and optional attributes) and expect error"
)
def try_to_get_object_via_passed_request_and_expect_error(
cid: str,
oid: str,
error_pattern: str,
endpoint: str,
http_request_path: str,
attrs: dict = None,
) -> None:
try:
if attrs is None:
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path)
else:
get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path
)
raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err:
match = error_pattern.casefold() in str(err).casefold()
assert match, f"Expected {err} to match {error_pattern}"

View file

@ -7,7 +7,7 @@ from typing import Optional
import allure
from cluster import Cluster
from common import COMPLEX_OBJ_SIZE, NEOFS_CLI_EXEC, SIMPLE_OBJ_SIZE, WALLET_CONFIG
from common import NEOFS_CLI_EXEC, WALLET_CONFIG
from complex_object_actions import get_link_object
from neofs_testlib.cli import NeofsCli
from neofs_testlib.shell import Shell
@ -201,12 +201,13 @@ def verify_get_storage_group(
gid: str,
obj_list: list,
object_size: int,
max_object_size: int,
bearer: str = None,
wallet_config: str = WALLET_CONFIG,
):
obj_parts = []
endpoint = cluster.default_rpc_endpoint
if object_size == COMPLEX_OBJ_SIZE:
if object_size > max_object_size:
for obj in obj_list:
link_oid = get_link_object(
wallet,
@ -239,11 +240,10 @@ def verify_get_storage_group(
bearer=bearer,
wallet_config=wallet_config,
)
if object_size == SIMPLE_OBJ_SIZE:
exp_size = SIMPLE_OBJ_SIZE * obj_num
exp_size = object_size * obj_num
if object_size < max_object_size:
assert int(storagegroup_data["Group size"]) == exp_size
assert storagegroup_data["Members"] == obj_list
else:
exp_size = COMPLEX_OBJ_SIZE * obj_num
assert int(storagegroup_data["Group size"]) == exp_size
assert storagegroup_data["Members"] == obj_parts

View file

@ -4,9 +4,9 @@ import yaml
CONTAINER_WAIT_INTERVAL = "1m"
# TODO: Get object size data from a node config
SIMPLE_OBJ_SIZE = int(os.getenv("SIMPLE_OBJ_SIZE", "1000"))
COMPLEX_OBJ_SIZE = int(os.getenv("COMPLEX_OBJ_SIZE", "2000"))
SIMPLE_OBJECT_SIZE = os.getenv("SIMPLE_OBJECT_SIZE", "1000")
COMPLEX_OBJECT_CHUNKS_COUNT = os.getenv("COMPLEX_OBJECT_CHUNKS_COUNT", "3")
COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000")
MAINNET_BLOCK_TIME = os.getenv("MAINNET_BLOCK_TIME", "1s")
MAINNET_TIMEOUT = os.getenv("MAINNET_TIMEOUT", "1min")
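With the hard-coded COMPLEX_OBJ_SIZE gone, the complex object size is presumably derived at runtime from the node's maximum object size; the sketch below is an assumption about that arithmetic, not code from this change:

import os

chunks = int(os.getenv("COMPLEX_OBJECT_CHUNKS_COUNT", "3"))
tail = int(os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000"))
max_object_size = 1000  # assumed value reported by a storage node
complex_object_size = chunks * max_object_size + tail  # assumed derivation
assert complex_object_size > max_object_size  # guarantees the object is split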
@ -27,14 +27,6 @@ DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "neofs-dev-env"))
# Password of wallet owned by user on behalf of whom we are running tests
WALLET_PASS = os.getenv("WALLET_PASS", "")
# Load node parameters
LOAD_NODES = os.getenv("LOAD_NODES", "").split(",")
LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "root")
LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH")
BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 10)
BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 10)
BACKGROUND_OBJ_SIZE = os.getenv("BACKGROUND_OBJ_SIZE", 1024)
BACKGROUND_LOAD_MAX_TIME = os.getenv("BACKGROUND_LOAD_MAX_TIME", 600)
# Paths to CLI executables on machine that runs tests
NEOGO_EXECUTABLE = os.getenv("NEOGO_EXECUTABLE", "neo-go")

View file

@ -7,3 +7,5 @@ READONLY_ACL_F = "1FBF8CFF"
PUBLIC_ACL = "0FBFBFFF"
INACCESSIBLE_ACL = "40000000"
STICKYBIT_PUB_ACL = "3FFFFFFF"
EACL_PUBLIC_READ_WRITE = "eacl-public-read-write"

View file

@ -5,4 +5,4 @@ pushd $DEVENV_PATH > /dev/null
export `make env`
popd > /dev/null
export PYTHONPATH=${PYTHONPATH}:${VIRTUAL_ENV}/../robot/resources/lib/:${VIRTUAL_ENV}/../robot/resources/lib/python_keywords:${VIRTUAL_ENV}/../robot/resources/lib/robot:${VIRTUAL_ENV}/../robot/variables:${VIRTUAL_ENV}/../pytest_tests/helpers:${VIRTUAL_ENV}/../pytest_tests/steps
export PYTHONPATH=${PYTHONPATH}:${VIRTUAL_ENV}/../robot/resources/lib/:${VIRTUAL_ENV}/../robot/resources/lib/python_keywords:${VIRTUAL_ENV}/../robot/resources/lib/robot:${VIRTUAL_ENV}/../robot/variables:${VIRTUAL_ENV}/../pytest_tests/helpers:${VIRTUAL_ENV}/../pytest_tests/steps:${VIRTUAL_ENV}/../pytest_tests/resources