Compare commits


25 commits

Author | SHA1 | Message | Date
Dmitriy Zayakin <d.zayakin@yadro.com> | cc614a5f07 | Add await | 2023-11-16 09:08:17 +03:00
Dmitriy Zayakin <d.zayakin@yadro.com> | 5b1641edde | Fix s3_client fixture | 2023-06-30 11:23:29 +03:00
Dmitriy Zayakin <d.zayakin@yadro.com> | 10da8ae15e | Delete object after put to container | 2023-06-22 09:00:57 +00:00
Dmitriy Zayakin <d.zayakin@yadro.com> | 9d5f0d2235 | Delete wait replication steps for tests | 2023-06-22 09:00:57 +00:00
Yaroslava Lukoyanova | ad7897ea6f | Added case for not ignoring unhealthy tree endpoints | 2023-06-22 09:00:57 +00:00
Dmitriy Zayakin <d.zayakin@yadro.com> | 0c81ba6f22 | Fix test shutdown node | 2023-06-22 09:00:57 +00:00
Yaroslava Lukoyanova | 737c544fc0 | Add case for loss of pilorama on one node | 2023-06-22 09:00:57 +00:00
Yaroslava Lukoyanova | c9bfba136d | Add test case for loss of one node | 2023-06-22 09:00:57 +00:00
Andrey Berezin <a.berezin@yadro.com> | 9c3b1c771d | Add write-cache loss test | 2023-06-22 09:00:57 +00:00
n/a | 61771ed534 | Add metabase loss test | 2023-06-22 09:00:57 +00:00
Yaroslava Lukoyanova | 07e3ef3972 | Fixture for restore stopped storage nodes in test_failover_storage | 2023-06-22 09:00:57 +00:00
Yaroslava Lukoyanova | f84a06a2bb | Fixed typo in test_s3_delete_versioning case assertion | 2023-06-22 09:00:57 +00:00
Yaroslava Lukoyanova | 378bd0fef8 | Add pilorama loss test cases, marked as skipped | 2023-06-22 09:00:57 +00:00
Yaroslava Lukoyanova | 319e00c7a7 | Add test cases for S3 blobovnicza and fstree loss | 2023-06-22 09:00:57 +00:00
Dmitriy Zayakin <d.zayakin@yadro.com> | 0e00a526ca | Add wait block for tick epoch and add version testlib | 2023-06-22 09:00:57 +00:00
Liza <e.chichindaeva@yadro.com> | e947ea07cf | Test for invalid location constraint | 2023-06-22 09:00:57 +00:00
Dmitrii Stepanov <d.stepanov@yadro.com> | 3fdc14b0a4 | [#41] lifetime: Fix lifetime test | 2023-06-22 09:00:57 +00:00
Andrey Berezin <a.berezin@yadro.com> | c1b64e83a9 | Fix tests | 2023-06-22 09:00:57 +00:00
Andrey Berezin <a.berezin@yadro.com> | 1ba361b636 | Move shared code to testlib | 2023-06-22 09:00:57 +00:00
Yaroslava Lukoyanova | 78babed619 | Added new test cases for s3 gate, delete marker feature | 2023-06-22 09:00:57 +00:00
Dmitriy Zayakin <d.zayakin@yadro.com> | 197320f4eb | Node argument made optional for epoch ticks | 2023-06-22 09:00:57 +00:00
Dmitriy Zayakin <d.zayakin@yadro.com> | 81a5734e76 | Add tests for node shutdown and object replication integrity | 2023-06-22 09:00:57 +00:00
n/a | 4272bff8c9 | Add check for Errors while deleting objects | 2023-06-22 09:00:57 +00:00
Liza <e.chichindaeva@yadro.com> | 010757b0a9 | Add test for putting object while one node is stopped | 2023-06-22 09:00:57 +00:00
Andrey Berezin <a.berezin@yadro.com> | 934f76c52f | Use proper name for binary | 2023-05-04 16:32:33 +03:00
82 changed files with 2222 additions and 8553 deletions

View file

@@ -14,11 +14,11 @@ These tests rely on resources and utility modules that have been originally deve
    - `make`
    - `sudo cp bin/frostfs-cli /usr/local/bin/frostfs-cli`
-2. Install frostfs-authmate
+2. Install frostfs-s3-authmate
    - `git clone git@github.com:TrueCloudLab/frostfs-s3-gw.git`
    - `cd frostfs-s3-gw`
    - `make`
-   - `sudo cp bin/frostfs-s3-authmate /usr/local/bin/frostfs-authmate`
+   - `sudo cp bin/frostfs-s3-authmate /usr/local/bin/frostfs-s3-authmate`
 3. Install neo-go
    - `git clone git@github.com:nspcc-dev/neo-go.git`
View file

@@ -1,31 +0,0 @@
diff -urN bin.orig/activate bin/activate
--- bin.orig/activate 2018-12-27 14:55:13.916461020 +0900
+++ bin/activate 2018-12-27 20:38:35.223248728 +0900
@@ -30,6 +30,15 @@
unset _OLD_VIRTUAL_PS1
fi
+ # Unset exported dev-env variables
+ pushd ${DEVENV_PATH} > /dev/null
+ unset `make env | awk -F= '{print $1}'`
+ popd > /dev/null
+
+ # Unset external env variables
+ declare -f env_deactivate > /dev/null && env_deactivate
+ declare -f venv_deactivate > /dev/null && venv_deactivate
+
unset VIRTUAL_ENV
if [ ! "${1-}" = "nondestructive" ] ; then
# Self destruct!
@@ -47,6 +56,11 @@
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
+# Set external variables
+if [ -f ${VIRTUAL_ENV}/bin/environment.sh ] ; then
+ . ${VIRTUAL_ENV}/bin/environment.sh
+fi
+
# unset PYTHONHOME if set
if ! [ -z "${PYTHONHOME+_}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"

View file

@@ -1,281 +0,0 @@
import base64
import json
import logging
import os
import uuid
from dataclasses import dataclass
from enum import Enum
from time import sleep
from typing import Any, Dict, List, Optional, Union
import allure
import base58
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import wallet_utils
from pytest_tests.resources.common import ASSETS_DIR, FROSTFS_CLI_EXEC, WALLET_CONFIG
logger = logging.getLogger("NeoLogger")
EACL_LIFETIME = 100500
FROSTFS_CONTRACT_CACHE_TIMEOUT = 30
class EACLOperation(Enum):
PUT = "put"
GET = "get"
HEAD = "head"
GET_RANGE = "getrange"
GET_RANGE_HASH = "getrangehash"
SEARCH = "search"
DELETE = "delete"
class EACLAccess(Enum):
ALLOW = "allow"
DENY = "deny"
class EACLRole(Enum):
OTHERS = "others"
USER = "user"
SYSTEM = "system"
class EACLHeaderType(Enum):
REQUEST = "req" # Filter request headers
OBJECT = "obj" # Filter object headers
SERVICE = "SERVICE" # Filter service headers. These are not processed by FrostFS nodes and exist for service use only
class EACLMatchType(Enum):
STRING_EQUAL = "=" # Return true if strings are equal
STRING_NOT_EQUAL = "!=" # Return true if strings are different
@dataclass
class EACLFilter:
header_type: EACLHeaderType = EACLHeaderType.REQUEST
match_type: EACLMatchType = EACLMatchType.STRING_EQUAL
key: Optional[str] = None
value: Optional[str] = None
def to_dict(self) -> Dict[str, Any]:
return {
"headerType": self.header_type,
"matchType": self.match_type,
"key": self.key,
"value": self.value,
}
@dataclass
class EACLFilters:
filters: Optional[List[EACLFilter]] = None
    def __str__(self):
        return (
            ",".join(
                [
                    f"{filter.header_type.value}:"
                    f"{filter.key}{filter.match_type.value}{filter.value}"
                    for filter in self.filters
                ]
            )
            if self.filters
            else ""
        )
@dataclass
class EACLPubKey:
keys: Optional[List[str]] = None
@dataclass
class EACLRule:
operation: Optional[EACLOperation] = None
access: Optional[EACLAccess] = None
role: Optional[Union[EACLRole, str]] = None
filters: Optional[EACLFilters] = None
def to_dict(self) -> Dict[str, Any]:
return {
"Operation": self.operation,
"Access": self.access,
"Role": self.role,
"Filters": self.filters or [],
}
def __str__(self):
role = (
self.role.value
if isinstance(self.role, EACLRole)
else f'pubkey:{wallet_utils.get_wallet_public_key(self.role, "")}'
)
return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}'
@allure.title("Get extended ACL")
def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
try:
result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid)
except RuntimeError as exc:
logger.info("Extended ACL table is not set for this container")
logger.info(f"Got exception while getting eacl: {exc}")
return None
if "extended ACL table is not set for this container" in result.stdout:
return None
return result.stdout
@allure.title("Set extended ACL")
def set_eacl(
wallet_path: str,
cid: str,
eacl_table_path: str,
shell: Shell,
endpoint: str,
session_token: Optional[str] = None,
) -> None:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
cli.container.set_eacl(
wallet=wallet_path,
rpc_endpoint=endpoint,
cid=cid,
table=eacl_table_path,
await_mode=True,
session=session_token,
)
def _encode_cid_for_eacl(cid: str) -> str:
cid_base58 = base58.b58decode(cid)
return base64.b64encode(cid_base58).decode("utf-8")
def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json")
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list)
with open(table_file_path, "r") as file:
table_data = file.read()
logger.info(f"Generated eACL:\n{table_data}")
return table_file_path
def form_bearertoken_file(
wif: str,
cid: str,
eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
shell: Shell,
endpoint: str,
sign: Optional[bool] = True,
) -> str:
"""
This function fetches eACL for given <cid> on behalf of <wif>,
then extends it with filters taken from <eacl_rules>, signs
with bearer token and writes to file
"""
enc_cid = _encode_cid_for_eacl(cid) if cid else None
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
eacl = get_eacl(wif, cid, shell, endpoint)
json_eacl = dict()
if eacl:
eacl = eacl.replace("eACL: ", "").split("Signature")[0]
json_eacl = json.loads(eacl)
logger.info(json_eacl)
eacl_result = {
"body": {
"eaclTable": {"containerID": {"value": enc_cid} if cid else enc_cid, "records": []},
"lifetime": {"exp": EACL_LIFETIME, "nbf": "1", "iat": "0"},
}
}
    assert eacl_rule_list, "Got empty eacl_rule_list"
for rule in eacl_rule_list:
op_data = {
"operation": rule.operation.value.upper(),
"action": rule.access.value.upper(),
"filters": rule.filters or [],
"targets": [],
}
if isinstance(rule.role, EACLRole):
op_data["targets"] = [{"role": rule.role.value.upper()}]
elif isinstance(rule.role, EACLPubKey):
op_data["targets"] = [{"keys": rule.role.keys}]
eacl_result["body"]["eaclTable"]["records"].append(op_data)
# Add records from current eACL
if "records" in json_eacl.keys():
for record in json_eacl["records"]:
eacl_result["body"]["eaclTable"]["records"].append(record)
with open(file_path, "w", encoding="utf-8") as eacl_file:
json.dump(eacl_result, eacl_file, ensure_ascii=False, indent=4)
logger.info(f"Got these extended ACL records: {eacl_result}")
if sign:
sign_bearer(
shell=shell,
wallet_path=wif,
eacl_rules_file_from=file_path,
eacl_rules_file_to=file_path,
json=True,
)
return file_path
def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
"""
This function creates a list of eACL rules.
Args:
access (str): identifies if the following operation(s)
is allowed or denied
verbs (list): a list of operations to set rules for
user (str): a group of users (user/others) or a wallet of
a certain user for whom rules are set
Returns:
(list): a list of eACL rules
"""
if user not in ("others", "user"):
pubkey = wallet_utils.get_wallet_public_key(user, wallet_password="")
user = f"pubkey:{pubkey}"
rules = []
for verb in verbs:
rule = f"{access} {verb} {user}"
rules.append(rule)
return rules
def sign_bearer(
shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool
) -> None:
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=WALLET_CONFIG
)
frostfscli.util.sign_bearer_token(
wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
)
@allure.title("Wait for eACL cache expired")
def wait_for_cache_expired():
sleep(FROSTFS_CONTRACT_CACHE_TIMEOUT)
return
@allure.step("Return bearer token in base64 to caller")
def bearer_token_base64_from_file(
bearer_path: str,
) -> str:
with open(bearer_path, "rb") as file:
signed = file.read()
return base64.b64encode(signed).decode("utf-8")
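
A minimal usage sketch of the eACL helpers above, assuming a wallet path, container id, Shell instance and RPC endpoint are already available from test fixtures (all four names are assumptions, not part of the diff):

# Deny all operations for OTHERS, then verify the table was persisted.
rules = [EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op) for op in EACLOperation]
eacl_table_path = create_eacl(cid, rules, shell)
set_eacl(wallet_path, cid, eacl_table_path, shell, endpoint)
wait_for_cache_expired()  # let the FrostFS contract cache drop the old table
assert get_eacl(wallet_path, cid, shell, endpoint) is not None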

View file

@@ -1,596 +0,0 @@
import json
import logging
import os
from datetime import datetime
from typing import Optional
import allure
from pytest_tests.helpers.cli_helpers import _cmd_run
from pytest_tests.resources.common import ASSETS_DIR
logger = logging.getLogger("NeoLogger")
REGULAR_TIMEOUT = 90
LONG_TIMEOUT = 240
class AwsCliClient:
# Flags that we use for all S3 commands: disable SSL verification (as we use self-signed
# certificate in devenv) and disable automatic pagination in CLI output
common_flags = "--no-verify-ssl --no-paginate"
s3gate_endpoint: str
def __init__(self, s3gate_endpoint) -> None:
self.s3gate_endpoint = s3gate_endpoint
def create_bucket(
self,
Bucket: str,
ObjectLockEnabledForBucket: Optional[bool] = None,
ACL: Optional[str] = None,
GrantFullControl: Optional[str] = None,
GrantRead: Optional[str] = None,
GrantWrite: Optional[str] = None,
CreateBucketConfiguration: Optional[dict] = None,
):
if ObjectLockEnabledForBucket is None:
object_lock = ""
elif ObjectLockEnabledForBucket:
object_lock = " --object-lock-enabled-for-bucket"
else:
object_lock = " --no-object-lock-enabled-for-bucket"
cmd = (
f"aws {self.common_flags} s3api create-bucket --bucket {Bucket} "
f"{object_lock} --endpoint {self.s3gate_endpoint}"
)
if ACL:
cmd += f" --acl {ACL}"
if GrantFullControl:
cmd += f" --grant-full-control {GrantFullControl}"
if GrantWrite:
cmd += f" --grant-write {GrantWrite}"
if GrantRead:
cmd += f" --grant-read {GrantRead}"
if CreateBucketConfiguration:
cmd += f" --create-bucket-configuration LocationConstraint={CreateBucketConfiguration['LocationConstraint']}"
_cmd_run(cmd, REGULAR_TIMEOUT)
def list_buckets(self) -> dict:
cmd = f"aws {self.common_flags} s3api list-buckets --endpoint {self.s3gate_endpoint}"
output = _cmd_run(cmd)
return self._to_json(output)
def get_bucket_acl(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-acl --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def get_bucket_versioning(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-versioning --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def get_bucket_location(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-location --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def put_bucket_versioning(self, Bucket: str, VersioningConfiguration: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-versioning --bucket {Bucket} "
f'--versioning-configuration Status={VersioningConfiguration.get("Status")} '
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-objects --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_objects_v2(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-objects-v2 --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_object_versions(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-object-versions --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def copy_object(
self,
Bucket: str,
CopySource: str,
Key: str,
ACL: Optional[str] = None,
MetadataDirective: Optional[str] = None,
Metadata: Optional[dict] = None,
TaggingDirective: Optional[str] = None,
Tagging: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api copy-object --copy-source {CopySource} "
f"--bucket {Bucket} --key {Key} --endpoint {self.s3gate_endpoint}"
)
if ACL:
cmd += f" --acl {ACL}"
if MetadataDirective:
cmd += f" --metadata-directive {MetadataDirective}"
if Metadata:
cmd += " --metadata "
for key, value in Metadata.items():
cmd += f" {key}={value}"
if TaggingDirective:
cmd += f" --tagging-directive {TaggingDirective}"
if Tagging:
cmd += f" --tagging {Tagging}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def head_bucket(self, Bucket: str) -> dict:
cmd = f"aws {self.common_flags} s3api head-bucket --bucket {Bucket} --endpoint {self.s3gate_endpoint}"
output = _cmd_run(cmd)
return self._to_json(output)
def put_object(
self,
Body: str,
Bucket: str,
Key: str,
Metadata: Optional[dict] = None,
Tagging: Optional[str] = None,
ACL: Optional[str] = None,
ObjectLockMode: Optional[str] = None,
ObjectLockRetainUntilDate: Optional[datetime] = None,
ObjectLockLegalHoldStatus: Optional[str] = None,
GrantFullControl: Optional[str] = None,
GrantRead: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-object --bucket {Bucket} --key {Key} "
f"--body {Body} --endpoint {self.s3gate_endpoint}"
)
if Metadata:
cmd += f" --metadata"
for key, value in Metadata.items():
cmd += f" {key}={value}"
if Tagging:
cmd += f" --tagging '{Tagging}'"
if ACL:
cmd += f" --acl {ACL}"
if ObjectLockMode:
cmd += f" --object-lock-mode {ObjectLockMode}"
if ObjectLockRetainUntilDate:
cmd += f' --object-lock-retain-until-date "{ObjectLockRetainUntilDate}"'
if ObjectLockLegalHoldStatus:
cmd += f" --object-lock-legal-hold-status {ObjectLockLegalHoldStatus}"
if GrantFullControl:
cmd += f" --grant-full-control '{GrantFullControl}'"
if GrantRead:
cmd += f" --grant-read {GrantRead}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def head_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api head-object --bucket {Bucket} --key {Key} "
f"{version} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def get_object(
self,
Bucket: str,
Key: str,
file_path: str,
VersionId: Optional[str] = None,
Range: Optional[str] = None,
) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api get-object --bucket {Bucket} --key {Key} "
f"{version} {file_path} --endpoint {self.s3gate_endpoint}"
)
if Range:
cmd += f" --range {Range}"
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def get_object_acl(self, Bucket: str, Key: str, VersionId: Optional[str] = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api get-object-acl --bucket {Bucket} --key {Key} "
f"{version} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def put_object_acl(
self,
Bucket: str,
Key: str,
ACL: Optional[str] = None,
GrantWrite: Optional[str] = None,
GrantRead: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-object-acl --bucket {Bucket} --key {Key} "
f" --endpoint {self.s3gate_endpoint}"
)
if ACL:
cmd += f" --acl {ACL}"
if GrantWrite:
cmd += f" --grant-write {GrantWrite}"
if GrantRead:
cmd += f" --grant-read {GrantRead}"
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def put_bucket_acl(
self,
Bucket: str,
ACL: Optional[str] = None,
GrantWrite: Optional[str] = None,
GrantRead: Optional[str] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-acl --bucket {Bucket} "
f" --endpoint {self.s3gate_endpoint}"
)
if ACL:
cmd += f" --acl {ACL}"
if GrantWrite:
cmd += f" --grant-write {GrantWrite}"
if GrantRead:
cmd += f" --grant-read {GrantRead}"
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def delete_objects(self, Bucket: str, Delete: dict) -> dict:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json")
with open(file_path, "w") as out_file:
out_file.write(json.dumps(Delete))
logger.info(f"Input file for delete-objects: {json.dumps(Delete)}")
cmd = (
f"aws {self.common_flags} s3api delete-objects --bucket {Bucket} "
f"--delete file://{file_path} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def delete_object(self, Bucket: str, Key: str, VersionId: str = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api delete-object --bucket {Bucket} "
f"--key {Key} {version} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def get_object_attributes(
self,
bucket: str,
key: str,
*attributes: str,
version_id: str = None,
max_parts: int = None,
part_number: int = None,
) -> dict:
attrs = ",".join(attributes)
version = f" --version-id {version_id}" if version_id else ""
parts = f"--max-parts {max_parts}" if max_parts else ""
part_number = f"--part-number-marker {part_number}" if part_number else ""
cmd = (
f"aws {self.common_flags} s3api get-object-attributes --bucket {bucket} "
f"--key {key} {version} {parts} {part_number} --object-attributes {attrs} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket(self, Bucket: str) -> dict:
cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {Bucket} --endpoint {self.s3gate_endpoint}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def get_bucket_tagging(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-tagging --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def get_bucket_policy(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-policy --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_bucket_policy(self, Bucket: str, Policy: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-policy --bucket {Bucket} "
f"--policy {json.dumps(Policy)} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def get_bucket_cors(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api get-bucket-cors --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_bucket_cors(self, Bucket: str, CORSConfiguration: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-cors --bucket {Bucket} "
f"--cors-configuration '{json.dumps(CORSConfiguration)}' --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket_cors(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api delete-bucket-cors --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_bucket_tagging(self, Bucket: str, Tagging: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-bucket-tagging --bucket {Bucket} "
f"--tagging '{json.dumps(Tagging)}' --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def delete_bucket_tagging(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {Bucket} "
f"--endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_object_retention(
self,
Bucket: str,
Key: str,
Retention: dict,
VersionId: Optional[str] = None,
BypassGovernanceRetention: Optional[bool] = None,
) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api put-object-retention --bucket {Bucket} --key {Key} "
f"{version} --retention '{json.dumps(Retention, indent=4, sort_keys=True, default=str)}' --endpoint {self.s3gate_endpoint}"
)
        if BypassGovernanceRetention is not None:
cmd += " --bypass-governance-retention"
output = _cmd_run(cmd)
return self._to_json(output)
def put_object_legal_hold(
self, Bucket: str, Key: str, LegalHold: dict, VersionId: Optional[str] = None
) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api put-object-legal-hold --bucket {Bucket} --key {Key} "
f"{version} --legal-hold '{json.dumps(LegalHold)}' --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_object_tagging(self, Bucket: str, Key: str, Tagging: dict) -> dict:
cmd = (
f"aws {self.common_flags} s3api put-object-tagging --bucket {Bucket} --key {Key} "
f"--tagging '{json.dumps(Tagging)}' --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def get_object_tagging(self, Bucket: str, Key: str, VersionId: Optional[str] = None) -> dict:
version = f" --version-id {VersionId}" if VersionId else ""
cmd = (
f"aws {self.common_flags} s3api get-object-tagging --bucket {Bucket} --key {Key} "
f"{version} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, REGULAR_TIMEOUT)
return self._to_json(output)
def delete_object_tagging(self, Bucket: str, Key: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api delete-object-tagging --bucket {Bucket} "
f"--key {Key} --endpoint {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
@allure.step("Sync directory S3")
def sync(
self,
bucket_name: str,
dir_path: str,
ACL: Optional[str] = None,
Metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket_name} "
f"--endpoint-url {self.s3gate_endpoint}"
)
if Metadata:
cmd += f" --metadata"
for key, value in Metadata.items():
cmd += f" {key}={value}"
if ACL:
cmd += f" --acl {ACL}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
@allure.step("CP directory S3")
def cp(
self,
bucket_name: str,
dir_path: str,
ACL: Optional[str] = None,
Metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket_name} "
f"--endpoint-url {self.s3gate_endpoint} --recursive"
)
if Metadata:
cmd += " --metadata"
for key, value in Metadata.items():
cmd += f" {key}={value}"
if ACL:
cmd += f" --acl {ACL}"
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def create_multipart_upload(self, Bucket: str, Key: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api create-multipart-upload --bucket {Bucket} "
f"--key {Key} --endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def list_multipart_uploads(self, Bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-multipart-uploads --bucket {Bucket} "
f"--endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def abort_multipart_upload(self, Bucket: str, Key: str, UploadId: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api abort-multipart-upload --bucket {Bucket} "
f"--key {Key} --upload-id {UploadId} --endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def upload_part(self, UploadId: str, Bucket: str, Key: str, PartNumber: int, Body: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api upload-part --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --part-number {PartNumber} --body {Body} "
f"--endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def upload_part_copy(
self, UploadId: str, Bucket: str, Key: str, PartNumber: int, CopySource: str
) -> dict:
cmd = (
f"aws {self.common_flags} s3api upload-part-copy --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --part-number {PartNumber} --copy-source {CopySource} "
f"--endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd, LONG_TIMEOUT)
return self._to_json(output)
def list_parts(self, UploadId: str, Bucket: str, Key: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api list-parts --bucket {Bucket} --key {Key} "
f"--upload-id {UploadId} --endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def complete_multipart_upload(
self, Bucket: str, Key: str, UploadId: str, MultipartUpload: dict
) -> dict:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json")
with open(file_path, "w") as out_file:
out_file.write(json.dumps(MultipartUpload))
logger.info(f"Input file for complete-multipart-upload: {json.dumps(MultipartUpload)}")
cmd = (
f"aws {self.common_flags} s3api complete-multipart-upload --bucket {Bucket} "
f"--key {Key} --upload-id {UploadId} --multipart-upload file://{file_path} "
f"--endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def put_object_lock_configuration(self, Bucket, ObjectLockConfiguration):
cmd = (
f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {Bucket} "
f"--object-lock-configuration '{json.dumps(ObjectLockConfiguration)}' --endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
def get_object_lock_configuration(self, Bucket):
cmd = (
f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {Bucket} "
f"--endpoint-url {self.s3gate_endpoint}"
)
output = _cmd_run(cmd)
return self._to_json(output)
    @staticmethod
    def _to_json(output: str) -> dict:
        json_output = {}
        try:
            json_output = json.loads(output)
        except Exception:
            if "{" not in output:
                logger.warning(f"Could not parse json from output {output}")
                return json_output
            json_output = json.loads(output[output.index("{") :])
        return json_output
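
A sketch of how tests typically drove this CLI wrapper; the endpoint and file path are assumptions, not values from the diff:

client = AwsCliClient("https://s3.frostfs.devenv:8080")  # hypothetical devenv S3 gateway
client.create_bucket(Bucket="test-bucket", ObjectLockEnabledForBucket=False)
client.put_object(Body="/tmp/payload.bin", Bucket="test-bucket", Key="payload.bin")
print(client.list_objects(Bucket="test-bucket"))  # output parsed into a dict by _to_json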

View file

@@ -1,74 +0,0 @@
import logging
import re
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.shell import Shell
from pytest_tests.resources.common import FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, WALLET_CONFIG
logger = logging.getLogger("NeoLogger")
def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
versions = {}
for binary in ["neo-go", "frostfs-authmate"]:
out = shell.exec(f"{binary} --version").stdout
versions[binary] = _parse_version(out)
frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
versions["frostfs-cli"] = _parse_version(frostfs_cli.version.get().stdout)
try:
frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC)
versions["frostfs-adm"] = _parse_version(frostfs_adm.version.get().stdout)
except RuntimeError:
logger.info(f"frostfs-adm not installed")
out = shell.exec("aws --version").stdout
out_lines = out.split("\n")
versions["AWS"] = out_lines[0] if out_lines else "Unknown"
return versions
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
versions_by_host = {}
for host in hosting.hosts:
binary_path_by_name = {} # Maps binary name to executable path
for service_config in host.config.services:
exec_path = service_config.attributes.get("exec_path")
if exec_path:
binary_path_by_name[service_config.name] = exec_path
for cli_config in host.config.clis:
binary_path_by_name[cli_config.name] = cli_config.exec_path
shell = host.get_shell()
versions_at_host = {}
for binary_name, binary_path in binary_path_by_name.items():
try:
result = shell.exec(f"{binary_path} --version")
versions_at_host[binary_name] = _parse_version(result.stdout)
except Exception as exc:
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
versions_at_host[binary_name] = "Unknown"
versions_by_host[host.config.address] = versions_at_host
# Consolidate versions across all hosts
versions = {}
for host, binary_versions in versions_by_host.items():
for name, version in binary_versions.items():
captured_version = versions.get(name)
if captured_version:
assert (
captured_version == version
), f"Binary {name} has inconsistent version on host {host}"
else:
versions[name] = version
return versions
def _parse_version(version_output: str) -> str:
version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)
return version.group(1).strip() if version else "Unknown"
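
For illustration, a sketch of collecting the local versions (LocalShell is the local executor from frostfs_testlib.shell; using it here is an assumption about the caller):

from frostfs_testlib.shell import LocalShell

versions = get_local_binaries_versions(LocalShell())
for binary, version in versions.items():
    print(f"{binary}: {version}")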

View file

@@ -1,131 +0,0 @@
#!/usr/bin/python3.10
"""
Helper functions to use with `frostfs-cli`, `neo-go` and other CLIs.
"""
import json
import logging
import subprocess
import sys
from contextlib import suppress
from datetime import datetime
from textwrap import shorten
from typing import Union
import allure
import pexpect
logger = logging.getLogger("NeoLogger")
COLOR_GREEN = "\033[92m"
COLOR_OFF = "\033[0m"
def _cmd_run(cmd: str, timeout: int = 30) -> str:
"""
Runs given shell command <cmd>, in case of success returns its stdout,
in case of failure returns error message.
"""
    compl_proc = None
    start_time = datetime.utcnow()
    try:
        logger.info(f"{COLOR_GREEN}Executing command: {cmd}{COLOR_OFF}")
compl_proc = subprocess.run(
cmd,
check=True,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
timeout=timeout,
shell=True,
)
output = compl_proc.stdout
return_code = compl_proc.returncode
end_time = datetime.utcnow()
logger.info(f"{COLOR_GREEN}Output: {output}{COLOR_OFF}")
_attach_allure_log(cmd, output, return_code, start_time, end_time)
return output
except subprocess.CalledProcessError as exc:
logger.info(
f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode} " f"\nOutput: {exc.output}"
)
        end_time = datetime.utcnow()
return_code, cmd_output = subprocess.getstatusoutput(cmd)
_attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)
raise RuntimeError(
f"Command: {cmd}\n" f"Error:\nreturn code: {exc.returncode}\n" f"Output: {exc.output}"
) from exc
except OSError as exc:
raise RuntimeError(f"Command: {cmd}\n" f"Output: {exc.strerror}") from exc
except Exception as exc:
return_code, cmd_output = subprocess.getstatusoutput(cmd)
        end_time = datetime.utcnow()
_attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)
logger.info(
f"Command: {cmd}\n"
f"Error:\nreturn code: {return_code}\n"
f"Output: {exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}"
)
raise
def _run_with_passwd(cmd: str) -> str:
child = pexpect.spawn(cmd)
child.delaybeforesend = 1
child.expect(".*")
child.sendline("\r")
if sys.platform == "darwin":
child.expect(pexpect.EOF)
cmd = child.before
else:
child.wait()
cmd = child.read()
return cmd.decode()
def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str:
child = pexpect.spawn(cmd)
child.delaybeforesend = 1
child.expect("AWS Access Key ID.*")
child.sendline(key_id)
child.expect("AWS Secret Access Key.*")
child.sendline(access_key)
child.expect("Default region name.*")
child.sendline("")
child.expect("Default output format.*")
child.sendline(out_format)
child.wait()
cmd = child.read()
# child.expect(pexpect.EOF)
# cmd = child.before
return cmd.decode()
def _attach_allure_log(
cmd: str, output: str, return_code: int, start_time: datetime, end_time: datetime
) -> None:
command_attachment = (
f"COMMAND: '{cmd}'\n"
f"OUTPUT:\n {output}\n"
f"RC: {return_code}\n"
f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {end_time - start_time}"
)
with allure.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
allure.attach(command_attachment, "Command execution", allure.attachment_type.TEXT)
def log_command_execution(cmd: str, output: Union[str, dict]) -> None:
logger.info(f"{cmd}: {output}")
with suppress(Exception):
json_output = json.dumps(output, indent=4, sort_keys=True)
output = json_output
command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n"
with allure.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
allure.attach(command_attachment, "Command execution", allure.attachment_type.TEXT)
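
A short sketch of the error contract of _cmd_run: it returns stdout on success and raises RuntimeError on a non-zero exit code, so callers can assert on failures explicitly:

try:
    output = _cmd_run("frostfs-cli --version", timeout=10)
    log_command_execution("frostfs-cli --version", output)
except RuntimeError as exc:
    print(f"command failed: {exc}")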

View file

@@ -1,374 +0,0 @@
import random
import re
from dataclasses import dataclass
from typing import Any
import yaml
from frostfs_testlib.blockchain import RPCClient
from frostfs_testlib.hosting import Host, Hosting
from frostfs_testlib.hosting.config import ServiceConfig
from frostfs_testlib.utils import wallet_utils
@dataclass
class NodeBase:
"""
Represents a node of some underlying service
"""
id: str
name: str
host: Host
def __init__(self, id, name, host) -> None:
self.id = id
self.name = name
self.host = host
self.construct()
def construct(self):
pass
def __eq__(self, other):
return self.name == other.name
    def __hash__(self):
        return hash(self.name)
def __str__(self):
return self.label
def __repr__(self) -> str:
return self.label
@property
def label(self) -> str:
return self.name
def start_service(self):
self.host.start_service(self.name)
def stop_service(self):
self.host.stop_service(self.name)
def get_wallet_password(self) -> str:
return self._get_attribute(_ConfigAttributes.WALLET_PASSWORD)
def get_wallet_path(self) -> str:
return self._get_attribute(
_ConfigAttributes.LOCAL_WALLET_PATH,
_ConfigAttributes.WALLET_PATH,
)
def get_remote_wallet_path(self) -> str:
"""
Returns node wallet file path located on remote host
"""
return self._get_attribute(
_ConfigAttributes.WALLET_PATH,
)
def get_remote_config_path(self) -> str:
"""
Returns node config file path located on remote host
"""
return self._get_attribute(
_ConfigAttributes.CONFIG_PATH,
)
def get_wallet_config_path(self):
return self._get_attribute(
_ConfigAttributes.LOCAL_WALLET_CONFIG,
_ConfigAttributes.WALLET_CONFIG,
)
def get_wallet_public_key(self):
storage_wallet_path = self.get_wallet_path()
storage_wallet_pass = self.get_wallet_password()
return wallet_utils.get_wallet_public_key(storage_wallet_path, storage_wallet_pass)
    def _get_attribute(self, attribute_name: str, default_attribute_name: str = None) -> str:
config = self.host.get_service_config(self.name)
if default_attribute_name:
return config.attributes.get(
attribute_name, config.attributes.get(default_attribute_name)
)
else:
return config.attributes.get(attribute_name)
def _get_service_config(self) -> ServiceConfig:
return self.host.get_service_config(self.name)
class InnerRingNode(NodeBase):
"""
Class represents inner ring node in a cluster
Inner ring node is not always the same as physical host (or physical node, if you will):
It can be service running in a container or on physical host
For testing perspective, it's not relevant how it is actually running,
since frostfs network will still treat it as "node"
"""
def get_netmap_cleaner_threshold(self) -> str:
config_file = self.get_remote_config_path()
contents = self.host.get_shell().exec(f"cat {config_file}").stdout
config = yaml.safe_load(contents)
value = config["netmap_cleaner"]["threshold"]
return value
class S3Gate(NodeBase):
"""
Class represents S3 gateway in a cluster
"""
def get_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.ENDPOINT_DATA)
@property
def label(self) -> str:
return f"{self.name}: {self.get_endpoint()}"
class HTTPGate(NodeBase):
"""
Class represents HTTP gateway in a cluster
"""
def get_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.ENDPOINT_DATA)
@property
def label(self) -> str:
return f"{self.name}: {self.get_endpoint()}"
class MorphChain(NodeBase):
"""
Class represents side-chain aka morph-chain consensus node in a cluster
Consensus node is not always the same as physical host (or physical node, if you will):
It can be service running in a container or on physical host
For testing perspective, it's not relevant how it is actually running,
since frostfs network will still treat it as "node"
"""
rpc_client: RPCClient = None
def construct(self):
self.rpc_client = RPCClient(self.get_endpoint())
def get_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.ENDPOINT_INTERNAL)
@property
def label(self) -> str:
return f"{self.name}: {self.get_endpoint()}"
class MainChain(NodeBase):
"""
Class represents main-chain consensus node in a cluster
Consensus node is not always the same as physical host:
It can be service running in a container or on physical host (or physical node, if you will):
For testing perspective, it's not relevant how it is actually running,
since frostfs network will still treat it as "node"
"""
rpc_client: RPCClient = None
def construct(self):
self.rpc_client = RPCClient(self.get_endpoint())
def get_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.ENDPOINT_INTERNAL)
@property
def label(self) -> str:
return f"{self.name}: {self.get_endpoint()}"
class StorageNode(NodeBase):
"""
Class represents storage node in a storage cluster
Storage node is not always the same as physical host:
It can be service running in a container or on physical host (or physical node, if you will):
For testing perspective, it's not relevant how it is actually running,
since frostfs network will still treat it as "node"
"""
def get_rpc_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.ENDPOINT_DATA)
def get_control_endpoint(self) -> str:
return self._get_attribute(_ConfigAttributes.CONTROL_ENDPOINT)
def get_un_locode(self):
return self._get_attribute(_ConfigAttributes.UN_LOCODE)
@property
def label(self) -> str:
return f"{self.name}: {self.get_rpc_endpoint()}"
class Cluster:
"""
This class represents a Cluster object for the whole storage based on provided hosting
"""
default_rpc_endpoint: str
default_s3_gate_endpoint: str
default_http_gate_endpoint: str
def __init__(self, hosting: Hosting) -> None:
self._hosting = hosting
self.default_rpc_endpoint = self.storage_nodes[0].get_rpc_endpoint()
self.default_s3_gate_endpoint = self.s3gates[0].get_endpoint()
self.default_http_gate_endpoint = self.http_gates[0].get_endpoint()
@property
def hosts(self) -> list[Host]:
"""
Returns list of Hosts
"""
return self._hosting.hosts
@property
def hosting(self) -> Hosting:
return self._hosting
def _create_wallet_config(self, service: ServiceConfig) -> None:
wallet_path = service.attributes[_ConfigAttributes.LOCAL_WALLET_CONFIG]
wallet_password = service.attributes[_ConfigAttributes.WALLET_PASSWORD]
with open(wallet_path, "w") as file:
yaml.dump({"password": wallet_password}, file)
def create_wallet_configs(self, hosting: Hosting) -> None:
configs = hosting.find_service_configs(".*")
for config in configs:
if _ConfigAttributes.LOCAL_WALLET_CONFIG in config.attributes:
self._create_wallet_config(config)
    def is_local_devenv(self) -> bool:
if len(self.hosting.hosts) == 1:
host = self.hosting.hosts[0]
if host.config.address == "localhost" and host.config.plugin_name == "docker":
return True
return False
@property
def storage_nodes(self) -> list[StorageNode]:
"""
Returns list of Storage Nodes (not physical nodes)
"""
return self._get_nodes(_ServicesNames.STORAGE)
@property
def s3gates(self) -> list[S3Gate]:
"""
Returns list of S3 gates
"""
return self._get_nodes(_ServicesNames.S3_GATE)
@property
    def http_gates(self) -> list[HTTPGate]:
"""
Returns list of HTTP gates
"""
return self._get_nodes(_ServicesNames.HTTP_GATE)
@property
def morph_chain_nodes(self) -> list[MorphChain]:
"""
Returns list of morph-chain consensus nodes (not physical nodes)
"""
return self._get_nodes(_ServicesNames.MORPH_CHAIN)
@property
def main_chain_nodes(self) -> list[MainChain]:
"""
Returns list of main-chain consensus nodes (not physical nodes)
"""
return self._get_nodes(_ServicesNames.MAIN_CHAIN)
@property
def ir_nodes(self) -> list[InnerRingNode]:
"""
Returns list of inner-ring nodes (not physical nodes)
"""
return self._get_nodes(_ServicesNames.INNER_RING)
    def _get_nodes(self, service_name) -> list[NodeBase]:
        configs = self.hosting.find_service_configs(rf"{service_name}\d*$")
class_mapping: dict[str, Any] = {
_ServicesNames.STORAGE: StorageNode,
_ServicesNames.INNER_RING: InnerRingNode,
_ServicesNames.MORPH_CHAIN: MorphChain,
_ServicesNames.S3_GATE: S3Gate,
_ServicesNames.HTTP_GATE: HTTPGate,
_ServicesNames.MAIN_CHAIN: MainChain,
}
cls = class_mapping.get(service_name)
return [
cls(
self._get_id(config.name),
config.name,
self.hosting.get_host_by_service(config.name),
)
for config in configs
]
    def _get_id(self, node_name) -> int:
        pattern = r"\d*$"
matches = re.search(pattern, node_name)
if matches:
return int(matches.group())
def get_random_storage_rpc_endpoint(self) -> str:
return random.choice(self.get_storage_rpc_endpoints())
def get_random_storage_rpc_endpoint_mgmt(self) -> str:
return random.choice(self.get_storage_rpc_endpoints_mgmt())
def get_storage_rpc_endpoints(self) -> list[str]:
nodes = self.storage_nodes
return [node.get_rpc_endpoint() for node in nodes]
def get_storage_rpc_endpoints_mgmt(self) -> list[str]:
nodes = self.storage_nodes
return [node.get_rpc_endpoint_mgmt() for node in nodes]
def get_morph_endpoints(self) -> list[str]:
nodes = self.morph_chain_nodes
return [node.get_endpoint() for node in nodes]
class _ServicesNames:
STORAGE = "s"
S3_GATE = "s3-gate"
HTTP_GATE = "http-gate"
MORPH_CHAIN = "morph-chain"
INNER_RING = "ir"
MAIN_CHAIN = "main-chain"
class _ConfigAttributes:
WALLET_PASSWORD = "wallet_password"
WALLET_PATH = "wallet_path"
WALLET_CONFIG = "wallet_config"
CONFIG_PATH = "config_path"
LOCAL_WALLET_PATH = "local_wallet_path"
LOCAL_WALLET_CONFIG = "local_config_path"
ENDPOINT_DATA = "endpoint_data0"
ENDPOINT_INTERNAL = "endpoint_internal0"
CONTROL_ENDPOINT = "control_endpoint"
UN_LOCODE = "un_locode"
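
A sketch of wiring a Cluster up from a hosting config, mirroring the usual fixture flow; the YAML path is an assumption:

import yaml
from frostfs_testlib.hosting import Hosting

with open(".devenv.hosting.yaml") as file:
    hosting_config = yaml.full_load(file)
hosting = Hosting()
hosting.configure(hosting_config)
cluster = Cluster(hosting)
print(cluster.default_rpc_endpoint, cluster.default_s3_gate_endpoint)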

View file

@@ -1,211 +0,0 @@
#!/usr/bin/python3
"""
This module contains functions which are used for Large Object assembling:
getting Last Object and split and getting Link Object. It is not enough to
simply perform a "raw" HEAD request, as noted in the issue:
https://github.com/nspcc-dev/neofs-node/issues/1304. Therefore, the reliable
retrieval of the aforementioned objects must be done this way: send direct
"raw" HEAD request to the every Storage Node and return the desired OID on
first non-null response.
"""
import logging
from typing import Optional, Tuple
import allure
from frostfs_testlib.shell import Shell
from pytest_tests.helpers import frostfs_verbs
from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.helpers.frostfs_verbs import head_object
from pytest_tests.helpers.storage_object_info import StorageObjectInfo
from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT, WALLET_CONFIG
logger = logging.getLogger("NeoLogger")
def get_storage_object_chunks(
storage_object: StorageObjectInfo,
shell: Shell,
cluster: Cluster,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[str]:
"""
Get complex object split objects ids (no linker object)
Args:
        storage_object: storage_object to get its chunks
shell: client shell to do cmd requests
cluster: cluster object under test
timeout: Timeout for an operation.
Returns:
list of object ids of complex object chunks
"""
with allure.step(f"Get complex object chunks (f{storage_object.oid})"):
split_object_id = get_link_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell,
cluster.storage_nodes,
is_direct=False,
timeout=timeout,
)
head = head_object(
storage_object.wallet_file_path,
storage_object.cid,
split_object_id,
shell,
cluster.default_rpc_endpoint,
timeout=timeout,
)
chunks_object_ids = []
if "split" in head["header"] and "children" in head["header"]["split"]:
chunks_object_ids = head["header"]["split"]["children"]
return chunks_object_ids
def get_complex_object_split_ranges(
storage_object: StorageObjectInfo,
shell: Shell,
cluster: Cluster,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[Tuple[int, int]]:
"""
Get list of split ranges tuples (offset, length) of a complex object
For example if object size if 100 and max object size in system is 30
the returned list should be
[(0, 30), (30, 30), (60, 30), (90, 10)]
Args:
storage_object: storage_object to get it's chunks
shell: client shell to do cmd requests
cluster: cluster object under test
timeout: Timeout for an operation.
Returns:
list of object ids of complex object chunks
"""
ranges: list = []
offset = 0
    chunks_ids = get_storage_object_chunks(storage_object, shell, cluster, timeout)
for chunk_id in chunks_ids:
head = head_object(
storage_object.wallet_file_path,
storage_object.cid,
chunk_id,
shell,
cluster.default_rpc_endpoint,
timeout=timeout,
)
length = int(head["header"]["payloadLength"])
ranges.append((offset, length))
offset = offset + length
return ranges
@allure.step("Get Link Object")
def get_link_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
nodes: list[StorageNode],
bearer: str = "",
wallet_config: str = WALLET_CONFIG,
is_direct: bool = True,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
Args:
wallet (str): path to the wallet on whose behalf the Storage Nodes
are requested
cid (str): Container ID which stores the Large Object
oid (str): Large Object ID
shell: executor for cli command
nodes: list of nodes to do search on
bearer (optional, str): path to Bearer token file
wallet_config (optional, str): path to the frostfs-cli config file
is_direct: send request directly to the node or not; this flag
turns into `--ttl 1` key
timeout: Timeout for an operation.
    Returns:
        (str): Link Object ID
        When no Link Object ID is found after polling all Storage Nodes,
        the function logs an error and returns None.
    """
for node in nodes:
endpoint = node.get_rpc_endpoint()
try:
resp = frostfs_verbs.head_object(
wallet,
cid,
oid,
shell=shell,
endpoint=endpoint,
is_raw=True,
is_direct=is_direct,
bearer=bearer,
wallet_config=wallet_config,
timeout=timeout,
)
if resp["link"]:
return resp["link"]
except Exception:
logger.info(f"No Link Object found on {endpoint}; continue")
logger.error(f"No Link Object for {cid}/{oid} found among all Storage Nodes")
return None
@allure.step("Get Last Object")
def get_last_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
nodes: list[StorageNode],
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> Optional[str]:
"""
Args:
wallet (str): path to the wallet on whose behalf the Storage Nodes
are requested
cid (str): Container ID which stores the Large Object
oid (str): Large Object ID
shell: executor for cli command
nodes: list of nodes to do search on
timeout: Timeout for an operation.
    Returns:
        (str): Last Object ID
        When no Last Object ID is found after polling all Storage Nodes,
        the function logs an error and returns None.
    """
for node in nodes:
endpoint = node.get_rpc_endpoint()
try:
resp = frostfs_verbs.head_object(
wallet,
cid,
oid,
shell=shell,
endpoint=endpoint,
is_raw=True,
is_direct=True,
timeout=timeout,
)
if resp["lastPart"]:
return resp["lastPart"]
except Exception:
logger.info(f"No Last Object found on {endpoint}; continue")
logger.error(f"No Last Object for {cid}/{oid} found among all Storage Nodes")
return None
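
For example, the split ranges can be cross-checked against the object size; storage_object, shell and cluster are assumed to come from test fixtures:

ranges = get_complex_object_split_ranges(storage_object, shell, cluster)
total_length = sum(length for _, length in ranges)
assert total_length == storage_object.size, f"chunks cover {total_length} of {storage_object.size} bytes"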

View file

@@ -1,359 +0,0 @@
import json
import logging
from dataclasses import dataclass
from time import sleep
from typing import Optional, Union
import allure
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import json_utils
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.file_helper import generate_file, get_file_hash
from pytest_tests.helpers.frostfs_verbs import put_object, put_object_to_random_node
from pytest_tests.helpers.storage_object_info import StorageObjectInfo
from pytest_tests.helpers.wallet import WalletFile
from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, WALLET_CONFIG
logger = logging.getLogger("NeoLogger")
@dataclass
class StorageContainerInfo:
id: str
wallet_file: WalletFile
class StorageContainer:
def __init__(
self,
storage_container_info: StorageContainerInfo,
shell: Shell,
cluster: Cluster,
) -> None:
self.shell = shell
self.storage_container_info = storage_container_info
self.cluster = cluster
def get_id(self) -> str:
return self.storage_container_info.id
def get_wallet_path(self) -> str:
return self.storage_container_info.wallet_file.path
def get_wallet_config_path(self) -> str:
return self.storage_container_info.wallet_file.config_path
@allure.step("Generate new object and put in container")
def generate_object(
self,
size: int,
expire_at: Optional[int] = None,
bearer_token: Optional[str] = None,
endpoint: Optional[str] = None,
) -> StorageObjectInfo:
with allure.step(f"Generate object with size {size}"):
file_path = generate_file(size)
file_hash = get_file_hash(file_path)
container_id = self.get_id()
wallet_path = self.get_wallet_path()
wallet_config = self.get_wallet_config_path()
with allure.step(f"Put object with size {size} to container {container_id}"):
if endpoint:
object_id = put_object(
wallet=wallet_path,
path=file_path,
cid=container_id,
expire_at=expire_at,
shell=self.shell,
endpoint=endpoint,
bearer=bearer_token,
wallet_config=wallet_config,
)
else:
object_id = put_object_to_random_node(
wallet=wallet_path,
path=file_path,
cid=container_id,
expire_at=expire_at,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_token,
wallet_config=wallet_config,
)
storage_object = StorageObjectInfo(
container_id,
object_id,
size=size,
wallet_file_path=wallet_path,
file_path=file_path,
file_hash=file_hash,
)
return storage_object
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
@allure.step("Create Container")
def create_container(
wallet: str,
shell: Shell,
endpoint: str,
rule: str = DEFAULT_PLACEMENT_RULE,
basic_acl: str = "",
attributes: Optional[dict] = None,
session_token: str = "",
session_wallet: str = "",
    name: Optional[str] = None,
    options: Optional[dict] = None,
await_mode: bool = True,
wait_for_creation: bool = True,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
A wrapper for `frostfs-cli container create` call.
Args:
wallet (str): a wallet on whose behalf a container is created
rule (optional, str): placement rule for container
basic_acl (optional, str): an ACL for container, will be
appended to `--basic-acl` key
        attributes (optional, dict): container attributes, will be
appended to `--attributes` key
session_token (optional, str): a path to session token file
session_wallet(optional, str): a path to the wallet which signed
the session token; this parameter makes sense
when paired with `session_token`
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
options (optional, dict): any other options to pass to the call
name (optional, str): container name attribute
await_mode (bool): block execution until container is persisted
        wait_for_creation (bool): wait until the container appears in the container list
timeout: Timeout for the operation.
Returns:
(str): CID of the created container
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
result = cli.container.create(
rpc_endpoint=endpoint,
wallet=session_wallet if session_wallet else wallet,
policy=rule,
basic_acl=basic_acl,
attributes=attributes,
name=name,
session=session_token,
await_mode=await_mode,
timeout=timeout,
**options or {},
)
    cid = _parse_cid(result.stdout)
    if wait_for_creation:
        logger.info("Container created; waiting until it is persisted in the sidechain")
        wait_for_container_creation(wallet, cid, shell, endpoint)
return cid
def wait_for_container_creation(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1
):
for _ in range(attempts):
containers = list_containers(wallet, shell, endpoint)
if cid in containers:
return
logger.info(f"There is no {cid} in {containers} yet; sleep {sleep_interval} and continue")
sleep(sleep_interval)
raise RuntimeError(
f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting"
)
def wait_for_container_deletion(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1
):
for _ in range(attempts):
try:
get_container(wallet, cid, shell=shell, endpoint=endpoint)
sleep(sleep_interval)
continue
except Exception as err:
if "container not found" not in str(err):
raise AssertionError(f'Expected "container not found" in error, got\n{err}')
return
raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.")
@allure.step("List Containers")
def list_containers(
wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT
) -> list[str]:
"""
A wrapper for `frostfs-cli container list` call. It returns all the
available containers for the given wallet.
Args:
wallet (str): a wallet on whose behalf we list the containers
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
Returns:
(list): list of containers
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout)
logger.info(f"Containers: \n{result}")
return result.stdout.split()
@allure.step("List Objects in container")
def list_objects(
wallet: str,
shell: Shell,
container_id: str,
endpoint: str,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[str]:
"""
A wrapper for `frostfs-cli container list-objects` call. It returns all the
available objects in container.
Args:
wallet (str): a wallet on whose behalf we list the container objects
shell: executor for cli command
container_id: cid of container
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
Returns:
(list): list of object IDs stored in the container
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
result = cli.container.list_objects(
rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout
)
logger.info(f"Container objects: \n{result}")
return result.stdout.split()
@allure.step("Get Container")
def get_container(
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
json_mode: bool = True,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> Union[dict, str]:
"""
A wrapper for `frostfs-cli container get` call. It extracts container's
attributes and rearranges them into a more compact view.
Args:
wallet (str): path to a wallet on whose behalf we get the container
cid (str): ID of the container to get
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
json_mode (bool): return container in JSON format
timeout: Timeout for the operation.
Returns:
(dict or str): dict of container attributes (or raw CLI output when `json_mode` is False)
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
result = cli.container.get(
rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout
)
if not json_mode:
return result.stdout
container_info = json.loads(result.stdout)
attributes = dict()
for attr in container_info["attributes"]:
attributes[attr["key"]] = attr["value"]
container_info["attributes"] = attributes
container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"])
return container_info
@allure.step("Delete Container")
# TODO: make the error message about a non-found container more user-friendly
# https://github.com/nspcc-dev/frostfs-contract/issues/121
def delete_container(
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
force: bool = False,
session_token: Optional[str] = None,
await_mode: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> None:
"""
A wrapper for `frostfs-cli container delete` call.
Args:
wallet (str): path to a wallet on whose behalf we delete the container
cid (str): ID of the container to delete
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
force (bool): do not check whether container contains locks and remove immediately
session_token: a path to session token file
timeout: Timeout for the operation.
This function doesn't return anything.
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
cli.container.delete(
wallet=wallet,
cid=cid,
rpc_endpoint=endpoint,
force=force,
session=session_token,
await_mode=await_mode,
timeout=timeout,
)
def _parse_cid(output: str) -> str:
"""
Parses container ID from a given CLI output. The input string we expect:
container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN
awaiting...
container has been persisted on sidechain
We want to take 'container ID' value from the string.
Args:
output (str): CLI output to parse
Returns:
(str): extracted CID
"""
# taking first line from command's output
first_line = output.split("\n")[0] if output else ""
if not first_line:
    logger.error(f"Got empty output: {output}")
splitted = first_line.split(": ")
if len(splitted) != 2:
raise ValueError(f"no CID was parsed from command output: \t{first_line}")
return splitted[1]
@allure.step("Search container by name")
def search_container_by_name(wallet: str, name: str, shell: Shell, endpoint: str):
list_cids = list_containers(wallet, shell, endpoint)
for cid in list_cids:
cont_info = get_container(wallet, cid, shell, endpoint, True)
if cont_info.get("attributes", {}).get("Name", None) == name:
return cid
return None


@@ -1,9 +1,9 @@
from typing import List, Optional
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.acl import EACLOperation
from pytest_tests.helpers.acl import EACLOperation
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.object_access import (
can_delete_object,
can_get_head_object,


@@ -1,40 +0,0 @@
import logging
import re
import allure
from pytest import Config
logger = logging.getLogger("NeoLogger")
@allure.step("Read environment.properties")
def read_env_properties(config: Config) -> dict:
environment_dir = config.getoption("--alluredir")
if not environment_dir:
return None
file_path = f"{environment_dir}/environment.properties"
with open(file_path, "r") as file:
raw_content = file.read()
env_properties = {}
for line in raw_content.split("\n"):
m = re.match("(.*?)=(.*)", line)
if not m:
logger.warning(f"Could not parse env property from {line}")
continue
key, value = m.group(1), m.group(2)
env_properties[key] = value
return env_properties
@allure.step("Update data in environment.properties")
def save_env_properties(config: Config, env_data: dict) -> None:
environment_dir = config.getoption("--alluredir")
if not environment_dir:
return None
file_path = f"{environment_dir}/environment.properties"
with open(file_path, "a+") as env_file:
for env, env_value in env_data.items():
env_file.write(f"{env}={env_value}\n")


@@ -1,112 +0,0 @@
import logging
from time import sleep
from typing import Optional
import allure
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import datetime_utils, wallet_utils
from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.helpers.payment_neogo import get_contract_hash
from pytest_tests.helpers.test_control import wait_for_success
from pytest_tests.resources.common import (
CLI_DEFAULT_TIMEOUT,
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
MAINNET_BLOCK_TIME,
NEOGO_EXECUTABLE,
)
logger = logging.getLogger("NeoLogger")
@allure.step("Ensure fresh epoch")
def ensure_fresh_epoch(
shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None
) -> int:
# ensure new fresh epoch to avoid epoch switch during test session
alive_node = alive_node if alive_node else cluster.storage_nodes[0]
current_epoch = get_epoch(shell, cluster, alive_node)
tick_epoch(shell, cluster, alive_node)
epoch = get_epoch(shell, cluster, alive_node)
assert epoch > current_epoch, "Epoch wasn't ticked"
return epoch
@allure.step("Wait for epochs align in whole cluster")
@wait_for_success(60, 5)
def wait_for_epochs_align(shell: Shell, cluster: Cluster) -> bool:
epochs = []
for node in cluster.storage_nodes:
epochs.append(get_epoch(shell, cluster, node))
unique_epochs = list(set(epochs))
assert (
len(unique_epochs) == 1
), f"unaligned epochs found, {epochs}, count of unique epochs {len(unique_epochs)}"
@allure.step("Get Epoch")
def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
alive_node = alive_node if alive_node else cluster.storage_nodes[0]
endpoint = alive_node.get_rpc_endpoint()
wallet_path = alive_node.get_wallet_path()
wallet_config = alive_node.get_wallet_config_path()
cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config)
epoch = cli.netmap.epoch(endpoint, wallet_path, timeout=CLI_DEFAULT_TIMEOUT)
return int(epoch.stdout)
@allure.step("Tick Epoch")
def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
"""
Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv)
Args:
shell: local shell to make queries about current epoch. Remote shell will be used to tick new one
cluster: cluster instance under test
alive_node: node to send requests to (first node in cluster by default)
"""
alive_node = alive_node if alive_node else cluster.storage_nodes[0]
remote_shell = alive_node.host.get_shell()
if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
frostfsadm = FrostfsAdm(
shell=remote_shell,
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH,
)
frostfsadm.morph.force_new_epoch()
return
# Otherwise we tick epoch using transaction
cur_epoch = get_epoch(shell, cluster)
# Use first node by default
ir_node = cluster.ir_nodes[0]
# In case if no local_wallet_path is provided, we use wallet_path
ir_wallet_path = ir_node.get_wallet_path()
ir_wallet_pass = ir_node.get_wallet_password()
ir_address = wallet_utils.get_last_address_from_wallet(ir_wallet_path, ir_wallet_pass)
morph_chain = cluster.morph_chain_nodes[0]
morph_endpoint = morph_chain.get_endpoint()
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
neogo.contract.invokefunction(
wallet=ir_wallet_path,
wallet_password=ir_wallet_pass,
scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell),
method="newEpoch",
arguments=f"int:{cur_epoch + 1}",
multisig_hash=f"{ir_address}:Global",
address=ir_address,
rpc_endpoint=morph_endpoint,
force=True,
gas=1,
)
sleep(datetime_utils.parse_time(MAINNET_BLOCK_TIME))
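# Illustrative sketch (assumed test fixtures `shell` and `cluster`): a common
# setup flow is to force a fresh epoch and then wait until every storage node
# in the cluster observes the same epoch number.
def _example_epoch_setup(shell: Shell, cluster: Cluster) -> int:
    epoch = ensure_fresh_epoch(shell, cluster)
    wait_for_epochs_align(shell, cluster)
    return epoch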


@@ -1,55 +0,0 @@
import logging
from time import sleep
import allure
from frostfs_testlib.shell import Shell
from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.helpers.node_management import storage_node_healthcheck
from pytest_tests.helpers.storage_policy import get_nodes_with_object
logger = logging.getLogger("NeoLogger")
@allure.step("Wait for object replication")
def wait_object_replication(
cid: str,
oid: str,
expected_copies: int,
shell: Shell,
nodes: list[StorageNode],
) -> list[StorageNode]:
sleep_interval, attempts = 15, 20
nodes_with_object = []
for _ in range(attempts):
nodes_with_object = get_nodes_with_object(cid, oid, shell=shell, nodes=nodes)
if len(nodes_with_object) >= expected_copies:
return nodes_with_object
sleep(sleep_interval)
raise AssertionError(
f"Expected {expected_copies} copies of object, but found {len(nodes_with_object)}. "
f"Waiting time {sleep_interval * attempts}"
)
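# Illustrative only: after taking one container node down, a test would
# typically wait here until the placement policy's copy count (2 is an assumed
# example value) is restored on the remaining nodes.
def _example_wait_two_copies(cid: str, oid: str, shell: Shell, cluster: Cluster) -> list[StorageNode]:
    return wait_object_replication(cid, oid, expected_copies=2, shell=shell, nodes=cluster.storage_nodes)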
@allure.step("Wait for storage nodes returned to cluster")
def wait_all_storage_nodes_returned(cluster: Cluster) -> None:
sleep_interval, attempts = 15, 20
for __attempt in range(attempts):
if is_all_storage_nodes_returned(cluster):
return
sleep(sleep_interval)
raise AssertionError("Storage node(s) is broken")
def is_all_storage_nodes_returned(cluster: Cluster) -> bool:
with allure.step("Run health check for all storage nodes"):
for node in cluster.storage_nodes:
try:
health_check = storage_node_healthcheck(node)
except Exception as err:
logger.warning(f"Node healthcheck fails with error {err}")
return False
if health_check.health_status != "READY" or health_check.network_status != "ONLINE":
return False
return True


@@ -1,168 +0,0 @@
import hashlib
import logging
import os
import uuid
from typing import Any, Optional
import allure
from pytest_tests.resources.common import ASSETS_DIR
logger = logging.getLogger("NeoLogger")
def generate_file(size: int) -> str:
"""Generates a binary file with the specified size in bytes.
Args:
size: Size in bytes, can be declared as 6e+6 for example.
Returns:
The path to the generated file.
"""
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
with open(file_path, "wb") as file:
file.write(os.urandom(size))
logger.info(f"File with size {size} bytes has been generated: {file_path}")
return file_path
def generate_file_with_content(
size: int,
file_path: Optional[str] = None,
content: Optional[str] = None,
) -> str:
"""Creates a new file with specified content.
Args:
    size: Size of random binary content in bytes (used when content is not specified).
file_path: Path to the file that should be created. If not specified, then random file
path will be generated.
content: Content that should be stored in the file. If not specified, then random binary
content will be generated.
Returns:
Path to the generated file.
"""
mode = "w+"
if content is None:
content = os.urandom(size)
mode = "wb"
if not file_path:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
else:
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
with open(file_path, mode) as file:
file.write(content)
return file_path
@allure.step("Get File Hash")
def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
"""Generates hash for the specified file.
Args:
file_path: Path to the file to generate hash for.
len: How many bytes to read.
offset: Position to start reading from.
Returns:
Hash of the file as hex-encoded string.
"""
file_hash = hashlib.sha256()
with open(file_path, "rb") as out:
if len and not offset:
file_hash.update(out.read(len))
elif len and offset:
out.seek(offset, 0)
file_hash.update(out.read(len))
elif offset and not len:
out.seek(offset, 0)
file_hash.update(out.read())
else:
file_hash.update(out.read())
return file_hash.hexdigest()
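# Illustrative only: hashing a sub-range of a generated file; the 1 KiB size
# and the 256-byte range at offset 512 are arbitrary example values.
def _example_range_hash() -> str:
    path = generate_file(1024)
    return get_file_hash(path, len=256, offset=512)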
@allure.step("Concatenation set of files to one file")
def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
"""Concatenates several files into a single file.
Args:
file_paths: Paths to the files to concatenate.
resulting_file_path: Path to the file where concatenated content should be stored.
Returns:
Path to the resulting file.
"""
if not resulting_file_path:
resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
with open(resulting_file_path, "wb") as f:
for file in file_paths:
with open(file, "rb") as part_file:
f.write(part_file.read())
return resulting_file_path
def split_file(file_path: str, parts: int) -> list[str]:
"""Splits specified file into several specified number of parts.
Each part is saved under name `{original_file}_part_{i}`.
Args:
file_path: Path to the file that should be split.
parts: Number of parts the file should be split into.
Returns:
Paths to the part files.
"""
with open(file_path, "rb") as file:
content = file.read()
content_size = len(content)
chunk_size = int((content_size + parts) / parts)
part_id = 1
part_file_paths = []
for content_offset in range(0, content_size + 1, chunk_size):
part_file_name = f"{file_path}_part_{part_id}"
part_file_paths.append(part_file_name)
with open(part_file_name, "wb") as out_file:
out_file.write(content[content_offset : content_offset + chunk_size])
part_id += 1
return part_file_paths
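# Illustrative round-trip check: splitting a file and concatenating the parts
# back should reproduce the original content byte-for-byte.
def _example_split_concat_round_trip() -> None:
    path = generate_file(1000)
    parts = split_file(path, parts=4)
    restored = concat_files(parts)
    assert get_file_hash(restored) == get_file_hash(path)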
def get_file_content(
file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None
) -> Any:
"""Returns content of specified file.
Args:
file_path: Path to the file.
content_len: Limit of content length. If None, then entire file content is returned;
otherwise only the first content_len bytes of the content are returned.
mode: Mode of opening the file.
offset: Position to start reading from.
Returns:
Content of the specified file.
"""
with open(file_path, mode) as file:
if content_len and not offset:
content = file.read(content_len)
elif content_len and offset:
file.seek(offset, 0)
content = file.read(content_len)
elif offset and not content_len:
file.seek(offset, 0)
content = file.read()
else:
content = file.read()
return content


@@ -1,672 +0,0 @@
import json
import logging
import os
import re
import uuid
from typing import Any, Optional
import allure
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import json_utils
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.resources.common import (
ASSETS_DIR,
CLI_DEFAULT_TIMEOUT,
FROSTFS_CLI_EXEC,
WALLET_CONFIG,
)
logger = logging.getLogger("NeoLogger")
@allure.step("Get object from random node")
def get_object_from_random_node(
wallet: str,
cid: str,
oid: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
GET from FrostFS random storage node
Args:
wallet: wallet on whose behalf GET is done
cid: ID of Container where we get the Object from
oid: Object ID
shell: executor for cli command
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
write_object (optional, str): path to downloaded file, appends to `--file` key
cluster: cluster under test; a random storage node RPC endpoint is chosen from it
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): path to downloaded file
"""
endpoint = cluster.get_random_storage_rpc_endpoint()
return get_object(
wallet,
cid,
oid,
shell,
endpoint,
bearer,
write_object,
xhdr,
wallet_config,
no_progress,
session,
timeout,
)
@allure.step("Get object from {endpoint}")
def get_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str = None,
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
GET from FrostFS.
Args:
wallet (str): wallet on whose behalf GET is done
cid (str): ID of Container where we get the Object from
oid (str): Object ID
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
write_object: path to downloaded file, appends to `--file` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): path to downloaded file
"""
if not write_object:
write_object = str(uuid.uuid4())
file_path = os.path.join(ASSETS_DIR, write_object)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
cli.object.get(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
file=file_path,
bearer=bearer,
no_progress=no_progress,
xhdr=xhdr,
session=session,
timeout=timeout,
)
return file_path
@allure.step("Get Range Hash from {endpoint}")
def get_range_hash(
wallet: str,
cid: str,
oid: str,
range_cut: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
GETRANGEHASH of given Object.
Args:
wallet: wallet on whose behalf GETRANGEHASH is done
cid: ID of Container where we get the Object from
oid: Object ID
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
range_cut: Range to take hash from in the form offset1:length1,...,
value to pass to the `--range` parameter
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Values
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
timeout: Timeout for the operation.
Returns:
(str): hash of the requested object range
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.hash(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
range=range_cut,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
# cutting off output about range offset and length
return result.stdout.split(":")[1].strip()
@allure.step("Put object to random node")
def put_object_to_random_node(
wallet: str,
path: str,
cid: str,
shell: Shell,
cluster: Cluster,
bearer: Optional[str] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
PUT of given file to a random storage node.
Args:
wallet: wallet on whose behalf PUT is done
path: path to file to be PUT
cid: ID of the Container to put the Object to
shell: executor for cli command
cluster: cluster under test
bearer: path to Bearer Token file, appends to `--bearer` key
attributes: User attributes in form of Key1=Value1,Key2=Value2
wallet_config: path to the wallet config
no_progress: do not show progress bar
expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
ID of uploaded Object
"""
endpoint = cluster.get_random_storage_rpc_endpoint()
return put_object(
wallet,
path,
cid,
shell,
endpoint,
bearer,
attributes,
xhdr,
wallet_config,
expire_at,
no_progress,
session,
timeout,
)
@allure.step("Put object at {endpoint} in container {cid}")
def put_object(
wallet: str,
path: str,
cid: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
PUT of given file.
Args:
wallet: wallet on whose behalf PUT is done
path: path to file to be PUT
cid: ID of the Container to put the Object to
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
attributes: User attributes in form of Key1=Value1,Key2=Value2
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
no_progress: do not show progress bar
expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): ID of uploaded Object
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.put(
rpc_endpoint=endpoint,
wallet=wallet,
file=path,
cid=cid,
attributes=attributes,
bearer=bearer,
expire_at=expire_at,
no_progress=no_progress,
xhdr=xhdr,
session=session,
timeout=timeout,
)
# splitting CLI output to lines and taking the penultimate line
id_str = result.stdout.strip().split("\n")[-2]
oid = id_str.split(":")[1]
return oid.strip()
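# Illustrative sketch: PUT a local file and immediately GET it back from the
# same endpoint; all argument values are assumed to come from the test.
def _example_put_get_round_trip(wallet: str, path: str, cid: str, shell: Shell, endpoint: str) -> str:
    oid = put_object(wallet, path, cid, shell, endpoint)
    return get_object(wallet, cid, oid, shell, endpoint)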
@allure.step("Delete object {cid}/{oid} from {endpoint}")
def delete_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str = None,
bearer: str = "",
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
DELETE an Object.
Args:
wallet: wallet on whose behalf DELETE is done
cid: ID of the Container where the Object is stored
oid: ID of Object we are going to delete
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str): Tombstone ID
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.delete(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
id_str = result.stdout.split("\n")[1]
tombstone = id_str.split(":")[1]
return tombstone.strip()
@allure.step("Get Range")
def get_range(
wallet: str,
cid: str,
oid: str,
range_cut: str,
shell: Shell,
endpoint: str = None,
wallet_config: Optional[str] = None,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
GETRANGE an Object.
Args:
wallet: wallet on whose behalf GETRANGE is done
cid: ID of Container where we get the Object from
oid: ID of Object we are going to request
range_cut: range to take data from in the form offset:length
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
bearer: path to Bearer Token file, appends to `--bearer` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str, bytes) - path to the file with range content and content of this file as bytes
"""
range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
cli.object.range(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
range=range_cut,
file=range_file_path,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
with open(range_file_path, "rb") as file:
content = file.read()
return range_file_path, content
@allure.step("Lock Object")
def lock_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
wallet_config: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
Lock object in container.
Args:
address: Address of wallet account.
bearer: File with signed JSON or binary encoded bearer token.
cid: Container ID.
oid: Object ID.
lifetime: Lock lifetime.
expire_at: Lock expiration epoch.
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
session: Path to a JSON-encoded container session token.
ttl: TTL value in request meta header (default 2).
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation.
Returns:
Lock object ID
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.lock(
rpc_endpoint=endpoint,
lifetime=lifetime,
expire_at=expire_at,
address=address,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,
xhdr=xhdr,
session=session,
ttl=ttl,
timeout=timeout,
)
# splitting CLI output to lines and taking the first line
id_str = result.stdout.strip().split("\n")[0]
oid = id_str.split(":")[1]
return oid.strip()
@allure.step("Search object")
def search_object(
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
bearer: str = "",
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
root: bool = False,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list:
"""
SEARCH an Object.
Args:
wallet: wallet on whose behalf SEARCH is done
cid: ID of Container where we get the Object from
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
filters: key=value pairs to filter Objects
expected_objects_list: a list of ObjectIDs to compare found Objects with
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
phy: Search physically stored objects.
root: Search for user objects.
timeout: Timeout for the operation.
Returns:
list of found ObjectIDs
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.search(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
bearer=bearer,
xhdr=xhdr,
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()]
if filters
else None,
session=session,
phy=phy,
root=root,
timeout=timeout,
)
found_objects = re.findall(r"(\w{43,44})", result.stdout)
if expected_objects_list:
if sorted(found_objects) == sorted(expected_objects_list):
logger.info(
f"Found objects list '{found_objects}' "
f"is equal for expected list '{expected_objects_list}'"
)
else:
logger.warning(
f"Found object list {found_objects} "
f"is not equal to expected list '{expected_objects_list}'"
)
return found_objects
@allure.step("Get netmap netinfo")
def get_netmap_netinfo(
wallet: str,
shell: Shell,
endpoint: str,
wallet_config: Optional[str] = None,
address: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> dict[str, Any]:
"""
Get netmap netinfo output from node
Args:
wallet (str): wallet on whose behalf request is done
shell: executor for cli command
endpoint (str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
address: Address of wallet account
ttl: TTL value in request meta header (default 2)
xhdr: Request X-Headers in form of Key=Value
timeout: Timeout for the operation.
Returns:
(dict): dict of parsed command output
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
output = cli.netmap.netinfo(
wallet=wallet,
rpc_endpoint=endpoint,
address=address,
ttl=ttl,
xhdr=xhdr,
timeout=timeout,
)
settings = dict()
# note: bool("false") is truthy, so boolean values need an explicit comparison
patterns = [
    (re.compile(r"(.*): (\d+)"), int),
    (re.compile(r"(.*): (false|true)"), lambda value: value == "true"),
    (re.compile(r"(.*): (\d+\.\d+)"), float),
]
for pattern, func in patterns:
for setting, value in re.findall(pattern, output.stdout):
settings[setting.lower().strip().replace(" ", "_")] = func(value)
return settings
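# Illustrative only: keys are the printed setting names lower-cased with
# spaces replaced by underscores, so a line like "Epoch duration: 240" is
# exposed as settings["epoch_duration"] == 240 (assumed example value).
def _example_read_epoch_duration(wallet: str, shell: Shell, endpoint: str) -> int:
    settings = get_netmap_netinfo(wallet, shell, endpoint)
    return settings["epoch_duration"]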
@allure.step("Head object")
def head_object(
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: str = "",
xhdr: Optional[dict] = None,
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
wallet_config: Optional[str] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
"""
HEAD an Object.
Args:
wallet (str): wallet on whose behalf HEAD is done
cid (str): ID of Container where we get the Object from
oid (str): ObjectID to HEAD
shell: executor for cli command
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
endpoint (str): FrostFS endpoint to send request to
json_output(optional, bool): return response in JSON format or not; this flag
turns into `--json` key
is_raw(optional, bool): send "raw" request or not; this flag
turns into `--raw` key
is_direct(optional, bool): send request directly to the node or not; this flag
turns into `--ttl 1` key
wallet_config(optional, str): path to the wallet config
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
depending on the `json_output` parameter value, the function returns
(dict): HEAD response in JSON format
or
(str): HEAD response as a plain text
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or WALLET_CONFIG)
result = cli.object.head(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,
json_mode=json_output,
raw=is_raw,
ttl=1 if is_direct else None,
xhdr=xhdr,
session=session,
timeout=timeout,
)
if not json_output:
return result
try:
decoded = json.loads(result.stdout)
except Exception as exc:
# If we failed to parse output as JSON, the cause might be
# the plain text string in the beginning of the output.
# Here we cut off first string and try to parse again.
logger.info(f"failed to parse output: {exc}")
logger.info("parsing output in another way")
fst_line_idx = result.stdout.find("\n")
decoded = json.loads(result.stdout[fst_line_idx:])
# If response is Complex Object header, it has `splitId` key
if "splitId" in decoded.keys():
logger.info("decoding split header")
return json_utils.decode_split_header(decoded)
# If response is Last or Linking Object header,
# it has `header` dictionary and non-null `split` dictionary
if "split" in decoded["header"].keys():
if decoded["header"]["split"]:
logger.info("decoding linking object")
return json_utils.decode_linking_object(decoded)
if decoded["header"]["objectType"] == "STORAGE_GROUP":
logger.info("decoding storage group")
return json_utils.decode_storage_group(decoded)
if decoded["header"]["objectType"] == "TOMBSTONE":
logger.info("decoding tombstone")
return json_utils.decode_tombstone(decoded)
logger.info("decoding simple header")
return json_utils.decode_simple_header(decoded)
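# Illustrative only: with json_output=False the function skips all decoding
# and returns the raw CLI result instead of a parsed header dict.
def _example_head_raw(wallet: str, cid: str, oid: str, shell: Shell, endpoint: str):
    return head_object(wallet, cid, oid, shell, endpoint, json_output=False)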


@@ -1,353 +0,0 @@
import logging
import os
import random
import re
import shutil
import uuid
import zipfile
from typing import Optional
from urllib.parse import quote_plus
import allure
import requests
from frostfs_testlib.shell import Shell
from pytest_tests.helpers.aws_cli_client import LONG_TIMEOUT
from pytest_tests.helpers.cli_helpers import _cmd_run
from pytest_tests.helpers.cluster import StorageNode
from pytest_tests.helpers.file_helper import get_file_hash
from pytest_tests.helpers.frostfs_verbs import get_object
from pytest_tests.helpers.storage_policy import get_nodes_without_object
from pytest_tests.resources.common import SIMPLE_OBJECT_SIZE
logger = logging.getLogger("NeoLogger")
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
@allure.step("Get via HTTP Gate")
def get_via_http_gate(cid: str, oid: str, endpoint: str, request_path: Optional[str] = None):
"""
This function gets the given object from the HTTP gate
cid: container id to get object from
oid: object ID
endpoint: http gate endpoint
request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}]
"""
# if `request_path` parameter is omitted, use default
if request_path is None:
request = f"{endpoint}/get/{cid}/{oid}"
else:
request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True)
if not resp.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")
with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file)
return file_path
@allure.step("Get via Zip HTTP Gate")
def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str):
"""
This function gets objects by the given prefix from the HTTP gate as a ZIP archive
cid: container id to get object from
prefix: common prefix
endpoint: http gate endpoint
"""
request = f"{endpoint}/zip/{cid}/{prefix}"
resp = requests.get(request, stream=True)
if not resp.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")
with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file)
with zipfile.ZipFile(file_path, "r") as zip_ref:
zip_ref.extractall(ASSETS_DIR)
return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
@allure.step("Get via HTTP Gate by attribute")
def get_via_http_gate_by_attribute(
cid: str, attribute: dict, endpoint: str, request_path: Optional[str] = None
):
"""
This function gets the given object from the HTTP gate
cid: CID to get object from
attribute: attribute {name: attribute} value pair
endpoint: http gate endpoint
request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
"""
attr_name = list(attribute.keys())[0]
attr_value = quote_plus(str(attribute.get(attr_name)))
# if `request_path` parameter is omitted, use default
if request_path is None:
request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
else:
request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True)
if not resp.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")
with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file)
return file_path
@allure.step("Upload via HTTP Gate")
def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: dict = None) -> str:
"""
This function uploads the given object through the HTTP gate
cid: CID to get object from
path: File path to upload
endpoint: http gate endpoint
headers: Object header
"""
request = f"{endpoint}/upload/{cid}"
files = {"upload_file": open(path, "rb")}
body = {"filename": path}
resp = requests.post(request, files=files, data=body, headers=headers)
if not resp.ok:
raise Exception(
f"""Failed to get object via HTTP gate:
request: {resp.request.path_url},
response: {resp.text},
status code: {resp.status_code} {resp.reason}"""
)
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.json(), req_type="POST")
assert resp.json().get("object_id"), f"OID not found in response {resp}"
return resp.json().get("object_id")
@allure.step("Check is the passed object large")
def is_object_large(filepath: str) -> bool:
"""
This function checks the passed file size and returns True if file_size > SIMPLE_OBJECT_SIZE
filepath: File path to check
"""
file_size = os.path.getsize(filepath)
logger.info(f"Size= {file_size}")
return file_size > int(SIMPLE_OBJECT_SIZE)
@allure.step("Upload via HTTP Gate using Curl")
def upload_via_http_gate_curl(
cid: str,
filepath: str,
endpoint: str,
headers: list = None,
error_pattern: Optional[str] = None,
) -> str:
"""
This function uploads the given object through the HTTP gate using the curl utility.
cid: CID to get object from
filepath: File path to upload
headers: Object header
endpoint: http gate endpoint
error_pattern: [optional] expected error message from the command
"""
request = f"{endpoint}/upload/{cid}"
attributes = ""
if headers:
# parse attributes
attributes = " ".join(headers)
large_object = is_object_large(filepath)
if large_object:
# pre-clean
_cmd_run("rm pipe -f")
files = f"file=@pipe;filename={os.path.basename(filepath)}"
cmd = f"mkfifo pipe;cat {filepath} > pipe & curl --no-buffer -F '{files}' {attributes} {request}"
output = _cmd_run(cmd, LONG_TIMEOUT)
# clean up pipe
_cmd_run("rm pipe")
else:
files = f"file=@{filepath};filename={os.path.basename(filepath)}"
cmd = f"curl -F '{files}' {attributes} {request}"
output = _cmd_run(cmd)
if error_pattern:
match = error_pattern.casefold() in str(output).casefold()
assert match, f"Expected {output} to match {error_pattern}"
return ""
oid_re = re.search(r'"object_id": "(.*)"', output)
if not oid_re:
raise AssertionError(f'Could not find "object_id" in {output}')
return oid_re.group(1)
@allure.step("Get via HTTP Gate using Curl")
def get_via_http_curl(cid: str, oid: str, endpoint: str) -> str:
"""
This function gets the given object from the HTTP gate using the curl utility.
cid: CID to get object from
oid: object OID
endpoint: http gate endpoint
"""
request = f"{endpoint}/get/{cid}/{oid}"
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
cmd = f"curl {request} > {file_path}"
_cmd_run(cmd)
return file_path
def _attach_allure_step(request: str, status_code: int, req_type="GET"):
command_attachment = f"REQUEST: '{request}'\n" f"RESPONSE:\n {status_code}\n"
with allure.step(f"{req_type} Request"):
allure.attach(command_attachment, f"{req_type} Request", allure.attachment_type.TEXT)
@allure.step("Try to get object and expect error")
def try_to_get_object_and_expect_error(
cid: str, oid: str, error_pattern: str, endpoint: str
) -> None:
try:
    get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
except Exception as err:
    match = error_pattern.casefold() in str(err).casefold()
    assert match, f"Expected {err} to match {error_pattern}"
    return
# raised outside the try block so this AssertionError is not swallowed
# by the except clause above
raise AssertionError(f"Expected error on getting object with cid: {cid}")
@allure.step("Verify object can be get using HTTP header attribute")
def get_object_by_attr_and_verify_hashes(
oid: str, file_name: str, cid: str, attrs: dict, endpoint: str
) -> None:
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint)
got_file_path_http_attr = get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=endpoint
)
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
def get_object_and_verify_hashes(
oid: str,
file_name: str,
wallet: str,
cid: str,
shell: Shell,
nodes: list[StorageNode],
endpoint: str,
object_getter=None,
) -> None:
nodes_list = get_nodes_without_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=shell,
nodes=nodes,
)
# nodes_list can be empty when the object resides on all nodes
if nodes_list:
random_node = random.choice(nodes_list)
else:
random_node = random.choice(nodes)
object_getter = object_getter or get_via_http_gate
got_file_path = get_object(
wallet=wallet,
cid=cid,
oid=oid,
shell=shell,
endpoint=random_node.get_rpc_endpoint(),
)
got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint)
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
def assert_hashes_are_equal(orig_file_name: str, got_file_1: str, got_file_2: str) -> None:
msg = "Expected hashes are equal for files {f1} and {f2}"
got_file_hash_http = get_file_hash(got_file_1)
assert get_file_hash(got_file_2) == got_file_hash_http, msg.format(f1=got_file_2, f2=got_file_1)
assert get_file_hash(orig_file_name) == got_file_hash_http, msg.format(
f1=orig_file_name, f2=got_file_1
)
def attr_into_header(attrs: dict) -> dict:
return {f"X-Attribute-{_key}": _value for _key, _value in attrs.items()}
@allure.step(
"Convert each attribute (Key=Value) to the following format: -H 'X-Attribute-Key: Value'"
)
def attr_into_str_header_curl(attrs: dict) -> list:
headers = []
for k, v in attrs.items():
headers.append(f"-H 'X-Attribute-{k}: {v}'")
logger.info(f"[List of Attrs for curl:] {headers}")
return headers
@allure.step(
"Try to get object via http (pass http_request and optional attributes) and expect error"
)
def try_to_get_object_via_passed_request_and_expect_error(
cid: str,
oid: str,
error_pattern: str,
endpoint: str,
http_request_path: str,
attrs: dict = None,
) -> None:
try:
    if attrs is None:
        get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, request_path=http_request_path)
    else:
        get_via_http_gate_by_attribute(
            cid=cid, attribute=attrs, endpoint=endpoint, request_path=http_request_path
        )
except Exception as err:
    match = error_pattern.casefold() in str(err).casefold()
    assert match, f"Expected {err} to match {error_pattern}"
    return
# raised outside the try block so this AssertionError is not swallowed
# by the except clause above
raise AssertionError(f"Expected error on getting object with cid: {cid}")


@@ -1,237 +0,0 @@
import re
from contextlib import contextmanager
from dataclasses import dataclass
from time import sleep
from typing import Optional
import allure
from frostfs_testlib.shell import Shell
from pytest_tests.helpers.remote_process import RemoteProcess
EXIT_RESULT_CODE = 0
LOAD_RESULTS_PATTERNS = {
"grpc": {
"write_ops": r"frostfs_obj_put_total\W*\d*\W*(?P<write_ops>\d*\.\d*)",
"read_ops": r"frostfs_obj_get_total\W*\d*\W*(?P<read_ops>\d*\.\d*)",
},
"s3": {
"write_ops": r"aws_obj_put_total\W*\d*\W*(?P<write_ops>\d*\.\d*)",
"read_ops": r"aws_obj_get_total\W*\d*\W*(?P<write_ops>\d*\.\d*)",
},
"http": {"total_ops": r"http_reqs\W*\d*\W*(?P<total_ops>\d*\.\d*)"},
}
@dataclass
class LoadParams:
load_type: str
endpoint: str
writers: Optional[int] = None
readers: Optional[int] = None
deleters: Optional[int] = None
clients: Optional[int] = None
containers_count: Optional[int] = None
out_file: Optional[str] = None
load_time: Optional[int] = None
obj_count: Optional[int] = None
obj_size: Optional[int] = None
registry_file: Optional[str] = None
@dataclass
class LoadResults:
data_sent: float = 0.0
data_received: float = 0.0
read_ops: float = 0.0
write_ops: float = 0.0
total_ops: float = 0.0
class K6:
def __init__(self, load_params: LoadParams, shell: Shell):
self.load_params = load_params
self.shell = shell
self._k6_dir = None
self._k6_result = None
self._k6_process = None
self._k6_stop_attempts = 5
self._k6_stop_timeout = 15
@property
def process_dir(self) -> str:
return self._k6_process.process_dir
@property
def k6_dir(self) -> str:
if not self._k6_dir:
self._k6_dir = self.shell.exec(
r"sudo find . -name 'k6' -exec dirname {} \; -quit"
).stdout.strip("\n")
return self._k6_dir
@allure.step("Prepare containers and objects")
def prepare(self) -> str:
self._k6_dir = self.k6_dir
if self.load_params.load_type == "http" or self.load_params.load_type == "grpc":
command = (
f"{self.k6_dir}/scenarios/preset/preset_grpc.py "
f"--size {self.load_params.obj_size} "
f"--containers {self.load_params.containers_count} "
f"--out {self.k6_dir}/{self.load_params.load_type}_{self.load_params.out_file} "
f"--endpoint {self.load_params.endpoint.split(',')[0]} "
f"--preload_obj {self.load_params.obj_count} "
)
terminal = self.shell.exec(command)
return terminal.stdout.strip("\n")
elif self.load_params.load_type == "s3":
command = (
f"{self.k6_dir}/scenarios/preset/preset_s3.py --size {self.load_params.obj_size} "
f"--buckets {self.load_params.containers_count} "
f"--out {self.k6_dir}/{self.load_params.load_type}_{self.load_params.out_file} "
f"--endpoint {self.load_params.endpoint.split(',')[0]} "
f"--preload_obj {self.load_params.obj_count} "
f"--location load-1-1"
)
terminal = self.shell.exec(command)
return terminal.stdout.strip("\n")
else:
raise AssertionError("Wrong K6 load type")
@allure.step("Generate K6 command")
def _generate_env_variables(self, load_params: LoadParams, k6_dir: str) -> str:
env_vars = {
"DURATION": load_params.load_time or None,
"WRITE_OBJ_SIZE": load_params.obj_size or None,
"WRITERS": load_params.writers or 0,
"READERS": load_params.readers or 0,
"DELETERS": load_params.deleters or 0,
"REGISTRY_FILE": load_params.registry_file or None,
"CLIENTS": load_params.clients or None,
f"{self.load_params.load_type.upper()}_ENDPOINTS": self.load_params.endpoint,
"PREGEN_JSON": f"{self.k6_dir}/{self.load_params.load_type}_{self.load_params.out_file}"
if load_params.out_file
else None,
}
allure.attach(
"\n".join(f"{param}: {value}" for param, value in env_vars.items()),
"K6 ENV variables",
allure.attachment_type.TEXT,
)
return " ".join(
[f"-e {param}={value}" for param, value in env_vars.items() if value is not None]
)
@allure.step("Start K6 on initiator")
def start(self) -> None:
self._k6_dir = self.k6_dir
command = (
f"{self.k6_dir}/k6 run {self._generate_env_variables(self.load_params, self.k6_dir)} "
f"{self.k6_dir}/scenarios/{self.load_params.load_type}.js"
)
self._k6_process = RemoteProcess.create(command, self.shell)
@allure.step("Wait until K6 is finished")
def wait_until_finished(self, timeout: int = 0, k6_should_be_running: bool = False) -> None:
if self._k6_process is None:
    # a bare assert on a non-empty string never fails, so raise explicitly
    raise AssertionError("No k6 instances were executed")
if k6_should_be_running:
assert self._k6_process.running(), "k6 should be running."
for __attempt in reversed(range(5)) if timeout else [0]:
if not self._k6_process.running():
return
if __attempt: # no sleep in last iteration
sleep(int(timeout / 5))
self._stop_k6()
raise TimeoutError(f"Expected K6 to finish in {timeout} sec.")
@contextmanager
def start_context(
self, warm_up_time: int = 0, expected_finish: bool = False, expected_fail: bool = False
) -> None:
self.start()
sleep(warm_up_time)
try:
yield self
except Exception as err:
if self._k6_process.running():
self._kill_k6()
raise
if expected_fail:
self._kill_k6()
elif expected_finish:
if self._k6_process.running():
self._kill_k6()
raise AssertionError("K6 has not finished in expected time")
else:
self._k6_should_be_finished()
else:
self._stop_k6()
@allure.step("Get K6 results")
def get_k6_results(self) -> None:
self.__log_k6_output()
@allure.step("Assert K6 should be finished")
def _k6_should_be_finished(self) -> None:
k6_rc = self._k6_process.rc()
assert k6_rc == 0, f"K6 unexpectedly finished with RC {k6_rc}"
@allure.step("Terminate K6 on initiator")
def stop(self) -> None:
if not self._k6_process.running():
raise AssertionError("K6 unexpectedly finished")
self._stop_k6()
k6_rc = self._k6_process.rc()
assert k6_rc == EXIT_RESULT_CODE, f"Return code of K6 job should be 0, but got {k6_rc}"
def check_k6_is_running(self) -> bool:
if self._k6_process:
return self._k6_process.running()
return False
@property
def is_finished(self) -> bool:
return not self._k6_process.running()
def parsing_results(self) -> LoadResults:
output = self._k6_process.stdout(full=True).replace("\n", "")
metric_regex_map = {
"data_received": r"data_received\W*\d*.\d*.\w*\W*(?P<data_received>\d*)",
"data_sent": r"data_sent\W*\d*.\d*.\w*\W*(?P<data_sent>\d*)",
}
metric_regex_map.update(LOAD_RESULTS_PATTERNS[self.load_params.load_type])
metric_values = {}
for metric_name, metric_regex in metric_regex_map.items():
match = re.search(metric_regex, output)
if match:
metric_values[metric_name] = float(match.group(metric_name))
continue
metric_values[metric_name] = 0.0
load_result = LoadResults(**metric_values)
return load_result
@allure.step("Try to stop K6 with SIGTERM")
def _stop_k6(self) -> None:
for __attempt in range(self._k6_stop_attempts):
if not self._k6_process.running():
break
self._k6_process.stop()
sleep(self._k6_stop_timeout)
else:
raise AssertionError("Can not stop K6 process within timeout")
def _kill_k6(self) -> None:
self._k6_process.kill()
@allure.step("Log K6 output")
def __log_k6_output(self) -> None:
allure.attach(self._k6_process.stdout(full=True), "K6 output", allure.attachment_type.TEXT)
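# Illustrative usage sketch; all parameter values are arbitrary examples and
# `endpoint` is assumed to be a gRPC endpoint of the cluster under test.
def _example_grpc_write_load(shell: Shell, endpoint: str) -> LoadResults:
    params = LoadParams(
        load_type="grpc",
        endpoint=endpoint,
        writers=10,
        load_time=60,
        obj_size=1024,
        obj_count=100,
        containers_count=1,
        out_file="pregen.json",
    )
    k6 = K6(params, shell)
    k6.prepare()
    k6.start()
    k6.wait_until_finished(timeout=120)
    return k6.parsing_results()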


@@ -1,310 +0,0 @@
import logging
import random
import re
import time
from time import sleep
from dataclasses import dataclass
from typing import Optional
import allure
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import datetime_utils
from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.helpers.epoch import tick_epoch
from pytest_tests.resources.common import (
FROSTFS_CLI_EXEC,
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
MORPH_BLOCK_TIME,
)
logger = logging.getLogger("NeoLogger")
@dataclass
class HealthStatus:
network_status: Optional[str] = None
health_status: Optional[str] = None
@staticmethod
def from_stdout(output: str) -> "HealthStatus":
network, health = None, None
for line in output.split("\n"):
if "Network status" in line:
network = line.split(":")[-1].strip()
if "Health status" in line:
health = line.split(":")[-1].strip()
return HealthStatus(network, health)
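# Illustrative only: the sample text below is an assumed example of
# `control healthcheck` output, not captured node output.
def _example_parse_healthcheck() -> "HealthStatus":
    return HealthStatus.from_stdout("Network status: ONLINE\nHealth status: READY")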
@allure.step("Stop random storage nodes")
def stop_random_storage_nodes(number: int, nodes: list[StorageNode]) -> list[StorageNode]:
"""
Shuts down the given number of randomly selected storage nodes.
Args:
number: the number of storage nodes to stop
nodes: the list of storage nodes to stop
Returns:
the list of nodes that were stopped
"""
nodes_to_stop = random.sample(nodes, number)
for node in nodes_to_stop:
node.stop_service()
return nodes_to_stop
@allure.step("Start storage node")
def start_storage_nodes(nodes: list[StorageNode]) -> None:
"""
The function starts specified storage nodes.
Args:
nodes: the list of nodes to start
"""
for node in nodes:
node.start_service()
@allure.step("Get Locode from random storage node")
def get_locode_from_random_node(cluster: Cluster) -> str:
node = random.choice(cluster.storage_nodes)
locode = node.get_un_locode()
logger.info(f"Chosen '{locode}' locode from node {node}")
return locode
@allure.step("Healthcheck for storage node {node}")
def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
"""
The function returns storage node's health status.
Args:
node: storage node for which health status should be retrieved.
Returns:
health status as HealthStatus object.
"""
command = "control healthcheck"
output = _run_control_command_with_retries(node, command)
return HealthStatus.from_stdout(output)
@allure.step("Set status for {node}")
def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) -> None:
"""
The function sets particular status for given node.
Args:
node: node for which status should be set.
status: online or offline.
retries (optional, int): number of retry attempts if it didn't work from the first time
"""
command = f"control set-status --status {status}"
_run_control_command_with_retries(node, command, retries)
@allure.step("Get netmap snapshot")
def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
"""
The function returns string representation of netmap snapshot.
Args:
node: node from which netmap snapshot should be requested.
Returns:
string representation of netmap
"""
storage_wallet_config = node.get_wallet_config_path()
storage_wallet_path = node.get_wallet_path()
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
return cli.netmap.snapshot(
rpc_endpoint=node.get_rpc_endpoint(),
wallet=storage_wallet_path,
).stdout
@allure.step("Get shard list for {node}")
def node_shard_list(node: StorageNode) -> list[str]:
"""
The function returns list of shards for specified storage node.
Args:
node: node for which shards should be returned.
Returns:
list of shards.
"""
command = "control shards list"
output = _run_control_command_with_retries(node, command)
return re.findall(r"Shard (.*):", output)
@allure.step("Shard set for {node}")
def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
"""
The function sets mode for specified shard.
Args:
node: node on which shard mode should be set.
"""
command = f"control shards set-mode --id {shard} --mode {mode}"
return _run_control_command_with_retries(node, command)
@allure.step("Drop object from {node}")
def drop_object(node: StorageNode, cid: str, oid: str) -> str:
"""
The function drops the object from the specified node.
Args:
    node: node from which the object should be dropped.
    cid: container ID of the object.
    oid: ID of the object to drop.
"""
command = f"control drop-objects -o {cid}/{oid}"
return _run_control_command_with_retries(node, command)
@allure.step("Delete data from host for node {node}")
def delete_node_data(node: StorageNode) -> None:
node.stop_service()
node.host.delete_storage_node_data(node.name)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
@allure.step("Exclude node {node_to_exclude} from network map")
def exclude_node_from_network_map(
node_to_exclude: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
node_netmap_key = node_to_exclude.get_wallet_public_key()
storage_node_set_status(node_to_exclude, status="offline")
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME))
tick_epoch(shell, cluster)
snapshot = get_netmap_snapshot(node=alive_node, shell=shell)
assert (
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be absent in network map"
@allure.step("Include node {node_to_include} into network map")
def include_node_to_network_map(
node_to_include: StorageNode,
alive_node: StorageNode,
shell: Shell,
cluster: Cluster,
) -> None:
storage_node_set_status(node_to_include, status="online")
# Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.
# First sleep can be omitted after https://github.com/nspcc-dev/frostfs-node/issues/1790 complete.
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
tick_epoch(shell, cluster)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
check_node_in_map(node_to_include, shell, alive_node)
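# Illustrative sketch: a full offline/online cycle for one storage node; the
# alive node is used for netmap queries while the target node is excluded.
def _example_node_map_cycle(
    node: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster
) -> None:
    exclude_node_from_network_map(node, alive_node, shell, cluster)
    check_node_not_in_map(node, shell, alive_node)
    include_node_to_network_map(node, alive_node, shell, cluster)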
@allure.step("Check node {node} in network map")
def check_node_in_map(
node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell)
assert (
node_netmap_key in snapshot
), f"Expected node with key {node_netmap_key} to be in network map"
@allure.step("Check node {node} NOT in network map")
def check_node_not_in_map(
node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None
) -> None:
alive_node = alive_node or node
node_netmap_key = node.get_wallet_public_key()
logger.info(f"Node ({node.label}) netmap key: {node_netmap_key}")
snapshot = get_netmap_snapshot(alive_node, shell)
assert (
node_netmap_key not in snapshot
), f"Expected node with key {node_netmap_key} to be NOT in network map"
@allure.step("Wait for node {node} is ready")
def wait_for_node_to_be_ready(node: StorageNode) -> None:
timeout, attempts = 30, 6
for _ in range(attempts):
try:
health_check = storage_node_healthcheck(node)
if health_check.health_status == "READY":
return
except Exception as err:
logger.warning(f"Node {node} is not ready:\n{err}")
sleep(timeout)
raise AssertionError(
f"Node {node} hasn't gone to the READY state after {timeout * attempts} seconds"
)
@allure.step("Remove nodes from network map trough cli-adm morph command")
def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None):
"""
Move nodes to the Offline state in the candidates list and tick an epoch to update the netmap
using frostfs-adm
Args:
shell: local shell to make queries about the current epoch. A remote shell will be used to tick a new one
cluster: cluster instance under test
alive_node: node to send requests to (first node in cluster by default)
remove_nodes: list of nodes which would be removed from map
"""
alive_node = alive_node if alive_node else remove_nodes[0]
remote_shell = alive_node.host.get_shell()
node_netmap_keys = list(map(StorageNode.get_wallet_public_key, remove_nodes))
logger.info(f"Nodes netmap keys are: {' '.join(node_netmap_keys)}")
if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
# If frostfs-adm is available, we use it to remove the nodes (to be consistent with UAT tests)
frostfsadm = FrostfsAdm(
shell=remote_shell,
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
config_file=FROSTFS_ADM_CONFIG_PATH,
)
frostfsadm.morph.remove_nodes(node_netmap_keys)
def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
for attempt in range(1 + retries): # original attempt + specified retries
try:
return _run_control_command(node, command)
except AssertionError as err:
if attempt < retries:
logger.warning(f"Command {command} failed with error {err} and will be retried")
continue
raise AssertionError(f"Command {command} failed with error {err}") from err
def _run_control_command(node: StorageNode, command: str) -> str:
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'password: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
# TODO: implement cli.control
# cli = frostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = shell.exec(
f"{cli_config.exec_path} {command} --endpoint {control_endpoint} "
f"--wallet {wallet_path} --config {wallet_config_path}"
)
return result.stdout
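# Minimal usage sketch (hypothetical test snippet, not part of the module's API):
# take a node out of the netmap and bring it back using the helpers above.
# `shell` and `cluster` are assumed to come from the usual session fixtures.
def _example_node_maintenance(node: StorageNode, alive: StorageNode, shell: Shell, cluster: Cluster) -> None:
    exclude_node_from_network_map(node, alive, shell, cluster)
    check_node_not_in_map(node, shell, alive)
    include_node_to_network_map(node, alive, shell, cluster)  # re-checks the map internally
    wait_for_node_to_be_ready(node)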

View file

@ -1,13 +1,10 @@
from typing import Optional
import allure
from frostfs_testlib.resources.common import OBJECT_ACCESS_DENIED
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.error_patterns import OBJECT_ACCESS_DENIED
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import string_utils
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.file_helper import get_file_hash
from pytest_tests.helpers.frostfs_verbs import (
from frostfs_testlib.steps.cli.object import (
delete_object,
get_object_from_random_node,
get_range,
@ -16,7 +13,9 @@ from pytest_tests.helpers.frostfs_verbs import (
put_object_to_random_node,
search_object,
)
from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.utils import string_utils
from frostfs_testlib.utils.file_utils import get_file_hash
OPERATION_ERROR_TYPE = RuntimeError

View file

@ -1,220 +0,0 @@
import base64
import json
import logging
import re
import time
from typing import Optional
import allure
from frostfs_testlib.cli import NeoGo
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import converting_utils, datetime_utils, wallet_utils
from neo3.wallet import utils as neo3_utils
from neo3.wallet import wallet as neo3_wallet
from pytest_tests.helpers.cluster import MainChain, MorphChain
from pytest_tests.resources.common import (
FROSTFS_CONTRACT,
GAS_HASH,
MAINNET_BLOCK_TIME,
NEOGO_EXECUTABLE,
)
logger = logging.getLogger("NeoLogger")
EMPTY_PASSWORD = ""
TX_PERSIST_TIMEOUT = 15 # seconds
ASSET_POWER_MAINCHAIN = 10**8
ASSET_POWER_SIDECHAIN = 10**12
def get_nns_contract_hash(morph_chain: MorphChain) -> str:
return morph_chain.rpc_client.get_contract_state(1)["hash"]
def get_contract_hash(morph_chain: MorphChain, resolve_name: str, shell: Shell) -> str:
nns_contract_hash = get_nns_contract_hash(morph_chain)
neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.contract.testinvokefunction(
scripthash=nns_contract_hash,
method="resolve",
arguments=f"string:{resolve_name} int:16",
rpc_endpoint=morph_chain.get_endpoint(),
)
stack_data = json.loads(out.stdout.replace("\n", ""))["stack"][0]["value"]
return bytes.decode(base64.b64decode(stack_data[0]["value"]))
@allure.step("Withdraw Mainnet Gas")
def withdraw_mainnet_gas(shell: Shell, main_chain: MainChain, wlt: str, amount: int):
address = wallet_utils.get_last_address_from_wallet(wlt, EMPTY_PASSWORD)
scripthash = neo3_utils.address_to_script_hash(address)
neogo = NeoGo(shell=shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.contract.invokefunction(
wallet=wlt,
address=address,
rpc_endpoint=main_chain.get_endpoint(),
scripthash=FROSTFS_CONTRACT,
method="withdraw",
arguments=f"{scripthash} int:{amount}",
multisig_hash=f"{scripthash}:Global",
wallet_password="",
)
m = re.match(r"^Sent invocation transaction (\w{64})$", out.stdout)
if m is None:
raise Exception("Cannot get Tx.")
tx = m.group(1)
if not transaction_accepted(main_chain, tx):
raise AssertionError(f"TX {tx} hasn't been processed")
def transaction_accepted(main_chain: MainChain, tx_id: str):
"""
This function returns True in case of accepted TX.
Args:
tx_id(str): transaction ID
Returns:
(bool)
"""
try:
for _ in range(0, TX_PERSIST_TIMEOUT):
time.sleep(1)
resp = main_chain.rpc_client.get_transaction_height(tx_id)
if resp is not None:
logger.info(f"TX is accepted in block: {resp}")
return True
except Exception as out:
logger.info(f"request failed with error: {out}")
raise out
return False
@allure.step("Get FrostFS Balance")
def get_balance(shell: Shell, morph_chain: MorphChain, wallet_path: str, wallet_password: str = ""):
"""
This function returns FrostFS balance for given wallet.
"""
with open(wallet_path) as wallet_file:
wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password)
acc = wallet.accounts[-1]
payload = [{"type": "Hash160", "value": str(acc.script_hash)}]
try:
resp = morph_chain.rpc_client.invoke_function(
get_contract_hash(morph_chain, "balance.frostfs", shell=shell), "balanceOf", payload
)
logger.info(f"Got response \n{resp}")
value = int(resp["stack"][0]["value"])
return value / ASSET_POWER_SIDECHAIN
except Exception as out:
logger.error(f"failed to get wallet balance: {out}")
raise out
@allure.title("Transfer Gas")
def transfer_gas(
shell: Shell,
amount: int,
main_chain: MainChain,
wallet_from_path: Optional[str] = None,
wallet_from_password: Optional[str] = None,
address_from: Optional[str] = None,
address_to: Optional[str] = None,
wallet_to_path: Optional[str] = None,
wallet_to_password: Optional[str] = None,
):
"""
This function transfers GAS in the main chain from the mainnet wallet to
the provided wallet. If the wallet contains more than one address,
the assets will be transferred to the last one.
Args:
shell: Shell instance.
wallet_from_password: Password of the wallet; it is required to decode the wallet
and extract its addresses.
wallet_from_path: Path to chain node wallet.
address_from: The address of the wallet to transfer assets from.
wallet_to_path: The path to the wallet to transfer assets to.
wallet_to_password: The password to the wallet to transfer assets to.
address_to: The address of the wallet to transfer assets to.
amount: Amount of gas to transfer.
"""
wallet_from_path = wallet_from_path or main_chain.get_wallet_path()
wallet_from_password = (
wallet_from_password
if wallet_from_password is not None
else main_chain.get_wallet_password()
)
address_from = address_from or wallet_utils.get_last_address_from_wallet(
wallet_from_path, wallet_from_password
)
address_to = address_to or wallet_utils.get_last_address_from_wallet(
wallet_to_path, wallet_to_password
)
neogo = NeoGo(shell, neo_go_exec_path=NEOGO_EXECUTABLE)
out = neogo.nep17.transfer(
rpc_endpoint=main_chain.get_endpoint(),
wallet=wallet_from_path,
wallet_password=wallet_from_password,
amount=amount,
from_address=address_from,
to_address=address_to,
token="GAS",
force=True,
)
txid = out.stdout.strip().split("\n")[-1]
if len(txid) != 64:
raise Exception("Got no TXID after run the command")
if not transaction_accepted(main_chain, txid):
raise AssertionError(f"TX {txid} hasn't been processed")
time.sleep(datetime_utils.parse_time(MAINNET_BLOCK_TIME))
@allure.step("FrostFS Deposit")
def deposit_gas(
shell: Shell,
main_chain: MainChain,
amount: int,
wallet_from_path: str,
wallet_from_password: str,
):
"""
Transferring GAS from given wallet to FrostFS contract address.
"""
# get FrostFS contract address
deposit_addr = converting_utils.contract_hash_to_address(FROSTFS_CONTRACT)
logger.info(f"FrostFS contract address: {deposit_addr}")
address_from = wallet_utils.get_last_address_from_wallet(
wallet_path=wallet_from_path, wallet_password=wallet_from_password
)
transfer_gas(
shell=shell,
main_chain=main_chain,
amount=amount,
wallet_from_path=wallet_from_path,
wallet_from_password=wallet_from_password,
address_to=deposit_addr,
address_from=address_from,
)
@allure.step("Get Mainnet Balance")
def get_mainnet_balance(main_chain: MainChain, address: str):
resp = main_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}")
for balance in resp["balance"]:
if balance["assethash"] == GAS_HASH:
return float(balance["amount"]) / ASSET_POWER_MAINCHAIN
return float(0)
@allure.step("Get Sidechain Balance")
def get_sidechain_balance(morph_chain: MorphChain, address: str):
resp = morph_chain.rpc_client.get_nep17_balances(address=address)
logger.info(f"Got getnep17balances response: {resp}")
for balance in resp["balance"]:
if balance["assethash"] == GAS_HASH:
return float(balance["amount"]) / ASSET_POWER_SIDECHAIN
return float(0)
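# Minimal usage sketch (hypothetical): fund a freshly created wallet and check
# that the deposit shows up as FrostFS balance on the sidechain. Mirrors the
# flow used by the wallet factory elsewhere in this suite.
def _example_fund_wallet(shell: Shell, main_chain: MainChain, morph_chain: MorphChain, wallet_path: str) -> None:
    deposit = 30
    # transfer slightly more than the deposit to cover the transaction fee
    transfer_gas(shell=shell, amount=deposit + 1, main_chain=main_chain, wallet_to_path=wallet_path, wallet_to_password="")
    deposit_gas(shell=shell, main_chain=main_chain, amount=deposit, wallet_from_path=wallet_path, wallet_from_password="")
    assert get_balance(shell, morph_chain, wallet_path) >= deposit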

View file

@ -1,187 +0,0 @@
from __future__ import annotations
import uuid
from typing import Optional
import allure
from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.interfaces import CommandOptions
from tenacity import retry, stop_after_attempt, wait_fixed
class RemoteProcess:
def __init__(self, cmd: str, process_dir: str, shell: Shell):
self.process_dir = process_dir
self.cmd = cmd
self.stdout_last_line_number = 0
self.stderr_last_line_number = 0
self.pid: Optional[str] = None
self.proc_rc: Optional[int] = None
self.saved_stdout: Optional[str] = None
self.saved_stderr: Optional[str] = None
self.shell = shell
@classmethod
@allure.step("Create remote process")
def create(cls, command: str, shell: Shell) -> RemoteProcess:
"""
Create a process on a remote host.
Creates a dir for the process with the following files:
command.sh: script to execute
pid: contains process id
rc: contains script return code
stderr: contains script errors
stdout: contains script output
Args:
shell: Shell instance
command: command to be run on a remote host
Returns:
RemoteProcess instance for further examination
"""
remote_process = cls(cmd=command, process_dir=f"/tmp/proc_{uuid.uuid4()}", shell=shell)
remote_process._create_process_dir()
remote_process._generate_command_script(command)
remote_process._start_process()
remote_process.pid = remote_process._get_pid()
return remote_process
@allure.step("Get process stdout")
def stdout(self, full: bool = False) -> str:
"""
Method to get process stdout, either fresh info or full.
Args:
full: returns full stdout that we have to this moment
Returns:
Fresh stdout. By means of stdout_last_line_number only new stdout lines are returned.
If process is finished (proc_rc is not None) saved stdout is returned
"""
if self.saved_stdout is not None:
cur_stdout = self.saved_stdout
else:
terminal = self.shell.exec(f"cat {self.process_dir}/stdout")
if self.proc_rc is not None:
self.saved_stdout = terminal.stdout
cur_stdout = terminal.stdout
if full:
return cur_stdout
whole_stdout = cur_stdout.split("\n")
if len(whole_stdout) > self.stdout_last_line_number:
resulted_stdout = "\n".join(whole_stdout[self.stdout_last_line_number :])
self.stdout_last_line_number = len(whole_stdout)
return resulted_stdout
return ""
@allure.step("Get process stderr")
def stderr(self, full: bool = False) -> str:
"""
Method to get process stderr, either fresh info or full.
Args:
full: returns full stderr that we have to this moment
Returns:
Fresh stderr. By means of stderr_last_line_number only new stderr lines are returned.
If process is finished (proc_rc is not None) saved stderr is returned
"""
if self.saved_stderr is not None:
cur_stderr = self.saved_stderr
else:
terminal = self.shell.exec(f"cat {self.process_dir}/stderr")
if self.proc_rc is not None:
self.saved_stderr = terminal.stdout
cur_stderr = terminal.stdout
if full:
return cur_stderr
whole_stderr = cur_stderr.split("\n")
if len(whole_stderr) > self.stderr_last_line_number:
resulted_stderr = "\n".join(whole_stderr[self.stderr_last_line_number :])
self.stderr_last_line_number = len(whole_stderr)
return resulted_stderr
return ""
@allure.step("Get process rc")
def rc(self) -> Optional[int]:
if self.proc_rc is not None:
return self.proc_rc
terminal = self.shell.exec(f"cat {self.process_dir}/rc", CommandOptions(check=False))
if "No such file or directory" in terminal.stderr:
return None
elif terminal.stderr or terminal.return_code != 0:
raise AssertionError(f"cat process rc was not successfull: {terminal.stderr}")
self.proc_rc = int(terminal.stdout)
return self.proc_rc
@allure.step("Check if process is running")
def running(self) -> bool:
return self.rc() is None
@allure.step("Send signal to process")
def send_signal(self, signal: int) -> None:
kill_res = self.shell.exec(f"kill -{signal} {self.pid}", CommandOptions(check=False))
if "No such process" in kill_res.stderr:
return
if kill_res.return_code:
raise AssertionError(
f"Signal {signal} not sent. Return code of kill: {kill_res.return_code}"
)
@allure.step("Stop process")
def stop(self) -> None:
self.send_signal(15)
@allure.step("Kill process")
def kill(self) -> None:
self.send_signal(9)
@allure.step("Clear process directory")
def clear(self) -> None:
if self.process_dir == "/":
raise AssertionError(f"Invalid path to delete: {self.process_dir}")
self.shell.exec(f"rm -rf {self.process_dir}")
@allure.step("Start remote process")
def _start_process(self) -> None:
self.shell.exec(
f"nohup {self.process_dir}/command.sh </dev/null "
f">{self.process_dir}/stdout "
f"2>{self.process_dir}/stderr &"
)
@allure.step("Create process directory")
def _create_process_dir(self) -> None:
self.shell.exec(f"mkdir {self.process_dir}; chmod 777 {self.process_dir}")
terminal = self.shell.exec(f"realpath {self.process_dir}")
self.process_dir = terminal.stdout.strip()
@allure.step("Get pid")
@retry(wait=wait_fixed(10), stop=stop_after_attempt(5), reraise=True)
def _get_pid(self) -> str:
terminal = self.shell.exec(f"cat {self.process_dir}/pid")
assert terminal.stdout, f"invalid pid: {terminal.stdout}"
return terminal.stdout.strip()
@allure.step("Generate command script")
def _generate_command_script(self, command: str) -> None:
# Escape backslashes first, then double quotes, so the command survives being
# written via `echo "..."` on the remote shell.
command = command.replace("\\", "\\\\").replace('"', '\\"')
script = (
f"#!/bin/bash\n"
f"cd {self.process_dir}\n"
f"{command} &\n"
f"pid=\$!\n"
f"cd {self.process_dir}\n"
f"echo \$pid > {self.process_dir}/pid\n"
f"wait \$pid\n"
f"echo $? > {self.process_dir}/rc"
)
self.shell.exec(f'echo "{script}" > {self.process_dir}/command.sh')
self.shell.exec(f"cat {self.process_dir}/command.sh")
self.shell.exec(f"chmod +x {self.process_dir}/command.sh")

View file

@ -1,159 +0,0 @@
import logging
import os
from datetime import datetime, timedelta
from typing import Optional
import allure
from dateutil.parser import parse
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
logger = logging.getLogger("NeoLogger")
@allure.step("Expected all objects are presented in the bucket")
def check_objects_in_bucket(
s3_client, bucket, expected_objects: list, unexpected_objects: Optional[list] = None
) -> None:
unexpected_objects = unexpected_objects or []
bucket_objects = s3_gate_object.list_objects_s3(s3_client, bucket)
assert len(bucket_objects) == len(
expected_objects
), f"Expected {len(expected_objects)} objects in the bucket"
for bucket_object in expected_objects:
assert (
bucket_object in bucket_objects
), f"Expected object {bucket_object} in objects list {bucket_objects}"
for bucket_object in unexpected_objects:
assert (
bucket_object not in bucket_objects
), f"Expected object {bucket_object} not in objects list {bucket_objects}"
@allure.step("Try to get object and got error")
def try_to_get_objects_and_expect_error(s3_client, bucket: str, object_keys: list) -> None:
for obj in object_keys:
try:
s3_gate_object.get_object_s3(s3_client, bucket, obj)
raise AssertionError(f"Object {obj} found in bucket {bucket}")
except Exception as err:
assert "The specified key does not exist" in str(
err
), f"Expected error in exception {err}"
@allure.step("Set versioning status to '{status}' for bucket '{bucket}'")
def set_bucket_versioning(s3_client, bucket: str, status: s3_gate_bucket.VersioningStatus):
s3_gate_bucket.get_bucket_versioning_status(s3_client, bucket)
s3_gate_bucket.set_bucket_versioning(s3_client, bucket, status=status)
bucket_status = s3_gate_bucket.get_bucket_versioning_status(s3_client, bucket)
assert bucket_status == status.value, f"Expected {status.value} status. Got {bucket_status}"
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)
def assert_tags(
actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
) -> None:
expected_tags = (
[{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
)
unexpected_tags = (
[{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
)
if expected_tags == []:
assert not actual_tags, f"Expected no tags, got {actual_tags}"
assert len(expected_tags) == len(actual_tags)
for tag in expected_tags:
assert tag in actual_tags, f"Tag {tag} must be in {actual_tags}"
for tag in unexpected_tags:
assert tag not in actual_tags, f"Tag {tag} should not be in {actual_tags}"
@allure.step("Expected all tags are presented in object")
def check_tags_by_object(
s3_client,
bucket: str,
key_name: str,
expected_tags: list,
unexpected_tags: Optional[list] = None,
) -> None:
actual_tags = s3_gate_object.get_object_tagging(s3_client, bucket, key_name)
assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
@allure.step("Expected all tags are presented in bucket")
def check_tags_by_bucket(
s3_client, bucket: str, expected_tags: list, unexpected_tags: Optional[list] = None
) -> None:
actual_tags = s3_gate_bucket.get_bucket_tagging(s3_client, bucket)
assert_tags(
expected_tags=expected_tags, unexpected_tags=unexpected_tags, actual_tags=actual_tags
)
def assert_object_lock_mode(
s3_client,
bucket: str,
file_name: str,
object_lock_mode: str,
retain_until_date: datetime,
legal_hold_status: str = "OFF",
retain_period: Optional[int] = None,
):
object_dict = s3_gate_object.get_object_s3(s3_client, bucket, file_name, full_output=True)
assert (
object_dict.get("ObjectLockMode") == object_lock_mode
), f"Expected Object Lock Mode is {object_lock_mode}"
assert (
object_dict.get("ObjectLockLegalHoldStatus") == legal_hold_status
), f"Expected Object Lock Legal Hold Status is {legal_hold_status}"
object_retain_date = object_dict.get("ObjectLockRetainUntilDate")
retain_date = (
parse(object_retain_date) if isinstance(object_retain_date, str) else object_retain_date
)
if retain_until_date:
assert retain_date.strftime("%Y-%m-%dT%H:%M:%S") == retain_until_date.strftime(
"%Y-%m-%dT%H:%M:%S"
), f'Expected Object Lock Retain Until Date is {str(retain_until_date.strftime("%Y-%m-%dT%H:%M:%S"))}'
elif retain_period:
last_modify_date = object_dict.get("LastModified")
last_modify = (
parse(last_modify_date) if isinstance(last_modify_date, str) else last_modify_date
)
assert (
retain_date - last_modify + timedelta(seconds=1)
).days == retain_period, f"Expected retention period is {retain_period} days"
def assert_s3_acl(acl_grants: list, permitted_users: str):
if permitted_users == "AllUsers":
grantees = {"AllUsers": 0, "CanonicalUser": 0}
for acl_grant in acl_grants:
if acl_grant.get("Grantee", {}).get("Type") == "Group":
uri = acl_grant.get("Grantee", {}).get("URI")
permission = acl_grant.get("Permission")
assert (uri, permission) == (
"http://acs.amazonaws.com/groups/global/AllUsers",
"FULL_CONTROL",
), "All Groups should have FULL_CONTROL"
grantees["AllUsers"] += 1
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
grantees["CanonicalUser"] += 1
assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
if permitted_users == "CanonicalUser":
for acl_grant in acl_grants:
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
else:
logger.error("FULL_CONTROL is given to All Users")

View file

@ -1,275 +0,0 @@
"""
This module contains keywords for working with Storage Groups.
It contains wrappers for `frostfs-cli storagegroup` verbs.
"""
import logging
from typing import Optional
import allure
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell import Shell
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.complex_object_actions import get_link_object
from pytest_tests.helpers.frostfs_verbs import head_object
from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, WALLET_CONFIG
logger = logging.getLogger("NeoLogger")
@allure.step("Put Storagegroup")
def put_storagegroup(
shell: Shell,
endpoint: str,
wallet: str,
cid: str,
objects: list,
bearer: Optional[str] = None,
wallet_config: str = WALLET_CONFIG,
lifetime: int = 10,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
Wrapper for `frostfs-cli storagegroup put`. Before the SG is created,
frostfs-cli performs HEAD on `objects`, so this verb must be allowed
for `wallet` in `cid`.
Args:
shell: Shell instance.
wallet: Path to wallet on whose behalf the SG is created.
cid: ID of Container to put SG to.
lifetime: Storage group lifetime in epochs.
objects: List of Object IDs to include into the SG.
bearer: Path to Bearer token file.
wallet_config: Path to frostfs-cli config file.
timeout: Timeout for an operation.
Returns:
Object ID of created Storage Group.
"""
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config
)
result = frostfscli.storagegroup.put(
wallet=wallet,
cid=cid,
lifetime=lifetime,
members=objects,
bearer=bearer,
rpc_endpoint=endpoint,
)
gid = result.stdout.split("\n")[1].split(": ")[1]
return gid
@allure.step("List Storagegroup")
def list_storagegroup(
shell: Shell,
endpoint: str,
wallet: str,
cid: str,
bearer: Optional[str] = None,
wallet_config: str = WALLET_CONFIG,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list:
"""
Wrapper for `frostfs-cli storagegroup list`. This operation
requires SEARCH allowed for `wallet` in `cid`.
Args:
shell: Shell instance.
wallet: Path to wallet on whose behalf the SGs are listed in the container
cid: ID of Container to list.
bearer: Path to Bearer token file.
wallet_config: Path to frostfs-cli config file.
timeout: Timeout for an operation.
Returns:
Object IDs of found Storage Groups.
"""
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config
)
result = frostfscli.storagegroup.list(
wallet=wallet,
cid=cid,
bearer=bearer,
rpc_endpoint=endpoint,
timeout=timeout,
)
# throw away the first line of the output
found_objects = result.stdout.split("\n")[1:]
return found_objects
@allure.step("Get Storagegroup")
def get_storagegroup(
shell: Shell,
endpoint: str,
wallet: str,
cid: str,
gid: str,
bearer: str = "",
wallet_config: str = WALLET_CONFIG,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> dict:
"""
Wrapper for `frostfs-cli storagegroup get`.
Args:
shell: Shell instance.
wallet: Path to wallet on whose behalf the SG is got.
cid: ID of Container where SG is stored.
gid: ID of the Storage Group.
bearer: Path to Bearer token file.
wallet_config: Path to frostfs-cli config file.
timeout: Timeout for an operation.
Returns:
Detailed information on the Storage Group.
"""
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config
)
result = frostfscli.storagegroup.get(
wallet=wallet,
cid=cid,
bearer=bearer,
id=gid,
rpc_endpoint=endpoint,
timeout=timeout,
)
# TODO: temporary solution for parsing output. Needs to be replaced with
# JSON parsing when https://github.com/nspcc-dev/frostfs-node/issues/1355
# is done.
strings = result.stdout.strip().split("\n")
# first three strings go to `data`;
# skip the 'Members:' string;
# the rest of strings go to `members`
data, members = strings[:3], strings[3:]
sg_dict = {}
for i in data:
key, val = i.split(": ")
sg_dict[key] = val
sg_dict["Members"] = []
for member in members[1:]:
sg_dict["Members"].append(member.strip())
return sg_dict
@allure.step("Delete Storagegroup")
def delete_storagegroup(
shell: Shell,
endpoint: str,
wallet: str,
cid: str,
gid: str,
bearer: str = "",
wallet_config: str = WALLET_CONFIG,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> str:
"""
Wrapper for `frostfs-cli storagegroup delete`.
Args:
shell: Shell instance.
wallet: Path to wallet on whose behalf the SG is deleted.
cid: ID of Container where SG is stored.
gid: ID of the Storage Group.
bearer: Path to Bearer token file.
wallet_config: Path to frostfs-cli config file.
timeout: Timeout for an operation.
Returns:
Tombstone ID of the deleted Storage Group.
"""
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config
)
result = frostfscli.storagegroup.delete(
wallet=wallet,
cid=cid,
bearer=bearer,
id=gid,
rpc_endpoint=endpoint,
timeout=timeout,
)
tombstone_id = result.stdout.strip().split("\n")[1].split(": ")[1]
return tombstone_id
@allure.step("Verify list operation over Storagegroup")
def verify_list_storage_group(
shell: Shell,
endpoint: str,
wallet: str,
cid: str,
gid: str,
bearer: str = None,
wallet_config: str = WALLET_CONFIG,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
storage_groups = list_storagegroup(
shell=shell,
endpoint=endpoint,
wallet=wallet,
cid=cid,
bearer=bearer,
wallet_config=wallet_config,
timeout=timeout,
)
assert gid in storage_groups
@allure.step("Verify get operation over Storagegroup")
def verify_get_storage_group(
shell: Shell,
cluster: Cluster,
wallet: str,
cid: str,
gid: str,
obj_list: list,
object_size: int,
max_object_size: int,
bearer: str = None,
wallet_config: str = WALLET_CONFIG,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
obj_parts = []
endpoint = cluster.default_rpc_endpoint
if object_size > max_object_size:
for obj in obj_list:
link_oid = get_link_object(
wallet,
cid,
obj,
shell=shell,
nodes=cluster.storage_nodes,
bearer=bearer,
wallet_config=wallet_config,
timeout=timeout,
)
obj_head = head_object(
wallet=wallet,
cid=cid,
oid=link_oid,
shell=shell,
endpoint=endpoint,
is_raw=True,
bearer=bearer,
wallet_config=wallet_config,
timeout=timeout,
)
obj_parts = obj_head["header"]["split"]["children"]
obj_num = len(obj_list)
storagegroup_data = get_storagegroup(
shell=shell,
endpoint=endpoint,
wallet=wallet,
cid=cid,
gid=gid,
bearer=bearer,
wallet_config=wallet_config,
timeout=timeout,
)
exp_size = object_size * obj_num
if object_size < max_object_size:
assert int(storagegroup_data["Group size"]) == exp_size
assert storagegroup_data["Members"] == obj_list
else:
assert int(storagegroup_data["Group size"]) == exp_size
assert storagegroup_data["Members"] == obj_parts
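# Minimal usage sketch (hypothetical): full lifecycle of a storage group over
# objects already put into the container by the calling test.
def _example_storagegroup_lifecycle(shell: Shell, endpoint: str, wallet: str, cid: str, objects: list) -> None:
    gid = put_storagegroup(shell, endpoint, wallet, cid, objects)
    verify_list_storage_group(shell=shell, endpoint=endpoint, wallet=wallet, cid=cid, gid=gid)
    sg_info = get_storagegroup(shell=shell, endpoint=endpoint, wallet=wallet, cid=cid, gid=gid)
    assert sg_info["Members"] == objects
    tombstone_id = delete_storagegroup(shell=shell, endpoint=endpoint, wallet=wallet, cid=cid, gid=gid)
    assert tombstone_id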

View file

@ -1,25 +0,0 @@
from dataclasses import dataclass
from typing import Optional
@dataclass
class ObjectRef:
cid: str
oid: str
@dataclass
class LockObjectInfo(ObjectRef):
lifetime: Optional[int] = None
expire_at: Optional[int] = None
@dataclass
class StorageObjectInfo(ObjectRef):
size: Optional[int] = None
wallet_file_path: Optional[str] = None
file_path: Optional[str] = None
file_hash: Optional[str] = None
attributes: Optional[list[dict[str, str]]] = None
tombstone: Optional[str] = None
locks: Optional[list[LockObjectInfo]] = None

View file

@ -1,173 +0,0 @@
#!/usr/bin/python3
"""
This module contains keywords which are used for asserting
that storage policies are respected.
"""
import logging
import allure
from frostfs_testlib.resources.common import OBJECT_NOT_FOUND
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import string_utils
from pytest_tests.helpers import complex_object_actions, frostfs_verbs
from pytest_tests.helpers.cluster import StorageNode
logger = logging.getLogger("NeoLogger")
@allure.step("Get Object Copies")
def get_object_copies(
complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
"""
The function performs requests to all nodes of the container and
finds out if they store a copy of the object. The procedure is
different for simple and complex objects, so the function requires
the object complexity tag.
Args:
complexity (str): the tag of object size and complexity,
[Simple|Complex]
wallet (str): the path to the wallet on whose behalf the
copies are got
cid (str): ID of the container
oid (str): ID of the Object
shell: executor for cli command
nodes: nodes to search on
Returns:
(int): the number of object copies in the container
"""
return (
get_simple_object_copies(wallet, cid, oid, shell, nodes)
if complexity == "Simple"
else get_complex_object_copies(wallet, cid, oid, shell, nodes)
)
@allure.step("Get Simple Object Copies")
def get_simple_object_copies(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
"""
To figure out the number of copies of a simple object, only direct
HEAD requests should be made to every node of the container.
We consider a non-empty HEAD response as a stored object copy.
Args:
wallet (str): the path to the wallet on whose behalf the
copies are got
cid (str): ID of the container
oid (str): ID of the Object
shell: executor for cli command
nodes: nodes to search on
Returns:
(int): the number of object copies in the container
"""
copies = 0
for node in nodes:
try:
response = frostfs_verbs.head_object(
wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
)
if response:
logger.info(f"Found object {oid} on node {node}")
copies += 1
except Exception:
logger.info(f"No {oid} object copy found on {node}, continue")
continue
return copies
@allure.step("Get Complex Object Copies")
def get_complex_object_copies(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> int:
"""
To figure out the number of copies of a complex object, we first
need to retrieve its Last object. We consider that the number of
complex object copies is equal to the number of copies of its last
object. When we have the Last object ID, the task is reduced
to getting simple object copies.
Args:
wallet (str): the path to the wallet on whose behalf the
copies are got
cid (str): ID of the container
oid (str): ID of the Object
shell: executor for cli command
Returns:
(int): the number of object copies in the container
"""
last_oid = complex_object_actions.get_last_object(wallet, cid, oid, shell, nodes)
assert last_oid, f"No Last Object for {cid}/{oid} found among all Storage Nodes"
return get_simple_object_copies(wallet, cid, last_oid, shell, nodes)
@allure.step("Get Nodes With Object")
def get_nodes_with_object(
cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]:
"""
The function returns list of nodes which store
the given object.
Args:
cid (str): ID of the container which store the object
oid (str): object ID
shell: executor for cli command
nodes: nodes to find on
Returns:
(list): nodes which store the object
"""
nodes_list = []
for node in nodes:
wallet = node.get_wallet_path()
wallet_config = node.get_wallet_config_path()
try:
res = frostfs_verbs.head_object(
wallet,
cid,
oid,
shell=shell,
endpoint=node.get_rpc_endpoint(),
is_direct=True,
wallet_config=wallet_config,
)
if res is not None:
logger.info(f"Found object {oid} on node {node}")
nodes_list.append(node)
except Exception:
logger.info(f"No {oid} object copy found on {node}, continue")
continue
return nodes_list
@allure.step("Get Nodes Without Object")
def get_nodes_without_object(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]:
"""
The function returns list of nodes which do not store
the given object.
Args:
wallet (str): the path to the wallet on whose behalf
we request the nodes
cid (str): ID of the container which store the object
oid (str): object ID
shell: executor for cli command
Returns:
(list): nodes which do not store the object
"""
nodes_list = []
for node in nodes:
try:
res = frostfs_verbs.head_object(
wallet, cid, oid, shell=shell, endpoint=node.get_rpc_endpoint(), is_direct=True
)
if res is None:
nodes_list.append(node)
except Exception as err:
if string_utils.is_str_match_pattern(err, OBJECT_NOT_FOUND):
nodes_list.append(node)
else:
raise Exception(f"Got error {err} on head object command") from err
return nodes_list
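# Minimal usage sketch (hypothetical): verify that an object stored under a
# "REP 2" policy really has two copies, and see which nodes hold them.
def _example_check_replicas(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> None:
    copies = get_simple_object_copies(wallet, cid, oid, shell, nodes)
    assert copies == 2, f"Expected 2 copies, got {copies}"
    holders = get_nodes_with_object(cid, oid, shell, nodes)
    assert len(holders) == 2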

View file

@ -1,80 +0,0 @@
import logging
from functools import wraps
from time import sleep, time
from _pytest.outcomes import Failed
from pytest import fail
logger = logging.getLogger("NeoLogger")
class expect_not_raises:
"""
Decorator/context manager that checks that some action, method or test does not raise exceptions
Useful to set proper state of failed test cases in allure
Example:
def do_stuff():
raise Exception("Fail")
def test_yellow(): <- this test is marked yellow (Test Defect) in allure
do_stuff()
def test_red(): <- this test is marked red (Failed) in allure
with expect_not_raises():
do_stuff()
@expect_not_raises()
def test_also_red(): <- this test is also marked red (Failed) in allure
do_stuff()
"""
def __enter__(self):
pass
def __exit__(self, exception_type, exception_value, exception_traceback):
if exception_value:
fail(str(exception_value))
def __call__(self, func):
@wraps(func)
def impl(*a, **kw):
with expect_not_raises():
func(*a, **kw)
return impl
def wait_for_success(max_wait_time: int = 60, interval: int = 1):
"""
Decorator to wait for some conditions/functions to pass successfully.
This is useful if you don't know the exact time when something should pass successfully and do not
want to use sleep(X) with too big an X.
Be careful though, wrapped function should only check the state of something, not change it.
"""
def wrapper(func):
@wraps(func)
def impl(*a, **kw):
start = int(round(time()))
last_exception = None
while start + max_wait_time >= int(round(time())):
try:
return func(*a, **kw)
except Exception as ex:
logger.debug(ex)
last_exception = ex
sleep(interval)
except Failed as ex:
logger.debug(ex)
last_exception = ex
sleep(interval)
# timeout exceeded with no success, raise last_exception
raise last_exception
return impl
return wrapper
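# Minimal usage sketch (hypothetical): poll a state check until it passes or the
# 60-second budget runs out; the wrapped function only inspects state, never changes it.
import os

@wait_for_success(max_wait_time=60, interval=2)
def _example_wait_for_file(path: str) -> None:
    assert os.path.exists(path), f"{path} has not appeared yet"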

View file

@ -1,40 +0,0 @@
import json
import logging
import allure
from frostfs_testlib.shell import Shell
from neo3.wallet import wallet
from pytest_tests.helpers.frostfs_verbs import head_object
logger = logging.getLogger("NeoLogger")
@allure.step("Verify Head Tombstone")
def verify_head_tombstone(
wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str
):
header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
logger.info(f"Header Session OIDs is {s_oid}")
logger.info(f"OID is {oid}")
assert header["containerID"] == cid, "Tombstone Header CID is wrong"
with open(wallet_path, "r") as file:
wlt_data = json.loads(file.read())
wlt = wallet.Wallet.from_json(wlt_data, password="")
addr = wlt.accounts[0].address
assert header["ownerID"] == addr, "Tombstone Owner ID is wrong"
assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
assert (
header["sessionToken"]["body"]["object"]["verb"] == "DELETE"
), "Header Session Type isn't DELETE"
assert (
header["sessionToken"]["body"]["object"]["target"]["container"] == cid
), "Header Session ID is wrong"
assert (
oid in header["sessionToken"]["body"]["object"]["target"]["objects"]
), "Header Session OID is wrong"

View file

@ -1,10 +1,9 @@
import time
import allure
from frostfs_testlib.resources.common import STORAGE_GC_TIME
from frostfs_testlib.utils import datetime_utils
from pytest_tests.resources.common import STORAGE_GC_TIME
def placement_policy_from_container(container_info: str) -> str:
"""

View file

@ -1,71 +0,0 @@
import os
import uuid
from dataclasses import dataclass
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import wallet_utils
from pytest_tests.helpers.cluster import Cluster, NodeBase
from pytest_tests.helpers.payment_neogo import deposit_gas, transfer_gas
from pytest_tests.resources.common import FREE_STORAGE, WALLET_CONFIG, WALLET_PASS
@dataclass
class WalletFile:
path: str
password: str = WALLET_PASS
config_path: str = WALLET_CONFIG
@staticmethod
def from_node(node: NodeBase):
return WalletFile(
node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path()
)
def get_address(self) -> str:
"""
Extracts the last address from wallet.
Returns:
The address of the wallet.
"""
return wallet_utils.get_last_address_from_wallet(self.path, self.password)
class WalletFactory:
def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None:
self.shell = shell
self.wallets_dir = wallets_dir
self.cluster = cluster
def create_wallet(self, password: str = WALLET_PASS) -> WalletFile:
"""
Creates a new default wallet
Args:
password: wallet password
Returns:
WalletFile object of new wallet
"""
wallet_path = os.path.join(self.wallets_dir, f"{str(uuid.uuid4())}.json")
wallet_utils.init_wallet(wallet_path, password)
if not FREE_STORAGE:
main_chain = self.cluster.main_chain_nodes[0]
deposit = 30
transfer_gas(
shell=self.shell,
amount=deposit + 1,
main_chain=main_chain,
wallet_to_path=wallet_path,
wallet_to_password=password,
)
deposit_gas(
shell=self.shell,
amount=deposit,
main_chain=main_chain,
wallet_from_path=wallet_path,
wallet_from_password=password,
)
return WalletFile(wallet_path, password)
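# Minimal usage sketch (hypothetical): create a funded wallet in a temporary
# directory. `shell` and `cluster` are the usual session fixtures.
def _example_make_wallet(wallets_dir: str, shell: Shell, cluster: Cluster) -> WalletFile:
    factory = WalletFactory(wallets_dir, shell, cluster)
    wallet = factory.create_wallet()
    print(f"New wallet address: {wallet.get_address()}")
    return wallet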

View file

@ -1,56 +1,7 @@
import os
import yaml
CONTAINER_WAIT_INTERVAL = "1m"
SIMPLE_OBJECT_SIZE = os.getenv("SIMPLE_OBJECT_SIZE", "1000")
COMPLEX_OBJECT_CHUNKS_COUNT = os.getenv("COMPLEX_OBJECT_CHUNKS_COUNT", "3")
COMPLEX_OBJECT_TAIL_SIZE = os.getenv("COMPLEX_OBJECT_TAIL_SIZE", "1000")
TEST_CYCLES_COUNT = int(os.getenv("TEST_CYCLES_COUNT", "1"))
MAINNET_BLOCK_TIME = os.getenv("MAINNET_BLOCK_TIME", "1s")
MAINNET_TIMEOUT = os.getenv("MAINNET_TIMEOUT", "1min")
MORPH_BLOCK_TIME = os.getenv("MORPH_BLOCK_TIME", "1s")
FROSTFS_CONTRACT_CACHE_TIMEOUT = os.getenv("FROSTFS_CONTRACT_CACHE_TIMEOUT", "30s")
# Time interval that allows a GC pass on storage node (this includes GC sleep interval
# of 1min plus 15 seconds for GC pass itself)
STORAGE_GC_TIME = os.getenv("STORAGE_GC_TIME", "75s")
GAS_HASH = os.getenv("GAS_HASH", "0xd2a4cff31913016155e38e474a2c06d08be276cf")
FROSTFS_CONTRACT = os.getenv("FROSTFS_IR_CONTRACTS_FROSTFS")
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir")
DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))
# Password of wallet owned by user on behalf of whom we are running tests
WALLET_PASS = os.getenv("WALLET_PASS", "")
# Paths to CLI executables on machine that runs tests
NEOGO_EXECUTABLE = os.getenv("NEOGO_EXECUTABLE", "neo-go")
FROSTFS_CLI_EXEC = os.getenv("FROSTFS_CLI_EXEC", "frostfs-cli")
FROSTFS_AUTHMATE_EXEC = os.getenv("FROSTFS_AUTHMATE_EXEC", "frostfs-authmate")
FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")
# Config for frostfs-adm utility. Optional if tests are running against devenv
FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")
FREE_STORAGE = os.getenv("FREE_STORAGE", "false").lower() == "true"
BIN_VERSIONS_FILE = os.getenv("BIN_VERSIONS_FILE")
DEVENV_PATH = os.getenv("DEVENV_PATH", os.path.join("..", "frostfs-dev-env"))
HOSTING_CONFIG_FILE = os.getenv("HOSTING_CONFIG_FILE", ".devenv.hosting.yaml")
STORAGE_NODE_SERVICE_NAME_REGEX = r"s\d\d"
HTTP_GATE_SERVICE_NAME_REGEX = r"http-gate\d\d"
S3_GATE_SERVICE_NAME_REGEX = r"s3-gate\d\d"
CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)
# Generate wallet configs
# TODO: we should move all info about wallet configs to fixtures
WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml")
with open(WALLET_CONFIG, "w") as file:
yaml.dump({"password": WALLET_PASS}, file)

View file

@ -1,4 +1,4 @@
{
"rep-3": "REP 3",
"complex": "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
}
}

View file

@ -1,27 +0,0 @@
import os
# Load node parameters
LOAD_NODES = os.getenv("LOAD_NODES", "").split(",")
LOAD_NODE_SSH_USER = os.getenv("LOAD_NODE_SSH_USER", "root")
LOAD_NODE_SSH_PRIVATE_KEY_PATH = os.getenv("LOAD_NODE_SSH_PRIVATE_KEY_PATH")
BACKGROUND_WRITERS_COUNT = os.getenv("BACKGROUND_WRITERS_COUNT", 10)
BACKGROUND_READERS_COUNT = os.getenv("BACKGROUND_READERS_COUNT", 10)
BACKGROUND_OBJ_SIZE = os.getenv("BACKGROUND_OBJ_SIZE", 1024)
BACKGROUND_LOAD_MAX_TIME = os.getenv("BACKGROUND_LOAD_MAX_TIME", 600)
# Load run parameters
OBJ_SIZE = os.getenv("OBJ_SIZE", "1000").split(",")
CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "1").split(",")
OUT_FILE = os.getenv("OUT_FILE", "1mb_200.json").split(",")
OBJ_COUNT = os.getenv("OBJ_COUNT", "4").split(",")
WRITERS = os.getenv("WRITERS", "200").split(",")
READERS = os.getenv("READER", "0").split(",")
DELETERS = os.getenv("DELETERS", "0").split(",")
LOAD_TIME = os.getenv("LOAD_TIME", "200").split(",")
LOAD_TYPE = os.getenv("LOAD_TYPE", "grpc").split(",")
LOAD_NODES_COUNT = os.getenv("LOAD_NODES_COUNT", "1").split(",")
STORAGE_NODE_COUNT = os.getenv("STORAGE_NODE_COUNT", "4").split(",")
CONTAINER_PLACEMENT_POLICY = os.getenv(
"CONTAINER_PLACEMENT_POLICY", "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
)

View file

@ -1,35 +0,0 @@
import allure
import pytest
from frostfs_testlib.shell import Shell
from pytest_tests.helpers import epoch
from pytest_tests.helpers.cluster import Cluster
# To skip adding every mandatory singleton dependency to EACH test function
class ClusterTestBase:
shell: Shell
cluster: Cluster
@pytest.fixture(scope="session", autouse=True)
def fill_mandatory_dependencies(self, cluster: Cluster, client_shell: Shell):
ClusterTestBase.shell = client_shell
ClusterTestBase.cluster = cluster
yield
@allure.title("Tick {epochs_to_tick} epochs")
def tick_epochs(self, epochs_to_tick: int):
for _ in range(epochs_to_tick):
self.tick_epoch()
def tick_epoch(self):
epoch.tick_epoch(self.shell, self.cluster)
def wait_for_epochs_align(self):
epoch.wait_for_epochs_align(self.shell, self.cluster)
def get_epoch(self):
return epoch.get_epoch(self.shell, self.cluster)
def ensure_fresh_epoch(self):
return epoch.ensure_fresh_epoch(self.shell, self.cluster)
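# Minimal usage sketch (hypothetical): a test class picks up the shared shell
# and cluster singletons simply by inheriting from ClusterTestBase.
class ExampleEpochTest(ClusterTestBase):
    def test_epoch_advances(self):
        current = self.ensure_fresh_epoch()
        self.tick_epoch()
        assert self.get_epoch() == current + 1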

View file

@ -1,163 +0,0 @@
import concurrent.futures
import re
from dataclasses import asdict
import allure
from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
from frostfs_testlib.cli.neogo import NeoGo
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.shell import CommandOptions, SSHShell
from frostfs_testlib.shell.interfaces import InteractiveInput
from pytest_tests.helpers.k6 import K6, LoadParams, LoadResults
from pytest_tests.resources.common import STORAGE_NODE_SERVICE_NAME_REGEX
FROSTFS_AUTHMATE_PATH = "frostfs-authmate"
STOPPED_HOSTS = []
@allure.title("Get services endpoints")
def get_services_endpoints(
hosting: Hosting, service_name_regex: str, endpoint_attribute: str
) -> list[str]:
service_configs = hosting.find_service_configs(service_name_regex)
return [service_config.attributes[endpoint_attribute] for service_config in service_configs]
@allure.title("Stop nodes")
def stop_unused_nodes(storage_nodes: list, used_nodes_count: int):
for node in storage_nodes[used_nodes_count:]:
host = node.host
STOPPED_HOSTS.append(host)
host.stop_host("hard")
@allure.title("Start nodes")
def start_stopped_nodes():
for host in STOPPED_HOSTS:
host.start_host()
STOPPED_HOSTS.remove(host)
@allure.title("Init s3 client")
def init_s3_client(
load_nodes: list, login: str, pkey: str, container_placement_policy: str, hosting: Hosting
):
service_configs = hosting.find_service_configs(STORAGE_NODE_SERVICE_NAME_REGEX)
host = hosting.get_host_by_service(service_configs[0].name)
wallet_path = service_configs[0].attributes["wallet_path"]
neogo_cli_config = host.get_cli_config("neo-go")
neogo_wallet = NeoGo(shell=host.get_shell(), neo_go_exec_path=neogo_cli_config.exec_path).wallet
dump_keys_output = neogo_wallet.dump_keys(wallet=wallet_path, wallet_config=None).stdout
public_key = str(re.search(r":\n(?P<public_key>.*)", dump_keys_output).group("public_key"))
node_endpoint = service_configs[0].attributes["rpc_endpoint"]
# prompt_pattern doesn't work at the moment
for load_node in load_nodes:
ssh_client = SSHShell(host=load_node, login=login, private_key_path=pkey)
path = ssh_client.exec(r"sudo find . -name 'k6' -exec dirname {} \; -quit").stdout.strip(
"\n"
)
frostfs_authmate_exec = FrostfsAuthmate(ssh_client, FROSTFS_AUTHMATE_PATH)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=f"{path}/scenarios/files/wallet.json",
peer=node_endpoint,
bearer_rules=f"{path}/scenarios/files/rules.json",
gate_public_key=public_key,
container_placement_policy=container_placement_policy,
container_policy=f"{path}/scenarios/files/policy.json",
wallet_password="",
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
"aws_access_key_id"
)
)
aws_secret_access_key = str(
re.search(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output
).group("aws_secret_access_key")
)
# prompt_pattern doesn't work at the moment
configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
InteractiveInput(
prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key
),
InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""),
]
ssh_client.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@allure.title("Clear cache and data from storage nodes")
def clear_cache_and_data(hosting: Hosting):
service_configs = hosting.find_service_configs(STORAGE_NODE_SERVICE_NAME_REGEX)
for service_config in service_configs:
host = hosting.get_host_by_service(service_config.name)
host.stop_service(service_config.name)
host.delete_storage_node_data(service_config.name)
host.start_service(service_config.name)
@allure.title("Prepare objects")
def prepare_objects(k6_instance: K6):
k6_instance.prepare()
@allure.title("Prepare K6 instances and objects")
def prepare_k6_instances(
load_nodes: list, login: str, pkey: str, load_params: LoadParams, prepare: bool = True
) -> list[K6]:
k6_load_objects = []
for load_node in load_nodes:
ssh_client = SSHShell(host=load_node, login=login, private_key_path=pkey)
k6_load_object = K6(load_params, ssh_client)
k6_load_objects.append(k6_load_object)
for k6_load_object in k6_load_objects:
if prepare:
with allure.step("Prepare objects"):
prepare_objects(k6_load_object)
return k6_load_objects
@allure.title("Run K6")
def run_k6_load(k6_instance: K6) -> LoadResults:
with allure.step("Executing load"):
k6_instance.start()
k6_instance.wait_until_finished(k6_instance.load_params.load_time * 2)
with allure.step("Printing results"):
k6_instance.get_k6_results()
return k6_instance.parsing_results()
@allure.title("MultiNode K6 Run")
def multi_node_k6_run(k6_instances: list) -> dict:
results = []
avg_results = {}
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = []
for k6_instance in k6_instances:
futures.append(executor.submit(run_k6_load, k6_instance))
for future in concurrent.futures.as_completed(futures):
results.append(asdict(future.result()))
for k6_result in results:
for key in k6_result:
try:
avg_results[key] += k6_result[key] / len(results)
except KeyError:
avg_results[key] = k6_result[key] / len(results)
return avg_results
@allure.title("Compare results")
def compare_load_results(result: dict, result_new: dict):
for key in result:
if result[key] != 0 and result_new[key] != 0:
if (abs(result[key] - result_new[key]) / min(result[key], result_new[key])) < 0.25:
continue
else:
raise AssertionError(f"Difference in {key} values more than 25%")
elif result[key] == 0 and result_new[key] == 0:
continue
else:
raise AssertionError(f"Unexpected zero value in {key}")

View file

@ -1,217 +0,0 @@
import json
import logging
import os
import re
import uuid
from typing import Any, Optional
import allure
import boto3
import pytest
import urllib3
from botocore.config import Config
from botocore.exceptions import ClientError
from frostfs_testlib.shell import Shell
from pytest import FixtureRequest
from pytest_tests.helpers.aws_cli_client import AwsCliClient
from pytest_tests.helpers.cli_helpers import _cmd_run, _configure_aws_cli, _run_with_passwd
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.container import list_containers
from pytest_tests.helpers.s3_helper import set_bucket_versioning
from pytest_tests.resources.common import FROSTFS_AUTHMATE_EXEC
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
from pytest_tests.steps.cluster_test_base import ClusterTestBase
# Disable warnings on self-signed certificate which the
# boto library produces on requests to S3-gate in dev-env
urllib3.disable_warnings()
logger = logging.getLogger("NeoLogger")
CREDENTIALS_CREATE_TIMEOUT = "1m"
# Number of attempts that S3 clients will make per request (1 means a single attempt
# without any retries)
MAX_REQUEST_ATTEMPTS = 1
RETRY_MODE = "standard"
S3_MALFORMED_XML_REQUEST = (
"The XML you provided was not well-formed or did not validate against our published schema."
)
class TestS3GateBase(ClusterTestBase):
s3_client: Any = None
@pytest.fixture(scope="class", autouse=True)
@allure.title("[Class/Autouse]: Create S3 client")
def s3_client(
self, default_wallet, client_shell: Shell, request: FixtureRequest, cluster: Cluster
) -> Any:
s3_bearer_rules_file = f"{os.getcwd()}/pytest_tests/resources/files/s3_bearer_rules.json"
policy = None if isinstance(request.param, str) else request.param[1]
(cid, bucket, access_key_id, secret_access_key, owner_private_key,) = init_s3_credentials(
default_wallet, cluster, s3_bearer_rules_file=s3_bearer_rules_file, policy=policy
)
containers_list = list_containers(
default_wallet, shell=client_shell, endpoint=self.cluster.default_rpc_endpoint
)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
if "aws cli" in request.param:
client = configure_cli_client(
access_key_id, secret_access_key, cluster.default_s3_gate_endpoint
)
else:
client = configure_boto3_client(
access_key_id, secret_access_key, cluster.default_s3_gate_endpoint
)
TestS3GateBase.s3_client = client
TestS3GateBase.wallet = default_wallet
@pytest.fixture
@allure.title("Create/delete bucket")
def bucket(self, request: FixtureRequest):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
versioning_status: Optional[s3_gate_bucket.VersioningStatus] = None
if "param" in request.__dict__:
versioning_status = request.param
if versioning_status:
set_bucket_versioning(self.s3_client, bucket, versioning_status)
yield bucket
self.delete_all_object_in_bucket(bucket)
@pytest.fixture
@allure.title("Create two buckets")
def two_buckets(self):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
yield bucket_1, bucket_2
for bucket in [bucket_1, bucket_2]:
self.delete_all_object_in_bucket(bucket)
def delete_all_object_in_bucket(self, bucket):
versioning_status = s3_gate_bucket.get_bucket_versioning_status(self.s3_client, bucket)
if versioning_status == s3_gate_bucket.VersioningStatus.ENABLED.value:
# From versioned bucket we should delete all versions and delete markers of all objects
objects_versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
if objects_versions:
s3_gate_object.delete_object_versions_s3_without_dm(
self.s3_client, bucket, objects_versions
)
objects_delete_markers = s3_gate_object.list_objects_delete_markers_s3(
self.s3_client, bucket
)
if objects_delete_markers:
s3_gate_object.delete_object_versions_s3_without_dm(
self.s3_client, bucket, objects_delete_markers
)
else:
# From non-versioned bucket it's sufficient to delete objects by key
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
if objects:
s3_gate_object.delete_objects_s3(self.s3_client, bucket, objects)
objects_delete_markers = s3_gate_object.list_objects_delete_markers_s3(
self.s3_client, bucket
)
if objects_delete_markers:
s3_gate_object.delete_object_versions_s3_without_dm(
self.s3_client, bucket, objects_delete_markers
)
# Delete the bucket itself
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket)
@allure.step("Init S3 Credentials")
def init_s3_credentials(
wallet_path: str,
cluster: Cluster,
s3_bearer_rules_file: Optional[str] = None,
policy: Optional[dict] = None,
):
bucket = str(uuid.uuid4())
s3_bearer_rules = s3_bearer_rules_file or "pytest_tests/resources/files/s3_bearer_rules.json"
s3gate_node = cluster.s3gates[0]
gate_public_key = s3gate_node.get_wallet_public_key()
cmd = (
f"{FROSTFS_AUTHMATE_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
f"--peer {cluster.default_rpc_endpoint} --container-friendly-name {bucket} "
f"--bearer-rules {s3_bearer_rules}"
)
if policy:
cmd += f" --container-policy {policy}'"
logger.info(f"Executing command: {cmd}")
try:
output = _run_with_passwd(cmd)
logger.info(f"Command completed with output: {output}")
# output contains some debug info and then several JSON structures, so we find each
# JSON structure by curly brackets (naive approach, but works while JSON is not nested)
# and then we take JSON containing secret_access_key
json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL)
for json_block in json_blocks:
try:
parsed_json_block = json.loads(json_block)
if "secret_access_key" in parsed_json_block:
return (
parsed_json_block["container_id"],
bucket,
parsed_json_block["access_key_id"],
parsed_json_block["secret_access_key"],
parsed_json_block["owner_private_key"],
)
except json.JSONDecodeError:
raise AssertionError(f"Could not parse info from output\n{output}")
raise AssertionError(f"Could not find AWS credentials in output:\n{output}")
except Exception as exc:
raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc
@allure.step("Configure S3 client (boto3)")
def configure_boto3_client(access_key_id: str, secret_access_key: str, s3gate_endpoint: str):
try:
session = boto3.Session()
config = Config(
retries={
"max_attempts": MAX_REQUEST_ATTEMPTS,
"mode": RETRY_MODE,
}
)
s3_client = session.client(
service_name="s3",
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key,
config=config,
endpoint_url=s3gate_endpoint,
verify=False,
)
return s3_client
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Configure S3 client (aws cli)")
def configure_cli_client(access_key_id: str, secret_access_key: str, s3gate_endpoint: str):
try:
client = AwsCliClient(s3gate_endpoint)
_configure_aws_cli("aws configure", access_key_id, secret_access_key)
_cmd_run(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS}")
_cmd_run(f"aws configure set retry_mode {RETRY_MODE}")
return client
except Exception as err:
if "command was not found or was not executable" in str(err):
pytest.skip("AWS CLI was not found")
else:
raise RuntimeError("Error while configuring AwsCliClient") from err

View file

@ -1,316 +0,0 @@
import json
import logging
import uuid
from enum import Enum
from time import sleep
from typing import Optional
import allure
from botocore.exceptions import ClientError
from pytest_tests.helpers.cli_helpers import log_command_execution
logger = logging.getLogger("NeoLogger")
# Artificial delay that we add after object deletion and container creation.
# The delay is needed because sometimes an object still appears to exist immediately
# after deletion (probably because the tombstone object takes some time to replicate).
# TODO: remove after https://github.com/nspcc-dev/neofs-s3-gw/issues/610 is fixed
S3_SYNC_WAIT_TIME = 5
class VersioningStatus(Enum):
ENABLED = "Enabled"
SUSPENDED = "Suspended"
@allure.step("Create bucket S3")
def create_bucket_s3(
s3_client,
object_lock_enabled_for_bucket: Optional[bool] = None,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
grant_full_control: Optional[str] = None,
bucket_configuration: Optional[str] = None,
) -> str:
bucket_name = str(uuid.uuid4())
try:
params = {"Bucket": bucket_name}
if object_lock_enabled_for_bucket is not None:
params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket})
if acl is not None:
params.update({"ACL": acl})
elif grant_write or grant_read or grant_full_control:
if grant_write:
params.update({"GrantWrite": grant_write})
elif grant_read:
params.update({"GrantRead": grant_read})
elif grant_full_control:
params.update({"GrantFullControl": grant_full_control})
if bucket_configuration:
params.update(
{"CreateBucketConfiguration": {"LocationConstraint": bucket_configuration}}
)
s3_bucket = s3_client.create_bucket(**params)
log_command_execution(f"Created S3 bucket {bucket_name}", s3_bucket)
sleep(S3_SYNC_WAIT_TIME)
return bucket_name
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List buckets S3")
def list_buckets_s3(s3_client):
found_buckets = []
try:
response = s3_client.list_buckets()
log_command_execution("S3 List buckets result", response)
for bucket in response["Buckets"]:
found_buckets.append(bucket["Name"])
return found_buckets
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete bucket S3")
def delete_bucket_s3(s3_client, bucket: str):
try:
response = s3_client.delete_bucket(Bucket=bucket)
log_command_execution("S3 Delete bucket result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
except ClientError as err:
log_command_execution("S3 Delete bucket error", str(err))
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Head bucket S3")
def head_bucket(s3_client, bucket: str):
try:
response = s3_client.head_bucket(Bucket=bucket)
log_command_execution("S3 Head bucket result", response)
return response
except ClientError as err:
log_command_execution("S3 Head bucket error", str(err))
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Set bucket versioning status")
def set_bucket_versioning(s3_client, bucket_name: str, status: VersioningStatus) -> None:
try:
response = s3_client.put_bucket_versioning(
Bucket=bucket_name, VersioningConfiguration={"Status": status.value}
)
log_command_execution("S3 Set bucket versioning to", response)
except ClientError as err:
raise Exception(f"Got error during set bucket versioning: {err}") from err
@allure.step("Get bucket versioning status")
def get_bucket_versioning_status(s3_client, bucket_name: str) -> str:
try:
response = s3_client.get_bucket_versioning(Bucket=bucket_name)
status = response.get("Status")
log_command_execution("S3 Got bucket versioning status", response)
return status
except ClientError as err:
raise Exception(f"Got error during get bucket versioning status: {err}") from err
@allure.step("Put bucket tagging")
def put_bucket_tagging(s3_client, bucket_name: str, tags: list):
try:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
response = s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tagging)
log_command_execution("S3 Put bucket tagging", response)
except ClientError as err:
raise Exception(f"Got error during put bucket tagging: {err}") from err
@allure.step("Get bucket acl")
def get_bucket_acl(s3_client, bucket_name: str) -> list:
try:
response = s3_client.get_bucket_acl(Bucket=bucket_name)
log_command_execution("S3 Get bucket acl", response)
return response.get("Grants")
except ClientError as err:
raise Exception(f"Got error during get bucket tagging: {err}") from err
@allure.step("Get bucket tagging")
def get_bucket_tagging(s3_client, bucket_name: str) -> list:
try:
response = s3_client.get_bucket_tagging(Bucket=bucket_name)
log_command_execution("S3 Get bucket tagging", response)
return response.get("TagSet")
except ClientError as err:
raise Exception(f"Got error during get bucket tagging: {err}") from err
@allure.step("Delete bucket tagging")
def delete_bucket_tagging(s3_client, bucket_name: str) -> None:
try:
response = s3_client.delete_bucket_tagging(Bucket=bucket_name)
log_command_execution("S3 Delete bucket tagging", response)
except ClientError as err:
raise Exception(f"Got error during delete bucket tagging: {err}") from err
@allure.step("Put bucket ACL")
def put_bucket_acl_s3(
s3_client,
bucket: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
params = {"Bucket": bucket}
if acl:
params.update({"ACL": acl})
elif grant_write or grant_read:
if grant_write:
params.update({"GrantWrite": grant_write})
elif grant_read:
params.update({"GrantRead": grant_read})
try:
response = s3_client.put_bucket_acl(**params)
log_command_execution("S3 ACL bucket result", response)
return response.get("Grants")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Put object lock configuration")
def put_object_lock_configuration(s3_client, bucket: str, configuration: dict):
params = {"Bucket": bucket, "ObjectLockConfiguration": configuration}
try:
response = s3_client.put_object_lock_configuration(**params)
log_command_execution("S3 put_object_lock_configuration result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Get object lock configuration")
def get_object_lock_configuration(s3_client, bucket: str):
params = {"Bucket": bucket}
try:
response = s3_client.get_object_lock_configuration(**params)
log_command_execution("S3 get_object_lock_configuration result", response)
return response.get("ObjectLockConfiguration")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
def get_bucket_policy(s3_client, bucket: str):
params = {"Bucket": bucket}
try:
response = s3_client.get_bucket_policy(**params)
log_command_execution("S3 get_object_lock_configuration result", response)
return response.get("ObjectLockConfiguration")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
def put_bucket_policy(s3_client, bucket: str, policy: dict):
params = {"Bucket": bucket, "Policy": json.dumps(policy)}
try:
response = s3_client.put_bucket_policy(**params)
log_command_execution("S3 put_bucket_policy result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
def get_bucket_cors(s3_client, bucket: str):
params = {"Bucket": bucket}
try:
response = s3_client.get_bucket_cors(**params)
log_command_execution("S3 get_bucket_cors result", response)
return response.get("CORSRules")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
def get_bucket_location(s3_client, bucket: str):
params = {"Bucket": bucket}
try:
response = s3_client.get_bucket_location(**params)
log_command_execution("S3 get_bucket_location result", response)
return response.get("LocationConstraint")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
def put_bucket_cors(s3_client, bucket: str, cors_configuration: dict):
params = {"Bucket": bucket, "CORSConfiguration": cors_configuration}
try:
response = s3_client.put_bucket_cors(**params)
log_command_execution("S3 put_bucket_cors result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
def delete_bucket_cors(s3_client, bucket: str):
params = {"Bucket": bucket}
try:
response = s3_client.delete_bucket_cors(**params)
log_command_execution("S3 delete_bucket_cors result", response)
return response.get("ObjectLockConfiguration")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err

View file

@ -1,595 +0,0 @@
import logging
import os
import uuid
from time import sleep
from typing import Optional
import allure
import pytest
import urllib3
from botocore.exceptions import ClientError
from pytest_tests.helpers.aws_cli_client import AwsCliClient
from pytest_tests.helpers.cli_helpers import log_command_execution
from pytest_tests.steps.s3_gate_bucket import S3_SYNC_WAIT_TIME
##########################################################
# Disable warnings about the self-signed certificate that the
# boto library reports on requests to the S3 gate in dev-env.
urllib3.disable_warnings()
##########################################################
logger = logging.getLogger("NeoLogger")
ACL_COPY = [
"private",
"public-read",
"public-read-write",
"authenticated-read",
"aws-exec-read",
"bucket-owner-read",
"bucket-owner-full-control",
]
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
@allure.step("List objects S3 v2")
def list_objects_s3_v2(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_objects_v2(Bucket=bucket)
content = response.get("Contents", [])
log_command_execution("S3 v2 List objects result", response)
obj_list = []
for obj in content:
obj_list.append(obj["Key"])
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List objects S3")
def list_objects_s3(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_objects(Bucket=bucket)
content = response.get("Contents", [])
log_command_execution("S3 List objects result", response)
obj_list = []
for obj in content:
obj_list.append(obj["Key"])
logger.info(f"Found s3 objects: {obj_list}")
return response if full_output else obj_list
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List objects versions S3")
def list_objects_versions_s3(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_object_versions(Bucket=bucket)
versions = response.get("Versions", [])
log_command_execution("S3 List objects versions result", response)
return response if full_output else versions
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List objects delete markers S3")
def list_objects_delete_markers_s3(s3_client, bucket: str, full_output: bool = False) -> list:
try:
response = s3_client.list_object_versions(Bucket=bucket)
delete_markers = response.get("DeleteMarkers", [])
log_command_execution("S3 List objects delete markers result", response)
return response if full_output else delete_markers
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Put object S3")
def put_object_s3(s3_client, bucket: str, filepath: str, **kwargs):
filename = os.path.basename(filepath)
if isinstance(s3_client, AwsCliClient):
file_content = filepath
else:
with open(filepath, "rb") as put_file:
file_content = put_file.read()
try:
params = {"Body": file_content, "Bucket": bucket, "Key": filename}
if kwargs:
params = {**params, **kwargs}
response = s3_client.put_object(**params)
log_command_execution("S3 Put object result", response)
return response.get("VersionId")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Head object S3")
def head_object_s3(s3_client, bucket: str, object_key: str, version_id: Optional[str] = None):
try:
params = {"Bucket": bucket, "Key": object_key}
if version_id:
params["VersionId"] = version_id
response = s3_client.head_object(**params)
log_command_execution("S3 Head object result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete object S3")
def delete_object_s3(
s3_client, bucket: str, object_key: str, version_id: Optional[str] = None
) -> dict:
try:
params = {"Bucket": bucket, "Key": object_key}
if version_id:
params["VersionId"] = version_id
response = s3_client.delete_object(**params)
log_command_execution("S3 Delete object result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete objects S3")
def delete_objects_s3(s3_client, bucket: str, object_keys: list):
try:
response = s3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(object_keys))
log_command_execution("S3 Delete objects result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete object versions S3")
def delete_object_versions_s3(s3_client, bucket: str, object_versions: list):
try:
# Build deletion list in S3 format
delete_list = {
"Objects": [
{
"Key": object_version["Key"],
"VersionId": object_version["VersionId"],
}
for object_version in object_versions
]
}
response = s3_client.delete_objects(Bucket=bucket, Delete=delete_list)
log_command_execution("S3 Delete objects result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Delete object versions S3 without delete markers")
def delete_object_versions_s3_without_dm(s3_client, bucket: str, object_versions: list):
try:
# Delete objects without creating delete markers
for object_version in object_versions:
params = {
"Bucket": bucket,
"Key": object_version["Key"],
"VersionId": object_version["VersionId"],
}
response = s3_client.delete_object(**params)
log_command_execution("S3 Delete object result", response)
return response
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
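# Cleanup sketch combining the version helpers above with list_objects_versions_s3
# from this module (mirrors the bucket teardown flow at the top of this changeset):
#
#     versions = list_objects_versions_s3(s3_client, bucket)
#     if versions:
#         delete_object_versions_s3_without_dm(s3_client, bucket, versions)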
@allure.step("Put object ACL")
def put_object_acl_s3(
s3_client,
bucket: str,
object_key: str,
acl: Optional[str] = None,
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
if not isinstance(s3_client, AwsCliClient):
pytest.skip("Method put_object_acl is not supported by boto3 client")
params = {"Bucket": bucket, "Key": object_key}
if acl:
params.update({"ACL": acl})
elif grant_write or grant_read:
if grant_write:
params.update({"GrantWrite": grant_write})
elif grant_read:
params.update({"GrantRead": grant_read})
try:
response = s3_client.put_object_acl(**params)
log_command_execution("S3 ACL objects result", response)
return response.get("Grants")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Get object ACL")
def get_object_acl_s3(
s3_client, bucket: str, object_key: str, version_id: Optional[str] = None
) -> list:
params = {"Bucket": bucket, "Key": object_key}
try:
if version_id:
params.update({"VersionId": version_id})
response = s3_client.get_object_acl(**params)
log_command_execution("S3 ACL objects result", response)
return response.get("Grants")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Copy object S3")
def copy_object_s3(
s3_client, bucket: str, object_key: str, bucket_dst: Optional[str] = None, **kwargs
) -> str:
filename = os.path.join(os.getcwd(), str(uuid.uuid4()))
try:
params = {
"Bucket": bucket_dst or bucket,
"CopySource": f"{bucket}/{object_key}",
"Key": filename,
}
if "ACL" in kwargs and kwargs["ACL"] in ACL_COPY:
params.update({"ACL": kwargs["ACL"]})
if "metadata_directive" in kwargs.keys():
params.update({"MetadataDirective": kwargs["metadata_directive"]})
if "metadata_directive" in kwargs.keys() and "metadata" in kwargs.keys():
params.update({"Metadata": kwargs["metadata"]})
if "tagging_directive" in kwargs.keys():
params.update({"TaggingDirective": kwargs["tagging_directive"]})
if "tagging_directive" in kwargs.keys() and "tagging" in kwargs.keys():
params.update({"Tagging": kwargs["tagging"]})
response = s3_client.copy_object(**params)
log_command_execution("S3 Copy objects result", response)
return filename
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Get object S3")
def get_object_s3(
s3_client,
bucket: str,
object_key: str,
version_id: Optional[str] = None,
range: Optional[list] = None,
full_output: bool = False,
):
filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
try:
params = {"Bucket": bucket, "Key": object_key}
if version_id:
params["VersionId"] = version_id
if isinstance(s3_client, AwsCliClient):
params["file_path"] = filename
if range:
params["Range"] = f"bytes={range[0]}-{range[1]}"
response = s3_client.get_object(**params)
log_command_execution("S3 Get objects result", response)
if not isinstance(s3_client, AwsCliClient):
with open(f"{filename}", "wb") as get_file:
chunk = response["Body"].read(1024)
while chunk:
get_file.write(chunk)
chunk = response["Body"].read(1024)
return response if full_output else filename
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
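# Round-trip sketch for the put/get helpers above (assumes `filepath` points at
# an existing local file; the object key defaults to its basename):
#
#     version_id = put_object_s3(s3_client, bucket, filepath)
#     local_copy = get_object_s3(
#         s3_client, bucket, os.path.basename(filepath), version_id=version_id, range=[0, 99]
#     )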
@allure.step("Create multipart upload S3")
def create_multipart_upload_s3(s3_client, bucket_name: str, object_key: str) -> str:
try:
response = s3_client.create_multipart_upload(Bucket=bucket_name, Key=object_key)
log_command_execution("S3 Created multipart upload", response)
assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
return response.get("UploadId")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List multipart uploads S3")
def list_multipart_uploads_s3(s3_client, bucket_name: str) -> Optional[list[dict]]:
try:
response = s3_client.list_multipart_uploads(Bucket=bucket_name)
log_command_execution("S3 List multipart upload", response)
return response.get("Uploads")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Abort multipart upload S3")
def abort_multipart_upload_s3(s3_client, bucket_name: str, object_key: str, upload_id: str):
try:
response = s3_client.abort_multipart_upload(
Bucket=bucket_name, Key=object_key, UploadId=upload_id
)
log_command_execution("S3 Abort multipart upload", response)
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Upload part S3")
def upload_part_s3(
s3_client, bucket_name: str, object_key: str, upload_id: str, part_num: int, filepath: str
) -> str:
if isinstance(s3_client, AwsCliClient):
file_content = filepath
else:
with open(filepath, "rb") as put_file:
file_content = put_file.read()
try:
response = s3_client.upload_part(
UploadId=upload_id,
Bucket=bucket_name,
Key=object_key,
PartNumber=part_num,
Body=file_content,
)
log_command_execution("S3 Upload part", response)
assert response.get("ETag"), f"Expected ETag in response:\n{response}"
return response.get("ETag")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Upload copy part S3")
def upload_part_copy_s3(
s3_client, bucket_name: str, object_key: str, upload_id: str, part_num: int, copy_source: str
) -> str:
try:
response = s3_client.upload_part_copy(
UploadId=upload_id,
Bucket=bucket_name,
Key=object_key,
PartNumber=part_num,
CopySource=copy_source,
)
log_command_execution("S3 Upload copy part", response)
assert response.get("CopyPartResult").get("ETag"), f"Expected ETag in response:\n{response}"
return response.get("CopyPartResult").get("ETag")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("List parts S3")
def list_parts_s3(s3_client, bucket_name: str, object_key: str, upload_id: str) -> list[dict]:
try:
response = s3_client.list_parts(UploadId=upload_id, Bucket=bucket_name, Key=object_key)
log_command_execution("S3 List part", response)
assert response.get("Parts"), f"Expected Parts in response:\n{response}"
return response.get("Parts")
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
@allure.step("Complete multipart upload S3")
def complete_multipart_upload_s3(
s3_client, bucket_name: str, object_key: str, upload_id: str, parts: list
):
try:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
response = s3_client.complete_multipart_upload(
Bucket=bucket_name, Key=object_key, UploadId=upload_id, MultipartUpload={"Parts": parts}
)
log_command_execution("S3 Complete multipart upload", response)
except ClientError as err:
raise Exception(
f'Error Message: {err.response["Error"]["Message"]}\n'
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
) from err
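# Multipart flow sketch using the helpers above (part files are prepared by the
# caller; note that S3 requires every part except the last to be at least 5 MiB):
#
#     upload_id = create_multipart_upload_s3(s3_client, bucket, "big-object")
#     etag = upload_part_s3(s3_client, bucket, "big-object", upload_id, 1, part_file)
#     complete_multipart_upload_s3(s3_client, bucket, "big-object", upload_id, [(1, etag)])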
@allure.step("Put object retention")
def put_object_retention(
s3_client,
bucket_name: str,
object_key: str,
retention: dict,
version_id: Optional[str] = None,
bypass_governance_retention: Optional[bool] = None,
):
try:
params = {"Bucket": bucket_name, "Key": object_key, "Retention": retention}
if version_id:
params.update({"VersionId": version_id})
if bypass_governance_retention is not None:
params.update({"BypassGovernanceRetention": bypass_governance_retention})
s3_client.put_object_retention(**params)
log_command_execution("S3 Put object retention ", str(retention))
except ClientError as err:
raise Exception(f"Got error during put object tagging: {err}") from err
@allure.step("Put object legal hold")
def put_object_legal_hold(
s3_client, bucket_name: str, object_key: str, legal_hold: str, version_id: Optional[str] = None
):
try:
params = {"Bucket": bucket_name, "Key": object_key, "LegalHold": {"Status": legal_hold}}
if version_id:
params.update({"VersionId": version_id})
s3_client.put_object_legal_hold(**params)
log_command_execution("S3 Put object legal hold ", str(legal_hold))
except ClientError as err:
raise Exception(f"Got error during put object tagging: {err}") from err
@allure.step("Put object tagging")
def put_object_tagging(s3_client, bucket_name: str, object_key: str, tags: list):
try:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
s3_client.put_object_tagging(Bucket=bucket_name, Key=object_key, Tagging=tagging)
log_command_execution("S3 Put object tagging", str(tags))
except ClientError as err:
raise Exception(f"Got error during put object tagging: {err}") from err
@allure.step("Get object tagging")
def get_object_tagging(
s3_client, bucket_name: str, object_key: str, version_id: Optional[str] = None
) -> list:
try:
params = {"Bucket": bucket_name, "Key": object_key}
if version_id:
params.update({"VersionId": version_id})
response = s3_client.get_object_tagging(**params)
log_command_execution("S3 Get object tagging", response)
return response.get("TagSet")
except ClientError as err:
raise Exception(f"Got error during get object tagging: {err}") from err
@allure.step("Delete object tagging")
def delete_object_tagging(s3_client, bucket_name: str, object_key: str):
try:
response = s3_client.delete_object_tagging(Bucket=bucket_name, Key=object_key)
log_command_execution("S3 Delete object tagging", response)
except ClientError as err:
raise Exception(f"Got error during delete object tagging: {err}") from err
@allure.step("Get object attributes")
def get_object_attributes(
s3_client,
bucket_name: str,
object_key: str,
*attributes: str,
version_id: Optional[str] = None,
max_parts: Optional[int] = None,
part_number: Optional[int] = None,
get_full_resp: bool = True,
) -> dict:
try:
if not isinstance(s3_client, AwsCliClient):
logger.warning("Method get_object_attributes is not supported by boto3 client")
return {}
response = s3_client.get_object_attributes(
bucket_name,
object_key,
*attributes,
version_id=version_id,
max_parts=max_parts,
part_number=part_number,
)
log_command_execution("S3 Get object attributes", response)
for attr in attributes:
assert attr in response, f"Expected attribute {attr} in {response}"
if get_full_resp:
return response
else:
return response.get(attributes[0])
except ClientError as err:
raise Exception(f"Got error during get object attributes: {err}") from err
def _make_objs_dict(key_names):
objs_list = []
for key in key_names:
obj_dict = {"Key": key}
objs_list.append(obj_dict)
objs_dict = {"Objects": objs_list}
return objs_dict
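# For reference, _make_objs_dict(["a", "b"]) produces the S3 Delete payload
# {"Objects": [{"Key": "a"}, {"Key": "b"}]}.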

View file

@ -1,286 +0,0 @@
import base64
import json
import logging
import os
import uuid
from dataclasses import dataclass
from enum import Enum
from typing import Any, Optional
import allure
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import json_utils, wallet_utils
from pytest_tests.helpers.storage_object_info import StorageObjectInfo
from pytest_tests.helpers.wallet import WalletFile
from pytest_tests.resources.common import ASSETS_DIR, FROSTFS_CLI_EXEC, WALLET_CONFIG
logger = logging.getLogger("NeoLogger")
UNRELATED_KEY = "unrelated key in the session"
UNRELATED_OBJECT = "unrelated object in the session"
UNRELATED_CONTAINER = "unrelated container in the session"
WRONG_VERB = "wrong verb of the session"
INVALID_SIGNATURE = "invalid signature of the session data"
class ObjectVerb(Enum):
PUT = "PUT"
DELETE = "DELETE"
GET = "GET"
RANGEHASH = "RANGEHASH"
RANGE = "RANGE"
HEAD = "HEAD"
SEARCH = "SEARCH"
class ContainerVerb(Enum):
CREATE = "PUT"
DELETE = "DELETE"
SETEACL = "SETEACL"
@dataclass
class Lifetime:
exp: int = 100000000
nbf: int = 0
iat: int = 0
@allure.step("Generate Session Token")
def generate_session_token(
owner_wallet: WalletFile,
session_wallet: WalletFile,
session: dict[str, dict[str, Any]],
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
) -> str:
"""
This function generates a session token and writes it to a file.
Args:
owner_wallet: wallet of container owner
session_wallet: wallet to which we grant the access via session token
session: Contains allowed operation with parameters
tokens_dir: Dir for token
lifetime: lifetime options for session
Returns:
The path to the generated session token file
"""
file_path = os.path.join(tokens_dir, str(uuid.uuid4()))
pub_key_64 = wallet_utils.get_wallet_public_key(
session_wallet.path, session_wallet.password, "base64"
)
lifetime = lifetime or Lifetime()
session_token = {
"body": {
"id": f"{base64.b64encode(uuid.uuid4().bytes).decode('utf-8')}",
"ownerID": {"value": f"{json_utils.encode_for_json(owner_wallet.get_address())}"},
"lifetime": {
"exp": f"{lifetime.exp}",
"nbf": f"{lifetime.nbf}",
"iat": f"{lifetime.iat}",
},
"sessionKey": pub_key_64,
}
}
session_token["body"].update(session)
logger.info(f"Got this Session Token: {session_token}")
with open(file_path, "w", encoding="utf-8") as session_token_file:
json.dump(session_token, session_token_file, ensure_ascii=False, indent=4)
return file_path
@allure.step("Generate Session Token For Container")
def generate_container_session_token(
owner_wallet: WalletFile,
session_wallet: WalletFile,
verb: ContainerVerb,
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
cid: Optional[str] = None,
) -> str:
"""
This function generates a session token for ContainerSessionContext
and writes it to a file. It can prepare a session token file
for a specific container (<cid>) or for every container (adds
"wildcard" field).
Args:
owner_wallet: wallet of container owner.
session_wallet: wallet to which we grant the access via session token.
verb: verb to grant access to.
lifetime: lifetime options for session.
cid: container ID of the container
Returns:
The path to the generated session token file
"""
session = {
"container": {
"verb": verb.value,
"wildcard": cid is None,
**(
{"containerID": {"value": f"{json_utils.encode_for_json(cid)}"}}
if cid is not None
else {}
),
},
}
return generate_session_token(
owner_wallet=owner_wallet,
session_wallet=session_wallet,
session=session,
tokens_dir=tokens_dir,
lifetime=lifetime,
)
@allure.step("Generate Session Token For Object")
def generate_object_session_token(
owner_wallet: WalletFile,
session_wallet: WalletFile,
oids: list[str],
cid: str,
verb: ObjectVerb,
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
) -> str:
"""
This function generates a session token for ObjectSessionContext
and writes it to a file.
Args:
owner_wallet: wallet of container owner
session_wallet: wallet to which we grant the access via session token
cid: container ID of the container
oids: list of objectIDs to put into session
verb: verb to grant access to; valid verbs are defined in ObjectVerb.
lifetime: lifetime options for session
Returns:
The path to the generated session token file
"""
session = {
"object": {
"verb": verb.value,
"target": {
"container": {"value": json_utils.encode_for_json(cid)},
"objects": [{"value": json_utils.encode_for_json(oid)} for oid in oids],
},
},
}
return generate_session_token(
owner_wallet=owner_wallet,
session_wallet=session_wallet,
session=session,
tokens_dir=tokens_dir,
lifetime=lifetime,
)
@allure.step("Get signed token for container session")
def get_container_signed_token(
owner_wallet: WalletFile,
user_wallet: WalletFile,
verb: ContainerVerb,
shell: Shell,
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
) -> str:
"""
Returns signed token file path for static container session
"""
session_token_file = generate_container_session_token(
owner_wallet=owner_wallet,
session_wallet=user_wallet,
verb=verb,
tokens_dir=tokens_dir,
lifetime=lifetime,
)
return sign_session_token(shell, session_token_file, owner_wallet)
@allure.step("Get signed token for object session")
def get_object_signed_token(
owner_wallet: WalletFile,
user_wallet: WalletFile,
cid: str,
storage_objects: list[StorageObjectInfo],
verb: ObjectVerb,
shell: Shell,
tokens_dir: str,
lifetime: Optional[Lifetime] = None,
) -> str:
"""
Returns signed token file path for static object session
"""
storage_object_ids = [storage_object.oid for storage_object in storage_objects]
session_token_file = generate_object_session_token(
owner_wallet=owner_wallet,
session_wallet=user_wallet,
oids=storage_object_ids,
cid=cid,
verb=verb,
tokens_dir=tokens_dir,
lifetime=lifetime,
)
return sign_session_token(shell, session_token_file, owner_wallet)
@allure.step("Create Session Token")
def create_session_token(
shell: Shell,
owner: str,
wallet_path: str,
wallet_password: str,
rpc_endpoint: str,
) -> str:
"""
Create session token for an object.
Args:
shell: Shell instance.
owner: User that writes the token.
wallet_path: The path to wallet to which we grant the access via session token.
wallet_password: Wallet password.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
Returns:
The path to the generated session token file.
"""
session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC)
frostfscli.session.create(
rpc_endpoint=rpc_endpoint,
address=owner,
wallet=wallet_path,
wallet_password=wallet_password,
out=session_token,
)
return session_token
@allure.step("Sign Session Token")
def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletFile) -> str:
"""
This function signs the session token with the given wallet.
Args:
shell: Shell instance.
session_token_file: The path to the session token file.
wlt: The signing wallet.
Returns:
The path to the signed token.
"""
signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
frostfscli = FrostfsCli(
shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=WALLET_CONFIG
)
frostfscli.util.sign_session_token(
wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file
)
return signed_token_file

View file

@ -1,62 +0,0 @@
import logging
from time import sleep
import allure
import pytest
from frostfs_testlib.resources.common import OBJECT_ALREADY_REMOVED
from frostfs_testlib.shell import Shell
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.epoch import tick_epoch
from pytest_tests.helpers.frostfs_verbs import delete_object, get_object
from pytest_tests.helpers.storage_object_info import StorageObjectInfo
from pytest_tests.helpers.tombstone import verify_head_tombstone
logger = logging.getLogger("NeoLogger")
CLEANUP_TIMEOUT = 10
@allure.step("Delete Objects")
def delete_objects(
storage_objects: list[StorageObjectInfo], shell: Shell, cluster: Cluster
) -> None:
"""
Deletes given storage objects.
Args:
storage_objects: list of objects to delete
shell: executor for cli command
cluster: cluster where the objects are stored
"""
with allure.step("Delete objects"):
for storage_object in storage_objects:
storage_object.tombstone = delete_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=shell,
endpoint=cluster.default_rpc_endpoint,
)
verify_head_tombstone(
wallet_path=storage_object.wallet_file_path,
cid=storage_object.cid,
oid_ts=storage_object.tombstone,
oid=storage_object.oid,
shell=shell,
endpoint=cluster.default_rpc_endpoint,
)
tick_epoch(shell, cluster)
sleep(CLEANUP_TIMEOUT)
with allure.step("Get objects and check errors"):
for storage_object in storage_objects:
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
get_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=shell,
endpoint=cluster.default_rpc_endpoint,
)

View file

@ -5,16 +5,16 @@ from typing import Optional
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.storage.dataclasses.frostfs_services import InnerRing, StorageNode
from frostfs_testlib.utils import wallet_utils
from pytest_tests.helpers.acl import EACLRole
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node
from pytest_tests.resources.common import WALLET_CONFIG, WALLET_PASS
from frostfs_testlib.utils.file_utils import generate_file
OBJECT_COUNT = 5
@ -37,15 +37,15 @@ class Wallets:
@pytest.fixture(scope="module")
def wallets(default_wallet, temp_directory, cluster: Cluster) -> Wallets:
def wallets(default_wallet: str, temp_directory: str, cluster: Cluster) -> Wallets:
other_wallets_paths = [
os.path.join(temp_directory, f"{str(uuid.uuid4())}.json") for _ in range(2)
]
for other_wallet_path in other_wallets_paths:
wallet_utils.init_wallet(other_wallet_path, WALLET_PASS)
wallet_utils.init_wallet(other_wallet_path, DEFAULT_WALLET_PASS)
ir_node = cluster.ir_nodes[0]
storage_node = cluster.storage_nodes[0]
ir_node: InnerRing = cluster.ir_nodes[0]
storage_node: StorageNode = cluster.storage_nodes[0]
ir_wallet_path = ir_node.get_wallet_path()
ir_wallet_config = ir_node.get_wallet_config_path()
@ -55,9 +55,9 @@ def wallets(default_wallet, temp_directory, cluster: Cluster) -> Wallets:
yield Wallets(
wallets={
EACLRole.USER: [Wallet(wallet_path=default_wallet, config_path=WALLET_CONFIG)],
EACLRole.USER: [Wallet(wallet_path=default_wallet, config_path=DEFAULT_WALLET_CONFIG)],
EACLRole.OTHERS: [
Wallet(wallet_path=other_wallet_path, config_path=WALLET_CONFIG)
Wallet(wallet_path=other_wallet_path, config_path=DEFAULT_WALLET_CONFIG)
for other_wallet_path in other_wallets_paths
],
EACLRole.SYSTEM: [
@ -69,14 +69,14 @@ def wallets(default_wallet, temp_directory, cluster: Cluster) -> Wallets:
@pytest.fixture(scope="module")
def file_path(simple_object_size):
def file_path(simple_object_size: int) -> str:
yield generate_file(simple_object_size)
@pytest.fixture(scope="function")
def eacl_container_with_objects(
wallets: Wallets, client_shell: Shell, cluster: Cluster, file_path: str
):
) -> tuple[str, list[str], str]:
user_wallet = wallets.get_wallet()
with allure.step("Create eACL public container"):
cid = create_container(

View file

@ -1,16 +1,18 @@
import allure
import pytest
from frostfs_testlib.resources.common import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
from frostfs_testlib.resources.wellknown_acl import PRIVATE_ACL_F, PUBLIC_ACL_F, READONLY_ACL_F
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.acl import EACLRole
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.container_access import (
check_full_access_to_container,
check_no_access_to_container,
check_read_only_container,
)
from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@ -35,7 +37,7 @@ class TestACLBasic(ClusterTestBase):
# delete_container(user_wallet.wallet_path, cid_public)
@pytest.fixture(scope="function")
def private_container(self, wallets):
def private_container(self, wallets: Wallets):
user_wallet = wallets.get_wallet()
with allure.step("Create private container"):
cid_private = create_container(
@ -51,7 +53,7 @@ class TestACLBasic(ClusterTestBase):
# delete_container(user_wallet.wallet_path, cid_private)
@pytest.fixture(scope="function")
def read_only_container(self, wallets):
def read_only_container(self, wallets: Wallets):
user_wallet = wallets.get_wallet()
with allure.step("Create public readonly container"):
cid_read_only = create_container(
@ -67,7 +69,7 @@ class TestACLBasic(ClusterTestBase):
# delete_container(user_wallet.wallet_path, cid_read_only)
@allure.title("Test basic ACL on public container")
def test_basic_acl_public(self, wallets, public_container, file_path):
def test_basic_acl_public(self, wallets: Wallets, public_container: str, file_path: str):
"""
Test basic ACL set during public container creation.
"""
@ -113,7 +115,7 @@ class TestACLBasic(ClusterTestBase):
)
@allure.title("Test basic ACL on private container")
def test_basic_acl_private(self, wallets, private_container, file_path):
def test_basic_acl_private(self, wallets: Wallets, private_container: str, file_path: str):
"""
Test basic ACL set during private container creation.
"""
@ -147,7 +149,9 @@ class TestACLBasic(ClusterTestBase):
)
@allure.title("Test basic ACL on readonly container")
def test_basic_acl_readonly(self, wallets, client_shell, read_only_container, file_path):
def test_basic_acl_readonly(
self, wallets: Wallets, client_shell: Shell, read_only_container: str, file_path: str
):
"""
Test basic ACL Operations for Read-Only Container.
"""

View file

@ -1,22 +1,20 @@
import allure
import pytest
from pytest_tests.helpers.acl import (
EACLAccess,
EACLOperation,
EACLRole,
EACLRule,
from frostfs_testlib.steps.acl import (
create_eacl,
form_bearertoken_file,
set_eacl,
wait_for_cache_expired,
)
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import (
check_custom_access_to_container,
check_full_access_to_container,
check_no_access_to_container,
)
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@ -24,7 +22,12 @@ from pytest_tests.steps.cluster_test_base import ClusterTestBase
@pytest.mark.acl_bearer
class TestACLBearer(ClusterTestBase):
@pytest.mark.parametrize("role", [EACLRole.USER, EACLRole.OTHERS])
def test_bearer_token_operations(self, wallets, eacl_container_with_objects, role):
def test_bearer_token_operations(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
role: EACLRole,
):
allure.dynamic.title(
f"Testcase to validate FrostFS operations with {role.value} BearerToken"
)

View file

@ -1,24 +1,18 @@
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.acl import create_eacl, set_eacl, wait_for_cache_expired
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.failover_utils import wait_object_replication
from pytest_tests.helpers.acl import (
EACLAccess,
EACLOperation,
EACLRole,
EACLRule,
create_eacl,
set_eacl,
wait_for_cache_expired,
)
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.container_access import (
check_full_access_to_container,
check_no_access_to_container,
)
from pytest_tests.helpers.failover_utils import wait_object_replication
from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node
from pytest_tests.helpers.node_management import drop_object
from pytest_tests.helpers.object_access import (
can_delete_object,
can_get_head_object,
@ -28,7 +22,7 @@ from pytest_tests.helpers.object_access import (
can_put_object,
can_search_object,
)
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@ -36,7 +30,7 @@ from pytest_tests.steps.cluster_test_base import ClusterTestBase
@pytest.mark.acl_extended
class TestEACLContainer(ClusterTestBase):
@pytest.fixture(scope="function")
def eacl_full_placement_container_with_object(self, wallets, file_path) -> str:
def eacl_full_placement_container_with_object(self, wallets: Wallets, file_path: str) -> str:
user_wallet = wallets.get_wallet()
storage_nodes = self.cluster.storage_nodes
node_count = len(storage_nodes)
@ -66,7 +60,10 @@ class TestEACLContainer(ClusterTestBase):
@pytest.mark.parametrize("deny_role", [EACLRole.USER, EACLRole.OTHERS])
def test_extended_acl_deny_all_operations(
self, wallets, eacl_container_with_objects, deny_role
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
deny_role: EACLRole,
):
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
@ -150,7 +147,7 @@ class TestEACLContainer(ClusterTestBase):
@allure.title("Testcase to allow FrostFS operations for only one other pubkey.")
def test_extended_acl_deny_all_operations_exclude_pubkey(
self, wallets, eacl_container_with_objects
self, wallets: Wallets, eacl_container_with_objects: tuple[str, list[str], str]
):
user_wallet = wallets.get_wallet()
other_wallet, other_wallet_allow = wallets.get_wallets_list(EACLRole.OTHERS)[0:2]
@ -212,8 +209,8 @@ class TestEACLContainer(ClusterTestBase):
@allure.title("Testcase to validate FrostFS replication with eACL deny rules.")
def test_extended_acl_deny_replication(
self,
wallets,
eacl_full_placement_container_with_object,
wallets: Wallets,
eacl_full_placement_container_with_object: tuple[str, list[str], str],
):
user_wallet = wallets.get_wallet()
cid, oid, file_path = eacl_full_placement_container_with_object
@ -252,7 +249,9 @@ class TestEACLContainer(ClusterTestBase):
)
@allure.title("Testcase to validate FrostFS system operations with extended ACL")
def test_extended_actions_system(self, wallets, eacl_container_with_objects):
def test_extended_actions_system(
self, wallets: Wallets, eacl_container_with_objects: tuple[str, list[str], str]
):
user_wallet = wallets.get_wallet()
ir_wallet, storage_wallet = wallets.get_wallets_list(role=EACLRole.SYSTEM)[:2]

View file

@ -1,8 +1,15 @@
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from pytest_tests.helpers.acl import (
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.acl import (
create_eacl,
form_bearertoken_file,
set_eacl,
wait_for_cache_expired,
)
from frostfs_testlib.steps.cli.container import create_container, delete_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.storage.dataclasses.acl import (
EACLAccess,
EACLFilter,
EACLFilters,
@ -11,19 +18,15 @@ from pytest_tests.helpers.acl import (
EACLOperation,
EACLRole,
EACLRule,
create_eacl,
form_bearertoken_file,
set_eacl,
wait_for_cache_expired,
)
from pytest_tests.helpers.container import create_container, delete_container
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.container_access import (
check_full_access_to_container,
check_no_access_to_container,
)
from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node
from pytest_tests.helpers.object_access import can_get_head_object, can_get_object, can_put_object
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.testsuites.acl.conftest import Wallets
@pytest.mark.sanity
@ -69,7 +72,7 @@ class TestEACLFilters(ClusterTestBase):
]
@pytest.fixture(scope="function")
def eacl_container_with_objects(self, wallets, file_path):
def eacl_container_with_objects(self, wallets: Wallets, file_path: str):
user_wallet = wallets.get_wallet()
with allure.step("Create eACL public container"):
cid = create_container(
@ -128,7 +131,12 @@ class TestEACLFilters(ClusterTestBase):
@pytest.mark.parametrize(
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
)
def test_extended_acl_filters_request(self, wallets, eacl_container_with_objects, match_type):
def test_extended_acl_filters_request(
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
match_type: EACLMatchType,
):
allure.dynamic.title(f"Validate FrostFS operations with request filter: {match_type.name}")
user_wallet = wallets.get_wallet()
other_wallet = wallets.get_wallet(EACLRole.OTHERS)
@ -241,7 +249,10 @@ class TestEACLFilters(ClusterTestBase):
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
)
def test_extended_acl_deny_filters_object(
self, wallets, eacl_container_with_objects, match_type
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
match_type: EACLMatchType,
):
allure.dynamic.title(
f"Validate FrostFS operations with deny user headers filter: {match_type.name}"
@ -423,7 +434,10 @@ class TestEACLFilters(ClusterTestBase):
"match_type", [EACLMatchType.STRING_EQUAL, EACLMatchType.STRING_NOT_EQUAL]
)
def test_extended_acl_allow_filters_object(
self, wallets, eacl_container_with_objects, match_type
self,
wallets: Wallets,
eacl_container_with_objects: tuple[str, list[str], str],
match_type: EACLMatchType,
):
allure.dynamic.title(
"Testcase to validate FrostFS operation with allow eACL user headers filters:"
@ -476,7 +490,7 @@ class TestEACLFilters(ClusterTestBase):
allow_attribute = self.OTHER_ATTRIBUTE
deny_attribute = self.ATTRIBUTE
with allure.step(f"Check other cannot get and put objects without attributes"):
with allure.step("Check other cannot get and put objects without attributes"):
oid = objects_without_header.pop()
with pytest.raises(AssertionError):
assert can_get_head_object(
@ -543,7 +557,7 @@ class TestEACLFilters(ClusterTestBase):
bearer=bearer_other,
)
with allure.step(f"Check other can get objects with attributes matching the filter"):
with allure.step("Check other can get objects with attributes matching the filter"):
oid = allow_objects.pop()
assert can_get_head_object(
other_wallet.wallet_path,

View file

@ -1,45 +1,34 @@
import logging
import os
import shutil
import uuid
from datetime import datetime
from typing import Optional
import allure
import pytest
import yaml
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.reporter import AllureHandler, get_reporter
from frostfs_testlib.shell import LocalShell, Shell
from frostfs_testlib.utils import wallet_utils
from pytest_tests.helpers import binary_version, env_properties
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.frostfs_verbs import get_netmap_netinfo
from pytest_tests.helpers.k6 import LoadParams
from pytest_tests.helpers.node_management import storage_node_healthcheck
from pytest_tests.helpers.payment_neogo import deposit_gas, transfer_gas
from pytest_tests.helpers.wallet import WalletFactory
from pytest_tests.resources.common import (
from frostfs_testlib.resources.common import (
ASSETS_DIR,
COMPLEX_OBJECT_CHUNKS_COUNT,
COMPLEX_OBJECT_TAIL_SIZE,
FREE_STORAGE,
HOSTING_CONFIG_FILE,
DEFAULT_WALLET_PASS,
SIMPLE_OBJECT_SIZE,
STORAGE_NODE_SERVICE_NAME_REGEX,
TEST_CYCLES_COUNT,
WALLET_PASS,
)
from pytest_tests.resources.load_params import (
BACKGROUND_LOAD_MAX_TIME,
BACKGROUND_OBJ_SIZE,
BACKGROUND_READERS_COUNT,
BACKGROUND_WRITERS_COUNT,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
LOAD_NODES,
)
from pytest_tests.steps.load import get_services_endpoints, prepare_k6_instances
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import LocalShell, Shell
from frostfs_testlib.steps.cli.container import list_containers
from frostfs_testlib.steps.cli.object import get_netmap_netinfo
from frostfs_testlib.steps.node_management import storage_node_healthcheck
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import env_utils, version_utils
from pytest_tests.resources.common import HOSTING_CONFIG_FILE, TEST_CYCLES_COUNT
logger = logging.getLogger("NeoLogger")
@ -81,6 +70,7 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
@pytest.fixture(scope="session")
def configure_testlib():
get_reporter().register_handler(AllureHandler())
logging.getLogger("paramiko").setLevel(logging.INFO)
yield
@ -139,25 +129,107 @@ def wallet_factory(temp_directory: str, client_shell: Shell, cluster: Cluster) -
@pytest.fixture(scope="session")
def cluster(temp_directory: str, hosting: Hosting) -> Cluster:
def cluster(temp_directory: str, hosting: Hosting, client_shell: Shell) -> Cluster:
cluster = Cluster(hosting)
if cluster.is_local_devevn():
if cluster.is_local_devenv():
cluster.create_wallet_configs(hosting)
ClusterTestBase.shell = client_shell
ClusterTestBase.cluster = cluster
yield cluster
@pytest.fixture(scope="session", autouse=True)
@allure.title("Check binary versions")
def check_binary_versions(request, hosting: Hosting, client_shell: Shell):
local_versions = binary_version.get_local_binaries_versions(client_shell)
remote_versions = binary_version.get_remote_binaries_versions(hosting)
@allure.step("[Class]: Provide S3 policy")
@pytest.fixture(scope="class")
def s3_policy(request: pytest.FixtureRequest):
policy = None
if "param" in request.__dict__:
policy = request.param
all_versions = {**local_versions, **remote_versions}
env_properties.save_env_properties(request.config, all_versions)
return policy
@pytest.fixture(scope="session")
@allure.title("Prepare tmp directory")
def cluster_state_controller(client_shell: Shell, cluster: Cluster) -> ClusterStateController:
controller = ClusterStateController(client_shell, cluster)
yield controller
@allure.step("[Class]: Create S3 client")
@pytest.fixture(scope="class")
def s3_client(
default_wallet: str,
client_shell: Shell,
s3_policy: Optional[str],
cluster: Cluster,
request: pytest.FixtureRequest,
) -> S3ClientWrapper:
s3_bearer_rules_file = f"{os.getcwd()}/pytest_tests/resources/files/s3_bearer_rules.json"
wallet = WalletInfo(path=default_wallet, password=DEFAULT_WALLET_PASS)
(cid, access_key_id, secret_access_key) = s3_helper.init_s3_credentials(
wallet,
client_shell,
cluster,
s3gates=[cluster_node.s3_gate for cluster_node in cluster.cluster_nodes],
s3_bearer_rules_file=s3_bearer_rules_file,
policy=s3_policy,
)
containers_list = list_containers(
wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
s3_client_cls = request.param
client = s3_client_cls(access_key_id, secret_access_key, cluster.default_s3_gate_endpoint)
yield client
@allure.step("Create/delete bucket")
@pytest.fixture
def bucket(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
bucket_name = s3_client.create_bucket()
versioning_status: Optional[VersioningStatus] = None
if "param" in request.__dict__:
versioning_status = request.param
if versioning_status:
s3_helper.set_bucket_versioning(s3_client, bucket_name, versioning_status)
yield bucket_name
s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
@allure.step("Create two buckets")
@pytest.fixture
def two_buckets(s3_client: S3ClientWrapper):
bucket_1 = s3_client.create_bucket()
bucket_2 = s3_client.create_bucket()
yield bucket_1, bucket_2
for bucket_name in [bucket_1, bucket_2]:
s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
@allure.step("Check binary versions")
@pytest.fixture(scope="session", autouse=True)
def check_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
local_versions = version_utils.get_local_binaries_versions(client_shell)
remote_versions = version_utils.get_remote_binaries_versions(hosting)
all_versions = {**local_versions, **remote_versions}
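    # Allure picks up environment.properties from the results directory and shows it in the report's Environment widget, so binary versions are visible per run.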
environment_dir = request.config.getoption("--alluredir")
if not environment_dir:
return None
file_path = f"{environment_dir}/environment.properties"
env_utils.save_env_properties(file_path, all_versions)
@allure.step("Prepare tmp directory")
@pytest.fixture(scope="session")
def temp_directory():
with allure.step("Prepare tmp directory"):
full_path = os.path.join(os.getcwd(), ASSETS_DIR)
@ -177,8 +249,8 @@ def session_start_time():
return start_time
@allure.step("Run health check for all storage nodes")
@pytest.fixture(scope="session", autouse=True)
@allure.title("Run health check for all storage nodes")
def run_health_check(session_start_time, cluster: Cluster):
failed_nodes = []
for node in cluster.storage_nodes:
@ -190,94 +262,9 @@ def run_health_check(session_start_time, cluster: Cluster):
raise AssertionError(f"Nodes {failed_nodes} are not healthy")
@allure.step("Prepare wallet and deposit")
@pytest.fixture(scope="session")
def background_grpc_load(client_shell: Shell, hosting: Hosting):
registry_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.bolt")
prepare_file = os.path.join("/tmp/", f"{str(uuid.uuid4())}.json")
allure.dynamic.title(
f"Start background load with parameters: "
f"writers = {BACKGROUND_WRITERS_COUNT}, "
f"obj_size = {BACKGROUND_OBJ_SIZE}, "
f"load_time = {BACKGROUND_LOAD_MAX_TIME}"
f"prepare_json = {prepare_file}"
)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=STORAGE_NODE_SERVICE_NAME_REGEX,
endpoint_attribute="rpc_endpoint",
)
endpoints = ",".join(endpoints_list)
load_params = LoadParams(
endpoint=endpoints,
obj_size=BACKGROUND_OBJ_SIZE,
registry_file=registry_file,
containers_count=1,
obj_count=0,
out_file=prepare_file,
readers=0,
writers=BACKGROUND_WRITERS_COUNT,
deleters=0,
load_time=BACKGROUND_LOAD_MAX_TIME,
load_type="grpc",
)
k6_load_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run background load"):
for k6_load_instance in k6_load_instances:
k6_load_instance.start()
yield
with allure.step("Stop background load"):
for k6_load_instance in k6_load_instances:
k6_load_instance.stop()
with allure.step("Verify background load data"):
verify_params = LoadParams(
endpoint=endpoints,
clients=BACKGROUND_READERS_COUNT,
registry_file=registry_file,
load_time=BACKGROUND_LOAD_MAX_TIME,
load_type="verify",
)
k6_verify_instances = prepare_k6_instances(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=verify_params,
prepare=False,
)
with allure.step("Run verify background load data"):
for k6_verify_instance in k6_verify_instances:
k6_verify_instance.start()
k6_verify_instance.wait_until_finished(BACKGROUND_LOAD_MAX_TIME)
@pytest.fixture(scope="session")
@allure.title("Prepare wallet and deposit")
def default_wallet(client_shell: Shell, temp_directory: str, cluster: Cluster):
wallet_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json")
wallet_utils.init_wallet(wallet_path, WALLET_PASS)
allure.attach.file(wallet_path, os.path.basename(wallet_path), allure.attachment_type.JSON)
if not FREE_STORAGE:
main_chain = cluster.main_chain_nodes[0]
deposit = 30
transfer_gas(
shell=client_shell,
amount=deposit + 1,
main_chain=main_chain,
wallet_to_path=wallet_path,
wallet_to_password=WALLET_PASS,
)
deposit_gas(
shell=client_shell,
main_chain=main_chain,
amount=deposit,
wallet_from_path=wallet_path,
wallet_from_password=WALLET_PASS,
)
return wallet_path
def default_wallet(wallet_factory: WalletFactory) -> str:
wallet = wallet_factory.create_wallet(password=DEFAULT_WALLET_PASS)
allure.attach.file(wallet.path, os.path.basename(wallet.path), allure.attachment_type.JSON)
return wallet.path
View file
@ -2,9 +2,8 @@ import json
import allure
import pytest
from frostfs_testlib.resources.common import PRIVATE_ACL_F
from pytest_tests.helpers.container import (
from frostfs_testlib.resources.wellknown_acl import PRIVATE_ACL_F
from frostfs_testlib.steps.cli.container import (
create_container,
delete_container,
get_container,
@ -12,8 +11,9 @@ from pytest_tests.helpers.container import (
wait_for_container_creation,
wait_for_container_deletion,
)
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.utility import placement_policy_from_container
from pytest_tests.steps.cluster_test_base import ClusterTestBase
@pytest.mark.container
@ -21,7 +21,7 @@ from pytest_tests.steps.cluster_test_base import ClusterTestBase
class TestContainer(ClusterTestBase):
@pytest.mark.parametrize("name", ["", "test-container"], ids=["No name", "Set particular name"])
@pytest.mark.smoke
def test_container_creation(self, default_wallet, name):
def test_container_creation(self, default_wallet: str, name: str):
scenario_title = f"with name {name}" if name else "without name"
allure.dynamic.title(f"User can create container {scenario_title}")
@ -85,7 +85,7 @@ class TestContainer(ClusterTestBase):
)
@allure.title("Parallel container creation and deletion")
def test_container_creation_deletion_parallel(self, default_wallet):
def test_container_creation_deletion_parallel(self, default_wallet: str):
containers_count = 3
wallet = default_wallet
placement_rule = "REP 2 IN X CBF 1 SELECT 2 FROM * AS X"
@ -104,7 +104,7 @@ class TestContainer(ClusterTestBase):
)
)
with allure.step(f"Wait for containers occur in container list"):
with allure.step("Wait for containers occur in container list"):
for cid in cids:
wait_for_container_creation(
wallet,
View file
@ -0,0 +1,29 @@
import random
from datetime import datetime
import allure
import pytest
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers import ShardsWatcher
@pytest.fixture()
@allure.title("Select random node for testing")
def node_under_test(cluster: Cluster) -> ClusterNode:
selected_node = random.choice(cluster.cluster_nodes)
allure.attach(f"{selected_node}", "Selected node", allure.attachment_type.TEXT)
return selected_node
@pytest.fixture()
@allure.title("Provide Shards watcher")
def shards_watcher(node_under_test: ClusterNode) -> ShardsWatcher:
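    # The watcher snapshots per-shard state (mode, error counters) on the node, letting tests diff the state taken before and after a failure.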
watcher = ShardsWatcher(node_under_test)
return watcher
@pytest.fixture()
@allure.title("Test start time")
def test_start_time() -> datetime:
start_time = datetime.utcnow()
return start_time
View file
@ -1,21 +1,21 @@
import logging
from random import choices
import random
from time import sleep
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from pytest_tests.helpers.cluster import StorageNode
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.failover_utils import (
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.failover_utils import (
wait_all_storage_nodes_returned,
wait_object_replication,
)
from pytest_tests.helpers.file_helper import generate_file, get_file_hash
from pytest_tests.helpers.frostfs_verbs import get_object, put_object_to_random_node
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from pytest_tests.helpers.iptables_helper import IpTablesHelper
from pytest_tests.steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger")
STORAGE_NODE_COMMUNICATION_PORT = "8080"
@ -38,11 +38,11 @@ class TestFailoverNetwork(ClusterTestBase):
IpTablesHelper.restore_input_traffic_to_port(node.host.get_shell(), PORTS_TO_BLOCK)
blocked_nodes.remove(node)
if not_empty:
wait_all_storage_nodes_returned(self.cluster)
wait_all_storage_nodes_returned(self.shell, self.cluster)
@allure.title("Block Storage node traffic")
def test_block_storage_node_traffic(
self, default_wallet, require_multiple_hosts, simple_object_size
self, default_wallet: str, require_multiple_hosts, simple_object_size: int
):
"""
Block storage nodes traffic using iptables and wait for replication for objects.
@ -72,7 +72,7 @@ class TestFailoverNetwork(ClusterTestBase):
nodes_to_block = nodes
if nodes_to_block_count > len(nodes):
# TODO: the intent of this logic is not clear, need to revisit
nodes_to_block = choices(nodes, k=2)
nodes_to_block = random.choices(nodes, k=2)
excluded_nodes = []
for node in nodes_to_block:
@ -92,7 +92,7 @@ class TestFailoverNetwork(ClusterTestBase):
)
assert node not in new_nodes
with allure.step(f"Check object data is not corrupted"):
with allure.step("Check object data is not corrupted"):
got_file_path = get_object(
wallet, cid, oid, endpoint=new_nodes[0].get_rpc_endpoint(), shell=self.shell
)
@ -104,7 +104,7 @@ class TestFailoverNetwork(ClusterTestBase):
blocked_nodes.remove(node)
sleep(wakeup_node_timeout)
with allure.step(f"Check object data is not corrupted"):
with allure.step("Check object data is not corrupted"):
new_nodes = wait_object_replication(
cid, oid, 2, shell=self.shell, nodes=self.cluster.storage_nodes
)
View file
@ -0,0 +1,232 @@
import logging
import os.path
import random
import time
import allure
import pytest
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import (
StorageContainer,
StorageContainerInfo,
create_container,
)
from frostfs_testlib.steps.cli.object import get_object
from frostfs_testlib.steps.node_management import check_node_in_map, check_node_not_in_map
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.failover_utils import wait_for_host_offline, wait_object_replication
from frostfs_testlib.utils.file_utils import get_file_hash
from pytest import FixtureRequest
logger = logging.getLogger("NeoLogger")
@pytest.mark.failover
@pytest.mark.failover_server
class TestFailoverServer(ClusterTestBase):
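    # Polling helpers: retry the netmap check every second for up to two minutes instead of asserting the node's presence or absence once.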
@wait_for_success(max_wait_time=120, interval=1)
def wait_node_not_in_map(self, *args, **kwargs):
check_node_not_in_map(*args, **kwargs)
@wait_for_success(max_wait_time=120, interval=1)
def wait_node_in_map(self, *args, **kwargs):
check_node_in_map(*args, **kwargs)
@allure.step("Create {count_containers} containers and {count_files} objects")
@pytest.fixture
def containers(
self,
request: FixtureRequest,
default_wallet: str,
) -> list[StorageContainer]:
placement_rule = "REP 2 CBF 2 SELECT 2 FROM * AS X"
containers = []
for _ in range(request.param):
cont_id = create_container(
default_wallet,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
)
wallet = WalletInfo(path=default_wallet)
storage_cont_info = StorageContainerInfo(id=cont_id, wallet_file=wallet)
containers.append(
StorageContainer(
storage_container_info=storage_cont_info, shell=self.shell, cluster=self.cluster
)
)
return containers
@allure.step("Create object and delete after test")
@pytest.fixture(scope="class")
def storage_objects(
self,
request: FixtureRequest,
containers: list[StorageContainer],
simple_object_size: int,
complex_object_size: int,
) -> StorageObjectInfo:
count_object = request.param
object_size = [simple_object_size, complex_object_size]
object_list = []
for cont in containers:
for _ in range(count_object):
object_list.append(cont.generate_object(size=random.choice(object_size)))
for storage_object in object_list:
os.remove(storage_object.file_path)
yield object_list
@allure.step("Select random node to stop and start it after test")
@pytest.fixture
def node_to_stop(
self, node_under_test: ClusterNode, cluster_state_controller: ClusterStateController
) -> ClusterNode:
yield node_under_test
with allure.step(f"start {node_under_test.storage_node}"):
cluster_state_controller.start_stopped_hosts()
@allure.step("Upload object with nodes and compare")
def get_corrupted_objects_list(
self, nodes: list[StorageNode], storage_objects: list[StorageObjectInfo]
) -> list[StorageObjectInfo]:
corrupted_objects = []
for node in nodes:
for storage_object in storage_objects:
got_file_path = get_object(
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
endpoint=node.get_rpc_endpoint(),
shell=self.shell,
timeout="60s",
)
if storage_object.file_hash != get_file_hash(got_file_path):
corrupted_objects.append(storage_object)
os.remove(got_file_path)
return corrupted_objects
def check_objects_replication(
self, storage_objects: list[StorageObjectInfo], storage_nodes: list[StorageNode]
) -> None:
for storage_object in storage_objects:
wait_object_replication(
storage_object.cid,
storage_object.oid,
2,
shell=self.shell,
nodes=storage_nodes,
sleep_interval=45,
attempts=60,
)
@allure.title("Full shutdown node")
@pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
def test_complete_node_shutdown(
self,
containers: list[StorageContainer],
storage_objects: list[StorageObjectInfo],
default_wallet: str,
node_to_stop: ClusterNode,
cluster_state_controller: ClusterStateController,
):
with allure.step(f"Remove {node_to_stop} from the list of nodes"):
alive_nodes = list(set(self.cluster.cluster_nodes) - {node_to_stop})
storage_nodes = [cluster.storage_node for cluster in alive_nodes]
with allure.step("Tick epoch"):
self.tick_epochs(1, storage_nodes[0])
with allure.step("Wait 2 block time"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
cluster_state_controller.stop_node_host(node=node_to_stop, mode="hard")
with allure.step(f"Check if the node {node_to_stop.storage_node} has stopped"):
wait_for_host_offline(self.shell, node_to_stop.storage_node)
with allure.step("Verify that there are no corrupted objects"):
corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
assert not corrupted_objects_list
with allure.step(f"check {node_to_stop.storage_node} in map"):
self.wait_node_in_map(
node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0]
)
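        # Presumably the netmap cleaner drops a node that stays offline for netmap_cleaner_threshold epochs; two extra ticks give margin.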
count_tick_epoch = int(alive_nodes[0].ir_node.get_netmap_cleaner_threshold()) + 2
with allure.step(f"Tick {count_tick_epoch} epoch, in {storage_nodes[0]} node"):
for tick in range(count_tick_epoch):
self.tick_epoch(storage_nodes[0])
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
with allure.step(f"Check if the node {node_to_stop.storage_node} has stopped"):
wait_for_host_offline(self.shell, node_to_stop.storage_node)
with allure.step(f"Check {node_to_stop} in not map"):
self.wait_node_not_in_map(
node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0]
)
with allure.step(
f"Verify that there are no corrupted objects after {count_tick_epoch} epoch"
):
corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
assert not corrupted_objects_list
@allure.title("Temporarily disable a node")
@pytest.mark.parametrize("containers, storage_objects", [(5, 10)], indirect=True)
def test_temporarily_disable_a_node(
self,
containers: list[StorageContainer],
storage_objects: list[StorageObjectInfo],
default_wallet: str,
node_to_stop: ClusterNode,
cluster_state_controller: ClusterStateController,
):
with allure.step(f"Remove {node_to_stop} from the list of nodes"):
storage_nodes = list(set(self.cluster.storage_nodes) - {node_to_stop.storage_node})
with allure.step("Tick epoch"):
self.tick_epochs(1, storage_nodes[0])
with allure.step("Wait 2 block time"):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)
cluster_state_controller.stop_node_host(node=node_to_stop, mode="hard")
with allure.step(f"Check if the node {node_to_stop} has stopped"):
wait_for_host_offline(self.shell, node_to_stop.storage_node)
with allure.step("Verify that there are no corrupted objects"):
corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
assert not corrupted_objects_list
with allure.step(f"Check {node_to_stop} in map"):
self.wait_node_in_map(
node_to_stop.storage_node, self.shell, alive_node=storage_nodes[0]
)
cluster_state_controller.start_node_host(node_to_stop)
with allure.step("Verify that there are no corrupted objects"):
corrupted_objects_list = self.get_corrupted_objects_list(storage_nodes, storage_objects)
assert not corrupted_objects_list
View file
@ -1,62 +1,75 @@
import os
import logging
from datetime import datetime
from time import sleep
import allure
import pytest
from frostfs_testlib.analytics import test_case
from frostfs_testlib.hosting import Host
from frostfs_testlib.resources.common import PUBLIC_ACL
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.utils import datetime_utils
from pytest_tests.resources.common import FROSTFS_CONTRACT_CACHE_TIMEOUT, MORPH_BLOCK_TIME
from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.failover_utils import (
wait_all_storage_nodes_returned,
wait_object_replication,
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import CommandOptions, Shell
from frostfs_testlib.steps.cli.container import (
StorageContainer,
StorageContainerInfo,
create_container,
)
from pytest_tests.helpers.file_helper import generate_file, get_file_hash
from pytest_tests.helpers.frostfs_verbs import get_object, put_object_to_random_node
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.helpers.node_management import (
from frostfs_testlib.steps.cli.object import get_object, put_object_to_random_node
from frostfs_testlib.steps.node_management import (
check_node_in_map,
check_node_not_in_map,
exclude_node_from_network_map,
include_node_to_network_map,
stop_random_storage_nodes,
remove_nodes_from_map_morph,
wait_for_node_to_be_ready,
remove_nodes_from_map_morph
)
from pytest_tests.helpers.s3_helper import (
check_objects_in_bucket
)
from pytest_tests.steps import s3_gate_object
from pytest_tests.steps.s3_gate_base import TestS3GateBase
from pytest_tests.helpers.aws_cli_client import AwsCliClient
from pytest_tests.helpers.file_helper import (
generate_file,
get_file_hash,
)
from pytest_tests.helpers.node_management import (
check_node_in_map,
exclude_node_from_network_map,
include_node_to_network_map,
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils import datetime_utils
from frostfs_testlib.utils.failover_utils import (
wait_all_storage_nodes_returned,
wait_object_replication,
)
from frostfs_testlib.utils.file_keeper import FileKeeper
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
logger = logging.getLogger("NeoLogger")
stopped_nodes: list[StorageNode] = []
@pytest.fixture(scope="function")
@allure.title("Provide File Keeper")
def file_keeper():
keeper = FileKeeper()
yield keeper
keeper.restore_files()
@allure.step("Return all stopped hosts")
@pytest.fixture(scope="function", autouse=True)
def after_run_return_all_stopped_hosts(cluster: Cluster):
def after_run_return_all_stopped_hosts(client_shell: Shell, cluster: Cluster) -> str:
yield "After this test stopped services will be started automatically via fixture"
return_stopped_hosts(client_shell, cluster)
@allure.step("Return all stopped storage services after test")
@pytest.fixture(scope="function")
def after_run_return_all_stopped_services(cluster_state_controller: ClusterStateController):
yield
return_stopped_hosts(cluster)
cluster_state_controller.start_stopped_storage_services()
@allure.step("Return all stopped S3 GateWay services after test")
@pytest.fixture(scope="function")
def after_run_return_all_stopped_s3(cluster_state_controller: ClusterStateController):
yield
cluster_state_controller.start_stopped_s3_gate()
def panic_reboot_host(host: Host) -> None:
@ -67,13 +80,13 @@ def panic_reboot_host(host: Host) -> None:
shell.exec('sudo sh -c "echo b > /proc/sysrq-trigger"', options)
def return_stopped_hosts(cluster: Cluster) -> None:
def return_stopped_hosts(shell: Shell, cluster: Cluster) -> None:
for node in list(stopped_nodes):
with allure.step(f"Start host {node}"):
node.host.start_host()
stopped_nodes.remove(node)
wait_all_storage_nodes_returned(cluster)
wait_all_storage_nodes_returned(shell, cluster)
@pytest.mark.failover
@ -123,7 +136,7 @@ class TestFailoverStorage(ClusterTestBase):
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
with allure.step("Return all hosts"):
return_stopped_hosts(self.cluster)
return_stopped_hosts(self.shell, self.cluster)
with allure.step("Check object data is not corrupted"):
new_nodes = wait_object_replication(
@ -206,42 +219,87 @@ class TestFailoverStorage(ClusterTestBase):
)
assert get_file_hash(source_file_path) == get_file_hash(got_file_path)
@allure.title("Do not ignore unhealthy tree endpoints")
def test_unhealthy_tree(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
after_run_return_all_stopped_s3,
after_run_return_all_stopped_services,
):
default_node = self.cluster.cluster_nodes[0]
default_s3gate = self.cluster.s3_gates[0]
def pytest_generate_tests(metafunc):
with allure.step("Turn S3 GW off on default node"):
default_s3gate.stop_service()
with allure.step("Turn off storage on default node"):
cluster_state_controller.stop_storage_service(default_node)
with allure.step("Turn on S3 GW on default node"):
default_s3gate.start_service()
with allure.step("Turn on storage on default node"):
cluster_state_controller.start_stopped_storage_services()
with allure.step("Create bucket with REP 1 SELECT 1 policy"):
bucket = s3_client.create_bucket(
location_constraint="load-1-1",
)
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with allure.step("Turn off all storage nodes except default"):
for node in self.cluster.cluster_nodes[1:]:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Check that object is available"):
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.failover
@pytest.mark.failover_empty_map
class TestEmptyMap(TestS3GateBase):
class TestEmptyMap(ClusterTestBase):
"""
A set of tests that make the network map empty and verify that objects can still be read afterwards
"""
@allure.step("Teardown after EmptyMap offline test")
@pytest.fixture()
def empty_map_offline_teardown(self):
yield
with allure.step("Return all storage nodes to network map"):
for node in list(stopped_nodes):
include_node_to_network_map(
node, node, shell=self.shell, cluster=self.cluster
)
include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.remove(node)
@staticmethod
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)
@test_case.title("Test makes network map empty (offline all storage nodes)")
@test_case.priority(test_case.TestCasePriority.HIGH)
@test_case.suite_name("failovers")
@test_case.suite_section("test_failover_storage")
@pytest.mark.failover_empty_map_offlne
@allure.title("Test makes network map empty (offline all storage nodes)")
def test_offline_all_storage_nodes(self, bucket, simple_object_size, empty_map_offline_teardown):
def test_offline_all_storage_nodes(
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: int,
empty_map_offline_teardown,
):
"""
The test makes the network map empty (sets offline status on all storage nodes), then returns all nodes to the map and checks that the object can be read through S3.
Steps:
1. Check that bucket is empty
2: PUT object into bucket
@ -254,35 +312,31 @@ class TestEmptyMap(TestS3GateBase):
simple_object_size: size of object
"""
file_path = generate_file(simple_object_size)
file_name = self.object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put object into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
s3_client.put_object(bucket, file_path)
with allure.step("Check that object exists in bucket"):
check_objects_in_bucket(self.s3_client, bucket, bucket_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
storage_nodes = self.cluster.storage_nodes
with allure.step("Exclude all storage nodes from network map"):
for node in storage_nodes:
exclude_node_from_network_map(
node, node, shell=self.shell, cluster=self.cluster
)
exclude_node_from_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.append(node)
with allure.step("Return all storage nodes to network map"):
for node in storage_nodes:
include_node_to_network_map(
node, node, shell=self.shell, cluster=self.cluster
)
include_node_to_network_map(node, node, shell=self.shell, cluster=self.cluster)
stopped_nodes.remove(node)
with allure.step("Check that we can read object"):
check_objects_in_bucket(self.s3_client, bucket, bucket_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
@allure.step("Teardown after EmptyMap stop service test")
@pytest.fixture()
@ -306,17 +360,23 @@ class TestEmptyMap(TestS3GateBase):
@test_case.suite_section("test_failover_storage")
@pytest.mark.failover_empty_map_stop_service
@allure.title("Test makes network map empty (stop storage service on all nodes)")
def test_stop_all_storage_nodes(self, bucket, simple_object_size, empty_map_stop_service_teardown):
def test_stop_all_storage_nodes(
self,
s3_client: S3ClientWrapper,
bucket: str,
simple_object_size: int,
empty_map_stop_service_teardown,
):
"""
The test makes the network map empty (stops the storage service on all nodes,
then uses 'frostfs-adm morph delete-nodes' to delete the nodes from the map),
then starts all services and checks that the object can be read through S3.
Steps:
1. Check that bucket is empty
2: PUT object into bucket
3: Check that object exists in bucket
4: Exclude all storage nodes from network map (stop storage service
and manual exclude from map)
5: Return all storage nodes to network map
6: Check that we can read object from #2
@ -325,17 +385,17 @@ class TestEmptyMap(TestS3GateBase):
simple_object_size: size of object
"""
file_path = generate_file(simple_object_size)
file_name = self.object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put object into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
s3_client.put_object(bucket, file_path)
with allure.step("Check that object exists in bucket"):
check_objects_in_bucket(self.s3_client, bucket, bucket_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
with allure.step("Stop all storage nodes"):
for node in self.cluster.storage_nodes:
@ -343,17 +403,19 @@ class TestEmptyMap(TestS3GateBase):
node.stop_service()
stopped_nodes.append(node)
with allure.step(f"Remove all nodes from network map"):
remove_nodes_from_map_morph(shell=self.shell, cluster=self.cluster, remove_nodes=stopped_nodes)
with allure.step("Remove all nodes from network map"):
remove_nodes_from_map_morph(
shell=self.shell, cluster=self.cluster, remove_nodes=stopped_nodes
)
with allure.step("Return all storage nodes to network map"):
self.return_nodes_after_stop_with_check_empty_map(stopped_nodes)
with allure.step("Check that object exists in bucket"):
check_objects_in_bucket(self.s3_client, bucket, bucket_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
@allure.step("Return all nodes to cluster with check empty map first")
def return_nodes_after_stop_with_check_empty_map(self, return_nodes = None) -> None:
def return_nodes_after_stop_with_check_empty_map(self, return_nodes=None) -> None:
first_node = True
for node in list(return_nodes):
with allure.step(f"Start node {node}"):
@ -361,7 +423,7 @@ class TestEmptyMap(TestS3GateBase):
with allure.step(f"Waiting status ready for node {node}"):
wait_for_node_to_be_ready(node)
with allure.step(f"We need to make sure that network map is empty"):
with allure.step("Make sure that network map is empty"):
if first_node:
for check_node in list(return_nodes):
check_node_not_in_map(check_node, shell=self.shell, alive_node=node)
@ -372,3 +434,412 @@ class TestEmptyMap(TestS3GateBase):
check_node_in_map(node, shell=self.shell, alive_node=node)
stopped_nodes.remove(node)
@allure.title("Test S3 Object loss from fstree/blobovnicza, versioning is enabled")
def test_s3_fstree_blobovnicza_loss_versioning_on(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket()
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
object_versions = []
with allure.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
object_versions.append(put_object)
with allure.step("Stop all storage nodes"):
for node in self.cluster.cluster_nodes:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Delete blobovnicza and fstree from all nodes"):
for node in self.cluster.storage_nodes:
node.delete_blobovnicza()
node.delete_fstree()
with allure.step("Start all storage nodes"):
cluster_state_controller.start_stopped_storage_services()
# need to get Delete Marker first
with allure.step("Delete the object from the bucket"):
delete_object = s3_client.delete_object(bucket, file_name)
object_versions.append(delete_object["VersionId"])
# and now delete all versions of object (including Delete Markers)
with allure.step("Delete all versions of the object from the bucket"):
for version in object_versions:
delete_object = s3_client.delete_object(bucket, file_name, version_id=version)
with allure.step("Delete bucket"):
s3_client.delete_bucket(bucket)
@allure.title("Test S3 Object loss from fstree/blobovnicza, versioning is disabled")
def test_s3_fstree_blobovnicza_loss_versioning_off(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket()
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with allure.step("Stop all storage nodes"):
for node in self.cluster.cluster_nodes:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Delete blobovnicza and fstree from all nodes"):
for node in self.cluster.storage_nodes:
node.delete_blobovnicza()
node.delete_fstree()
with allure.step("Start all storage nodes"):
cluster_state_controller.start_stopped_storage_services()
with allure.step("Delete the object from the bucket"):
s3_client.delete_object(bucket, file_name)
with allure.step("Delete bucket"):
s3_client.delete_bucket(bucket)
@pytest.mark.skip(reason="Need to increase cache lifetime")
@pytest.mark.parametrize(
# versioning should NOT be VersioningStatus.SUSPENDED, it needs to be undefined
"versioning_status",
[VersioningStatus.ENABLED, None],
)
@allure.title(
"After Pilorama.db loss on all nodes list objects should return nothing in second listing"
)
def test_s3_pilorama_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
versioning_status: VersioningStatus,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket()
if versioning_status:
s3_helper.set_bucket_versioning(s3_client, bucket, versioning_status)
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with allure.step("Stop all storage nodes"):
for node in self.cluster.cluster_nodes:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Delete pilorama.db from all nodes"):
for node in self.cluster.storage_nodes:
node.delete_pilorama()
with allure.step("Start all storage nodes"):
cluster_state_controller.start_stopped_storage_services()
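        # Expectation behind the skip: the first listing is still served from the tree cache, and once it expires the lost piloramas yield an empty bucket.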
with allure.step("Check list objects first time"):
objects_list = s3_client.list_objects(bucket)
assert objects_list, f"Expected not empty bucket"
with allure.step("Check list objects second time"):
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Delete bucket"):
s3_client.delete_bucket(bucket)
@pytest.mark.failover
@pytest.mark.failover_data_loss
class TestStorageDataLoss(ClusterTestBase):
@allure.step("Get list of all piloramas on node")
def get_piloramas_list(self, cluster_state_controller, node) -> list:
data_directory_path = cluster_state_controller.get_data_directory()
cmd = f"sudo ls -1 {data_directory_path}/meta*/pilorama*"
shell = cluster_state_controller.host.get_shell()
stdout = shell.exec(cmd).stdout
piloramas = stdout.split("\n")
return piloramas
@allure.title(
"After metabase loss on all nodes operations on objects and buckets should be still available via S3"
)
@pytest.mark.metabase_loss
def test_metabase_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
complex_object_size: int,
cluster_state_controller: ClusterStateController,
after_run_return_all_stopped_services: str,
file_keeper: FileKeeper,
):
allure.dynamic.description(after_run_return_all_stopped_services)
with allure.step("Create bucket"):
bucket = s3_client.create_bucket()
with allure.step("Put objects into bucket"):
simple_object_path = generate_file(simple_object_size)
simple_object_key = s3_helper.object_key_from_file_path(simple_object_path)
complex_object_path = generate_file(complex_object_size)
complex_object_key = s3_helper.object_key_from_file_path(complex_object_path)
s3_client.put_object(bucket, simple_object_path)
s3_client.put_object(bucket, complex_object_path)
with allure.step("Check objects are in bucket"):
s3_helper.check_objects_in_bucket(
s3_client, bucket, expected_objects=[simple_object_key, complex_object_key]
)
with allure.step("Stop storage services on all nodes"):
cluster_state_controller.stop_all_storage_services()
with allure.step("Delete metabase from all nodes"):
for node in cluster_state_controller.cluster.storage_nodes:
node.delete_metabase()
with allure.step("Enable resync_metabase option for storage services"):
for storage_node in cluster_state_controller.cluster.storage_nodes:
with allure.step(f"Enable resync_metabase option for {storage_node}"):
config_file_path, config = storage_node.get_config()
if not config["storage"]["shard"]["default"]["resync_metabase"]:
file_keeper.add(storage_node, config_file_path)
config["storage"]["shard"]["default"]["resync_metabase"] = True
storage_node.save_config(config)
with allure.step("Start storage services on all nodes"):
cluster_state_controller.start_stopped_storage_services()
with allure.step("Delete objects from bucket"):
with allure.step("Delete simple object from bucket"):
with expect_not_raises():
s3_client.delete_object(bucket, simple_object_key)
with allure.step("Delete complex object from bucket"):
with expect_not_raises():
s3_client.delete_object(bucket, complex_object_key)
with allure.step("Delete bucket"):
with expect_not_raises():
s3_client.delete_bucket(bucket)
@allure.title(
"Write cache loss on one node should not affect shards and should not produce errors in log"
)
@pytest.mark.write_cache_loss
def test_write_cache_loss_on_one_node(
self,
node_under_test: ClusterNode,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
shards_watcher: ShardsWatcher,
default_wallet: str,
test_start_time: datetime,
after_run_return_all_stopped_services: str,
):
exception_messages = []
allure.dynamic.description(after_run_return_all_stopped_services)
with allure.step(f"Create container on node {node_under_test}"):
locode = node_under_test.storage_node.get_un_locode()
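            # Pin a single replica to the node under test by filtering the netmap on its UN-LOCODE attribute.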
placement_rule = f"""REP 1 IN X
CBF 1
SELECT 1 FROM C AS X
FILTER 'UN-LOCODE' EQ '{locode}' AS C"""
cid = create_container(
default_wallet,
self.shell,
node_under_test.storage_node.get_rpc_endpoint(),
rule=placement_rule,
)
container = StorageContainer(
StorageContainerInfo(cid, WalletInfo(default_wallet)),
self.shell,
cluster_state_controller.cluster,
)
with allure.step(f"Put couple objects to container on node {node_under_test}"):
storage_objects: list[StorageObjectInfo] = []
for _ in range(5):
storage_object = container.generate_object(
simple_object_size, endpoint=node_under_test.storage_node.get_rpc_endpoint()
)
storage_objects.append(storage_object)
with allure.step("Take shards snapshot"):
shards_watcher.take_shards_snapshot()
with allure.step(f"Stop storage service on node {node_under_test}"):
cluster_state_controller.stop_storage_service(node_under_test)
with allure.step(f"Delete write cache from node {node_under_test}"):
node_under_test.storage_node.delete_write_cache()
with allure.step(f"Start storage service on node {node_under_test}"):
cluster_state_controller.start_storage_service(node_under_test)
with allure.step("Objects should be available"):
for storage_object in storage_objects:
get_object(
storage_object.wallet_file_path,
container.get_id(),
storage_object.oid,
self.shell,
node_under_test.storage_node.get_rpc_endpoint(),
)
with allure.step("No shards should have new errors"):
shards_watcher.take_shards_snapshot()
shards_with_errors = shards_watcher.get_shards_with_new_errors()
if shards_with_errors:
exception_messages.append(f"Shards have new errors: {shards_with_errors}")
with allure.step("No shards should have degraded status"):
snapshot = shards_watcher.get_shards_snapshot()
for shard in snapshot:
status = snapshot[shard]["mode"]
if status != "read-write":
exception_messages.append(f"Shard {shard} changed status to {status}")
with allure.step("No related errors should be in log"):
if node_under_test.host.is_message_in_logs(
message_regex=r"\Wno such file or directory\W", since=test_start_time
):
exception_messages.append(f"Node {node_under_test} have shard errors in logs")
with allure.step("Pass test if no errors found"):
assert not exception_messages, "\n".join(exception_messages)
@allure.title(
"Test S3 Loss of one node should trigger use of tree and storage service in another node"
)
def test_s3_one_endpoint_loss(
self,
bucket,
s3_client: S3ClientWrapper,
simple_object_size: int,
after_run_return_all_stopped_services,
cluster_state_controller: ClusterStateController,
):
# TODO: need to check that s3 gate is connected to localhost (such metric will be supported in 1.3)
with allure.step(
"Stop one node and wait for rebalance connection of s3 gate to storage service"
):
current_node = self.cluster.cluster_nodes[0]
cluster_state_controller.stop_storage_service(current_node)
# waiting for rebalance connection of s3 gate to storage service
sleep(60)
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
@allure.title("After Pilorama.db loss on one node object are retrievable")
def test_s3_one_pilorama_loss(
self,
s3_client: S3ClientWrapper,
simple_object_size: int,
cluster_state_controller: ClusterStateController,
):
bucket = s3_client.create_bucket(
location_constraint="load-1-4",
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Check bucket versioning"):
bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
assert bucket_versioning == "Enabled", "Bucket should have enabled versioning"
file_path = generate_file(simple_object_size)
file_name = s3_helper.object_key_from_file_path(file_path)
object_versions = []
with allure.step("Put object into one bucket"):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
object_versions.append(put_object)
node_to_check = self.cluster.storage_nodes[0]
piloramas_list_before_removing = {}
with allure.step("Get list of all pilorama.db"):
piloramas_list_before_removing = self.get_piloramas_list(
node_to_check, cluster_state_controller
)
with allure.step("Stop all storage nodes"):
for node in self.cluster.cluster_nodes:
with allure.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_storage_service(node)
with allure.step("Delete pilorama.db from one node"):
node_to_check.delete_pilorama()
with allure.step("Start all storage nodes"):
cluster_state_controller.start_stopped_storage_services()
with allure.step("Tick epoch to trigger sync and then wait for 1 minute"):
self.tick_epochs(1)
sleep(120)
piloramas_list_afrer_removing = {}
with allure.step("Get list of all pilorama.db after sync"):
piloramas_list_afrer_removing = self.get_piloramas_list(
node_to_check, cluster_state_controller
)
assert (
piloramas_list_afrer_removing == piloramas_list_before_removing
), "List of pilorama.db is different"
with allure.step("Check bucket versioning"):
bucket_versioning = s3_client.get_bucket_versioning_status(bucket)
assert bucket_versioning == "Enabled", "Bucket should have enabled versioning"
with allure.step("Check list objects"):
objects_list = s3_client.list_objects(bucket)
assert objects_list, f"Expected not empty bucket"
with allure.step("Delete the object from the bucket"):
delete_object = s3_client.delete_object(bucket, file_name)
assert "DeleteMarker" in delete_object.keys(), "Delete markers not found"
with allure.step("Check list objects"):
objects_list = s3_client.list_objects_versions(bucket)
assert objects_list, f"Expected not empty bucket"
object_versions.append(delete_object["VersionId"])
# and now delete all versions of object (including Delete Markers)
with allure.step("Delete all versions of the object from the bucket"):
for version in object_versions:
delete_object = s3_client.delete_object(bucket, file_name, version_id=version)
with allure.step("Check list objects"):
objects_list = s3_client.list_objects_versions(bucket)
assert not objects_list, f"Expected empty bucket"
with allure.step("Delete bucket"):
s3_client.delete_bucket(bucket)
View file
@ -1,124 +0,0 @@
import allure
import pytest
from frostfs_testlib.hosting import Hosting
from pytest_tests.helpers.k6 import LoadParams
from pytest_tests.resources.common import (
HTTP_GATE_SERVICE_NAME_REGEX,
S3_GATE_SERVICE_NAME_REGEX,
STORAGE_NODE_SERVICE_NAME_REGEX,
)
from pytest_tests.resources.load_params import (
CONTAINER_PLACEMENT_POLICY,
CONTAINERS_COUNT,
DELETERS,
LOAD_NODE_SSH_PRIVATE_KEY_PATH,
LOAD_NODE_SSH_USER,
LOAD_NODES,
LOAD_NODES_COUNT,
LOAD_TIME,
LOAD_TYPE,
OBJ_COUNT,
OBJ_SIZE,
OUT_FILE,
READERS,
STORAGE_NODE_COUNT,
WRITERS,
)
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.steps.load import (
clear_cache_and_data,
get_services_endpoints,
init_s3_client,
multi_node_k6_run,
prepare_k6_instances,
start_stopped_nodes,
stop_unused_nodes,
)
ENDPOINTS_ATTRIBUTES = {
"http": {"regex": HTTP_GATE_SERVICE_NAME_REGEX, "endpoint_attribute": "endpoint"},
"grpc": {"regex": STORAGE_NODE_SERVICE_NAME_REGEX, "endpoint_attribute": "rpc_endpoint"},
"s3": {"regex": S3_GATE_SERVICE_NAME_REGEX, "endpoint_attribute": "endpoint"},
}
@pytest.mark.load
class TestLoad(ClusterTestBase):
@pytest.fixture(autouse=True)
def clear_cache_and_data(self, hosting: Hosting):
clear_cache_and_data(hosting=hosting)
yield
start_stopped_nodes()
@pytest.fixture(scope="session", autouse=True)
def init_s3_client(self, hosting: Hosting):
if "s3" in list(map(lambda x: x.lower(), LOAD_TYPE)):
init_s3_client(
load_nodes=LOAD_NODES,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
hosting=hosting,
container_placement_policy=CONTAINER_PLACEMENT_POLICY,
)
@pytest.mark.parametrize("obj_size, out_file", list(zip(OBJ_SIZE, OUT_FILE)))
@pytest.mark.parametrize("writers, readers, deleters", list(zip(WRITERS, READERS, DELETERS)))
@pytest.mark.parametrize("load_time", LOAD_TIME)
@pytest.mark.parametrize("node_count", STORAGE_NODE_COUNT)
@pytest.mark.parametrize("containers_count", CONTAINERS_COUNT)
@pytest.mark.parametrize("load_type", LOAD_TYPE)
@pytest.mark.parametrize("obj_count", OBJ_COUNT)
@pytest.mark.parametrize("load_nodes_count", LOAD_NODES_COUNT)
@pytest.mark.benchmark
@pytest.mark.grpc
def test_custom_load(
self,
obj_size,
out_file,
writers,
readers,
deleters,
load_time,
node_count,
obj_count,
load_type,
load_nodes_count,
containers_count,
hosting: Hosting,
):
allure.dynamic.title(
f"Load test - node_count = {node_count}, "
f"writers = {writers} readers = {readers}, "
f"deleters = {deleters}, obj_size = {obj_size}, "
f"load_time = {load_time}"
)
stop_unused_nodes(self.cluster.storage_nodes, node_count)
with allure.step("Get endpoints"):
endpoints_list = get_services_endpoints(
hosting=hosting,
service_name_regex=ENDPOINTS_ATTRIBUTES[LOAD_TYPE]["regex"],
endpoint_attribute=ENDPOINTS_ATTRIBUTES[LOAD_TYPE]["endpoint_attribute"],
)
endpoints = ",".join(endpoints_list[:node_count])
load_params = LoadParams(
endpoint=endpoints,
obj_size=obj_size,
containers_count=containers_count,
out_file=out_file,
obj_count=obj_count,
writers=writers,
readers=readers,
deleters=deleters,
load_time=load_time,
load_type=load_type,
)
load_nodes_list = LOAD_NODES[:load_nodes_count]
k6_load_instances = prepare_k6_instances(
load_nodes=load_nodes_list,
login=LOAD_NODE_SSH_USER,
pkey=LOAD_NODE_SSH_PRIVATE_KEY_PATH,
load_params=load_params,
)
with allure.step("Run load"):
multi_node_k6_run(k6_load_instances)
View file
@ -5,15 +5,11 @@ from typing import Optional, Tuple
import allure
import pytest
from frostfs_testlib.resources.common import OBJECT_NOT_FOUND, PUBLIC_ACL
from frostfs_testlib.utils import datetime_utils, string_utils
from pytest_tests.helpers.cluster import StorageNode
from pytest_tests.helpers.container import create_container, get_container
from pytest_tests.helpers.epoch import tick_epoch
from pytest_tests.helpers.failover_utils import wait_object_replication
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.frostfs_verbs import (
from frostfs_testlib.resources.common import FROSTFS_CONTRACT_CACHE_TIMEOUT, MORPH_BLOCK_TIME
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container, get_container
from frostfs_testlib.steps.cli.object import (
delete_object,
get_object,
get_object_from_random_node,
@ -21,7 +17,8 @@ from pytest_tests.helpers.frostfs_verbs import (
put_object,
put_object_to_random_node,
)
from pytest_tests.helpers.node_management import (
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.steps.node_management import (
check_node_in_map,
delete_node_data,
drop_object,
@ -32,15 +29,19 @@ from pytest_tests.helpers.node_management import (
node_shard_set_mode,
storage_node_healthcheck,
storage_node_set_status,
wait_for_node_to_be_ready
wait_for_node_to_be_ready,
)
from pytest_tests.helpers.storage_policy import get_nodes_with_object, get_simple_object_copies
from frostfs_testlib.steps.storage_policy import get_nodes_with_object, get_simple_object_copies
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import datetime_utils, string_utils
from frostfs_testlib.utils.failover_utils import wait_object_replication
from frostfs_testlib.utils.file_utils import generate_file
from pytest_tests.helpers.utility import (
placement_policy_from_container,
wait_for_gc_pass_on_storage_nodes,
)
from pytest_tests.resources.common import FROSTFS_CONTRACT_CACHE_TIMEOUT, MORPH_BLOCK_TIME
from pytest_tests.steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger")
check_nodes: list[StorageNode] = []
@ -133,7 +134,7 @@ class TestNodeManagement(ClusterTestBase):
simple_object_size,
):
"""
This test removes one node from pytest_tests.helpers.cluster then adds it back. The test uses basic control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
This test removes one node from frostfs_testlib.storage.cluster then adds it back. The test uses basic control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
"""
wallet = default_wallet
placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
@ -328,7 +329,7 @@ class TestNodeManagement(ClusterTestBase):
@pytest.mark.node_mgmt
@allure.title("FrostFS object could be dropped using control command")
def test_drop_object(self, default_wallet, complex_object_size, simple_object_size):
def test_drop_object(self, default_wallet, complex_object_size: int, simple_object_size: int):
"""
Test checks object could be dropped using `frostfs-cli control drop-objects` command.
"""
@ -412,6 +413,38 @@ class TestNodeManagement(ClusterTestBase):
oid = put_object_to_random_node(wallet, file_path, cid, self.shell, self.cluster)
delete_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
@pytest.mark.node_mgmt
@allure.title("Put object with stopped node")
def test_stop_node(self, default_wallet, return_nodes_after_test_run, simple_object_size: int):
wallet = default_wallet
placement_rule = "REP 3 SELECT 4 FROM * AS X"
source_file_path = generate_file(simple_object_size)
storage_nodes = self.cluster.storage_nodes
random_node = random.choice(storage_nodes[1:])
alive_node = random.choice(
[storage_node for storage_node in storage_nodes if storage_node.id != random_node.id]
)
cid = create_container(
wallet,
rule=placement_rule,
basic_acl=PUBLIC_ACL,
shell=self.shell,
endpoint=random_node.get_rpc_endpoint(),
)
with allure.step("Stop the random node"):
check_nodes.append(random_node)
random_node.stop_service()
with allure.step("Try to put an object and expect success"):
put_object(
wallet,
source_file_path,
cid,
shell=self.shell,
endpoint=alive_node.get_rpc_endpoint(),
)
self.return_nodes(alive_node)
@allure.step("Validate object has {expected_copies} copies")
def validate_object_copies(
self, wallet: str, placement_rule: str, file_path: str, expected_copies: int
View file
@ -4,7 +4,7 @@ import sys
import allure
import pytest
from frostfs_testlib.resources.common import (
from frostfs_testlib.resources.error_patterns import (
INVALID_LENGTH_SPECIFIER,
INVALID_OFFSET_SPECIFIER,
INVALID_RANGE_OVERFLOW,
@ -12,13 +12,8 @@ from frostfs_testlib.resources.common import (
OUT_OF_RANGE,
)
from frostfs_testlib.shell import Shell
from pytest import FixtureRequest
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.complex_object_actions import get_complex_object_split_ranges
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.file_helper import generate_file, get_file_content, get_file_hash
from pytest_tests.helpers.frostfs_verbs import (
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import (
get_object_from_random_node,
get_range,
get_range_hash,
@ -26,10 +21,14 @@ from pytest_tests.helpers.frostfs_verbs import (
put_object_to_random_node,
search_object,
)
from pytest_tests.helpers.storage_object_info import StorageObjectInfo
from pytest_tests.helpers.storage_policy import get_complex_object_copies, get_simple_object_copies
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.steps.storage_object import delete_objects
from frostfs_testlib.steps.complex_object_actions import get_complex_object_split_ranges
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_policy import get_complex_object_copies, get_simple_object_copies
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_content, get_file_hash
from pytest import FixtureRequest
logger = logging.getLogger("NeoLogger")
View file
@ -1,30 +1,24 @@
import allure
import pytest
from frostfs_testlib.resources.common import EACL_PUBLIC_READ_WRITE
from frostfs_testlib.resources.wellknown_acl import EACL_PUBLIC_READ_WRITE
from frostfs_testlib.shell import Shell
from pytest import FixtureRequest
from pytest_tests.helpers.acl import (
EACLAccess,
EACLOperation,
EACLRole,
EACLRule,
form_bearertoken_file,
)
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.container import (
from frostfs_testlib.steps.acl import form_bearertoken_file
from frostfs_testlib.steps.cli.container import (
REP_2_FOR_3_NODES_PLACEMENT_RULE,
SINGLE_PLACEMENT_RULE,
StorageContainer,
StorageContainerInfo,
create_container,
)
from pytest_tests.helpers.epoch import get_epoch
from pytest_tests.helpers.frostfs_verbs import delete_object, get_object
from pytest_tests.helpers.test_control import expect_not_raises
from pytest_tests.helpers.wallet import WalletFile
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.steps.storage_object import StorageObjectInfo
from frostfs_testlib.steps.cli.object import delete_object, get_object
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.steps.storage_object import StorageObjectInfo
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from pytest import FixtureRequest
@pytest.fixture(scope="module")
@ -57,9 +51,9 @@ def user_container(
endpoint=cluster.default_rpc_endpoint,
)
# Deliberately using s3gate wallet here to test bearer token
s3gate = cluster.s3gates[0]
s3gate = cluster.s3_gates[0]
return StorageContainer(
StorageContainerInfo(container_id, WalletFile.from_node(s3gate)),
StorageContainerInfo(container_id, WalletInfo.from_node(s3gate)),
client_shell,
cluster,
)
@ -112,7 +106,7 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
f"Object can be deleted from any node using s3gate wallet with bearer token for {request.node.callspec.id}"
)
s3_gate_wallet = self.cluster.s3gates[0]
s3_gate_wallet = self.cluster.s3_gates[0]
with allure.step("Try to delete each object from first storage node"):
for storage_object in storage_objects:
with expect_not_raises():
@ -148,7 +142,7 @@ class TestObjectApiWithBearerToken(ClusterTestBase):
f"Object can be fetched from any node using s3gate wallet with bearer token for {request.node.callspec.id}"
)
s3_gate_wallet = self.cluster.s3gates[0]
s3_gate_wallet = self.cluster.s3_gates[0]
with allure.step("Put one object to container"):
epoch = self.get_epoch()
storage_object = user_container.generate_object(

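The same refactor renames `cluster.s3gates` to `cluster.s3_gates` and `WalletFile` to `WalletInfo`. A hedged sketch of the updated fixture wiring, with `client_shell` and `cluster` assumed to come from the shared conftest:

```python
from frostfs_testlib.steps.cli.container import StorageContainer, StorageContainerInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo


def make_user_container(container_id: str, client_shell, cluster) -> StorageContainer:
    # Deliberately reuse the s3 gate wallet, as the fixture above does.
    s3gate = cluster.s3_gates[0]  # was cluster.s3gates[0] before the rename
    return StorageContainer(
        StorageContainerInfo(container_id, WalletInfo.from_node(s3gate)),
        client_shell,
        cluster,
    )
```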

@ -2,18 +2,15 @@ import logging
import allure
import pytest
from frostfs_testlib.resources.common import OBJECT_NOT_FOUND
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import get_object_from_random_node, put_object_to_random_node, head_object
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from pytest import FixtureRequest
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.epoch import get_epoch
from pytest_tests.helpers.file_helper import generate_file, get_file_hash
from pytest_tests.helpers.frostfs_verbs import (
get_object_from_random_node,
put_object_to_random_node,
)
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
from pytest_tests.steps.cluster_test_base import ClusterTestBase
logger = logging.getLogger("NeoLogger")
@ -57,6 +54,19 @@ class TestObjectApiLifetime(ClusterTestBase):
# Wait for GC, because object with expiration is counted as alive until GC removes it
wait_for_gc_pass_on_storage_nodes()
with allure.step("Check object deleted because it expires-on epoch"):
with allure.step("Check object deleted because it expires on epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)
with allure.step("Tick additional epoch"):
self.tick_epoch()
wait_for_gc_pass_on_storage_nodes()
with allure.step("Check object deleted because it expires on previous epoch"):
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
head_object(wallet, cid, oid, self.shell, self.cluster.default_rpc_endpoint)
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
get_object_from_random_node(wallet, cid, oid, self.shell, self.cluster)

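The added block ticks one extra epoch and re-checks both verbs, so the expectation can be factored out. A minimal sketch of that check, using only names imported in the hunk above:

```python
import pytest

from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.steps.cli.object import get_object_from_random_node, head_object


def assert_object_expired(wallet: str, cid: str, oid: str, shell, cluster) -> None:
    # Both head and get must fail once GC has collected the expired object.
    with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
        head_object(wallet, cid, oid, shell, cluster.default_rpc_endpoint)
    with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
        get_object_from_random_node(wallet, cid, oid, shell, cluster)
```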

@ -3,7 +3,8 @@ import re
import allure
import pytest
from frostfs_testlib.resources.common import (
from frostfs_testlib.resources.common import STORAGE_GC_TIME
from frostfs_testlib.resources.error_patterns import (
LIFETIME_REQUIRED,
LOCK_NON_REGULAR_OBJECT,
LOCK_OBJECT_EXPIRATION,
@ -13,23 +14,29 @@ from frostfs_testlib.resources.common import (
OBJECT_NOT_FOUND,
)
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import (
StorageContainer,
StorageContainerInfo,
create_container,
)
from frostfs_testlib.steps.cli.object import delete_object, head_object, lock_object
from frostfs_testlib.steps.complex_object_actions import get_link_object, get_storage_object_chunks
from frostfs_testlib.steps.epoch import ensure_fresh_epoch, get_epoch, tick_epoch
from frostfs_testlib.steps.node_management import drop_object
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_policy import get_nodes_with_object
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.storage_object_info import (
LockObjectInfo,
StorageObjectInfo,
)
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises, wait_for_success
from frostfs_testlib.utils import datetime_utils
from pytest import FixtureRequest
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.complex_object_actions import get_link_object, get_storage_object_chunks
from pytest_tests.helpers.container import StorageContainer, StorageContainerInfo, create_container
from pytest_tests.helpers.epoch import ensure_fresh_epoch, get_epoch, tick_epoch
from pytest_tests.helpers.frostfs_verbs import delete_object, head_object, lock_object
from pytest_tests.helpers.node_management import drop_object
from pytest_tests.helpers.storage_object_info import LockObjectInfo, StorageObjectInfo
from pytest_tests.helpers.storage_policy import get_nodes_with_object
from pytest_tests.helpers.test_control import expect_not_raises, wait_for_success
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
from pytest_tests.helpers.wallet import WalletFactory, WalletFile
from pytest_tests.resources.common import STORAGE_GC_TIME
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.steps.storage_object import delete_objects
logger = logging.getLogger("NeoLogger")
@ -49,7 +56,7 @@ def user_wallet(wallet_factory: WalletFactory):
@pytest.fixture(
scope="module",
)
def user_container(user_wallet: WalletFile, client_shell: Shell, cluster: Cluster):
def user_container(user_wallet: WalletInfo, client_shell: Shell, cluster: Cluster):
container_id = create_container(
user_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
)

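For the lock tests the fixtures stay structurally the same; only types and import paths change. A condensed sketch, assuming `WalletFactory` exposes a `create_wallet()` helper (that call is not shown in this diff):

```python
import pytest

from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo


@pytest.fixture(scope="module")
def user_wallet(wallet_factory: WalletFactory) -> WalletInfo:
    return wallet_factory.create_wallet()  # assumed helper, not shown in this diff


@pytest.fixture(scope="module")
def user_container(user_wallet: WalletInfo, client_shell: Shell, cluster: Cluster) -> str:
    return create_container(
        user_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
    )
```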

@ -2,13 +2,8 @@ import logging
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from pytest_tests.helpers.acl import (
EACLAccess,
EACLOperation,
EACLRole,
EACLRule,
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.acl import (
bearer_token_base64_from_file,
create_eacl,
form_bearertoken_file,
@ -16,10 +11,11 @@ from pytest_tests.helpers.acl import (
sign_bearer,
wait_for_cache_expired,
)
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.http_gate import get_object_and_verify_hashes, upload_via_http_gate_curl
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
logger = logging.getLogger("NeoLogger")
@ -120,7 +116,7 @@ class Test_http_bearer(ClusterTestBase):
endpoint=self.cluster.default_http_gate_endpoint,
headers=headers,
)
get_object_and_verify_hashes(
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,

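`get_object_and_verify_hashes` becomes `verify_object_hash` with its keyword interface unchanged. A method-style sketch of the upload-then-verify path used here; the `cid` and `shell` keywords and the curl-style `headers` list sit outside the visible hunks and are assumptions:

```python
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash


def upload_with_headers_and_verify(self, cid: str, file_path: str, headers: list[str]) -> None:
    oid = upload_via_http_gate_curl(
        cid=cid,
        filepath=file_path,
        endpoint=self.cluster.default_http_gate_endpoint,
        headers=headers,  # e.g. a bearer token header produced by the steps above
    )
    verify_object_hash(
        oid=oid,
        file_name=file_path,
        wallet=self.wallet,
        cid=cid,
        shell=self.shell,
        nodes=self.cluster.storage_nodes,
        endpoint=self.cluster.default_http_gate_endpoint,
    )
```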

@ -2,15 +2,12 @@ import os
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.epoch import get_epoch
from pytest_tests.helpers.file_helper import generate_file, get_file_hash
from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node
from pytest_tests.helpers.http_gate import (
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.epoch import get_epoch
from frostfs_testlib.steps.http.http_gate import (
attr_into_header,
get_object_and_verify_hashes,
get_object_by_attr_and_verify_hashes,
get_via_http_curl,
get_via_http_gate,
@ -18,9 +15,12 @@ from pytest_tests.helpers.http_gate import (
try_to_get_object_and_expect_error,
upload_via_http_gate,
upload_via_http_gate_curl,
verify_object_hash,
)
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash
from pytest_tests.helpers.utility import wait_for_gc_pass_on_storage_nodes
from pytest_tests.steps.cluster_test_base import ClusterTestBase
OBJECT_NOT_FOUND_ERROR = "not found"
@ -43,7 +43,7 @@ class TestHttpGate(ClusterTestBase):
TestHttpGate.wallet = default_wallet
@allure.title("Test Put over gRPC, Get over HTTP")
def test_put_grpc_get_http(self, complex_object_size, simple_object_size):
def test_put_grpc_get_http(self, complex_object_size: int, simple_object_size: int):
"""
Test that object can be put using gRPC interface and get using HTTP.
@ -86,7 +86,7 @@ class TestHttpGate(ClusterTestBase):
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
get_object_and_verify_hashes(
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,
@ -100,7 +100,7 @@ class TestHttpGate(ClusterTestBase):
@allure.link("https://github.com/TrueCloudLab/frostfs-http-gw#downloading", name="downloading")
@allure.title("Test Put over HTTP, Get over HTTP")
@pytest.mark.smoke
def test_put_http_get_http(self, complex_object_size, simple_object_size):
def test_put_http_get_http(self, complex_object_size: int, simple_object_size: int):
"""
Test that object can be put and get using HTTP interface.
@ -133,7 +133,7 @@ class TestHttpGate(ClusterTestBase):
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
get_object_and_verify_hashes(
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,
@ -157,7 +157,7 @@ class TestHttpGate(ClusterTestBase):
],
ids=["simple", "hyphen", "percent"],
)
def test_put_http_get_http_with_headers(self, attributes: dict, simple_object_size):
def test_put_http_get_http_with_headers(self, attributes: dict, simple_object_size: int):
"""
Test that object can be downloaded using different attributes in HTTP header.
@ -198,7 +198,7 @@ class TestHttpGate(ClusterTestBase):
@allure.title("Test Expiration-Epoch in HTTP header")
@pytest.mark.parametrize("epoch_gap", [0, 1])
def test_expiration_epoch_in_http(self, simple_object_size, epoch_gap):
def test_expiration_epoch_in_http(self, simple_object_size: int, epoch_gap):
endpoint = self.cluster.default_rpc_endpoint
http_endpoint = self.cluster.default_http_gate_endpoint
min_valid_epoch = get_epoch(self.shell, self.cluster) + epoch_gap
@ -247,7 +247,7 @@ class TestHttpGate(ClusterTestBase):
get_via_http_gate(cid=cid, oid=oid, endpoint=http_endpoint)
@allure.title("Test Zip in HTTP header")
def test_zip_in_http(self, complex_object_size, simple_object_size):
def test_zip_in_http(self, complex_object_size: int, simple_object_size: int):
cid = create_container(
self.wallet,
shell=self.shell,
@ -286,7 +286,7 @@ class TestHttpGate(ClusterTestBase):
@pytest.mark.long
@allure.title("Test Put over HTTP/Curl, Get over HTTP/Curl for large object")
def test_put_http_get_http_large_file(self, complex_object_size):
def test_put_http_get_http_large_file(self, complex_object_size: int):
"""
This test checks upload and download using curl with a 'large' object.
Here 'large' means an object up to 20 MB in size.
@ -312,7 +312,7 @@ class TestHttpGate(ClusterTestBase):
endpoint=self.cluster.default_http_gate_endpoint,
)
get_object_and_verify_hashes(
verify_object_hash(
oid=oid_gate,
file_name=file_path,
wallet=self.wallet,
@ -321,7 +321,7 @@ class TestHttpGate(ClusterTestBase):
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
)
get_object_and_verify_hashes(
verify_object_hash(
oid=oid_curl,
file_name=file_path,
wallet=self.wallet,
@ -333,7 +333,7 @@ class TestHttpGate(ClusterTestBase):
)
@allure.title("Test Put/Get over HTTP using Curl utility")
def test_put_http_get_http_curl(self, complex_object_size, simple_object_size):
def test_put_http_get_http_curl(self, complex_object_size: int, simple_object_size: int):
"""
Test checks upload and download over HTTP using curl utility.
"""
@ -359,7 +359,7 @@ class TestHttpGate(ClusterTestBase):
)
for oid, file_path in ((oid_simple, file_path_simple), (oid_large, file_path_large)):
get_object_and_verify_hashes(
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,

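The newly annotated tests all follow the same cross-protocol pattern. A method-style sketch of the "Put over gRPC, Get over HTTP" flow, assuming `get_via_http_gate` returns a path to the downloaded file (as its use alongside the hash helpers suggests):

```python
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.http.http_gate import get_via_http_gate
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash


def put_grpc_get_http(self, cid: str, simple_object_size: int) -> None:
    file_path = generate_file(simple_object_size)
    oid = put_object_to_random_node(self.wallet, file_path, cid, self.shell, self.cluster)
    downloaded = get_via_http_gate(
        cid=cid, oid=oid, endpoint=self.cluster.default_http_gate_endpoint
    )
    assert get_file_hash(file_path) == get_file_hash(downloaded)
```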

@ -3,26 +3,25 @@ import os
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from pytest import FixtureRequest
from pytest_tests.helpers.container import (
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import (
create_container,
delete_container,
list_containers,
wait_for_container_deletion,
)
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.frostfs_verbs import delete_object
from pytest_tests.helpers.http_gate import (
from frostfs_testlib.steps.cli.object import delete_object
from frostfs_testlib.steps.http.http_gate import (
attr_into_str_header_curl,
get_object_by_attr_and_verify_hashes,
try_to_get_object_and_expect_error,
try_to_get_object_via_passed_request_and_expect_error,
upload_via_http_gate_curl,
)
from pytest_tests.helpers.storage_object_info import StorageObjectInfo
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
from pytest import FixtureRequest
OBJECT_ALREADY_REMOVED_ERROR = "object already removed"
logger = logging.getLogger("NeoLogger")
@ -202,6 +201,7 @@ class Test_http_headers(ClusterTestBase):
cid=storage_object_1.cid,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
await_mode=True,
)
self.tick_epoch()
wait_for_container_deletion(

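The only functional change in this file is `await_mode=True` on container deletion. A sketch of the full sequence; the leading wallet argument and the `wait_for_container_deletion` signature fall outside the hunk and are assumptions:

```python
from frostfs_testlib.steps.cli.container import delete_container, wait_for_container_deletion


def remove_container_and_wait(self, wallet: str, cid: str) -> None:
    delete_container(
        wallet,  # assumed leading argument, outside the hunk above
        cid=cid,
        shell=self.shell,
        endpoint=self.cluster.default_rpc_endpoint,
        await_mode=True,  # new: block until the deletion is accepted
    )
    self.tick_epoch()
    wait_for_container_deletion(
        wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
    )
```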

@ -2,17 +2,16 @@ import logging
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node
from pytest_tests.helpers.http_gate import (
get_object_and_verify_hashes,
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import put_object_to_random_node
from frostfs_testlib.steps.http.http_gate import (
get_object_by_attr_and_verify_hashes,
try_to_get_object_via_passed_request_and_expect_error,
verify_object_hash,
)
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
logger = logging.getLogger("NeoLogger")
@ -84,7 +83,7 @@ class Test_http_object(ClusterTestBase):
attributes=f"{key_value1},{key_value2}",
)
with allure.step("Get object and verify hashes [ get/$CID/$OID ]"):
get_object_and_verify_hashes(
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,


@ -2,12 +2,11 @@ import logging
import allure
import pytest
from frostfs_testlib.resources.common import PUBLIC_ACL
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.http_gate import get_object_and_verify_hashes, upload_via_http_gate_curl
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.http.http_gate import upload_via_http_gate_curl, verify_object_hash
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
logger = logging.getLogger("NeoLogger")
@ -59,7 +58,7 @@ class Test_http_streaming(ClusterTestBase):
oid = upload_via_http_gate_curl(
cid=cid, filepath=file_path, endpoint=self.cluster.default_http_gate_endpoint
)
get_object_and_verify_hashes(
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,


@ -5,23 +5,23 @@ from typing import Optional
import allure
import pytest
from frostfs_testlib.resources.common import OBJECT_NOT_FOUND, PUBLIC_ACL
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.epoch import get_epoch, wait_for_epochs_align
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.frostfs_verbs import (
from frostfs_testlib.resources.error_patterns import OBJECT_NOT_FOUND
from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import (
get_netmap_netinfo,
get_object_from_random_node,
head_object,
)
from pytest_tests.helpers.http_gate import (
from frostfs_testlib.steps.epoch import get_epoch, wait_for_epochs_align
from frostfs_testlib.steps.http.http_gate import (
attr_into_str_header_curl,
get_object_and_verify_hashes,
try_to_get_object_and_expect_error,
upload_via_http_gate_curl,
verify_object_hash,
)
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file
logger = logging.getLogger("NeoLogger")
EXPIRATION_TIMESTAMP_HEADER = "__SYSTEM__EXPIRATION_TIMESTAMP"
@ -122,7 +122,7 @@ class Test_http_system_header(ClusterTestBase):
endpoint=self.cluster.default_http_gate_endpoint,
headers=attr_into_str_header_curl(attributes),
)
get_object_and_verify_hashes(
verify_object_hash(
oid=oid,
file_name=file_path,
wallet=self.wallet,

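A sketch of how the system-header tests attach attributes to a curl upload, using the constant and helpers imported above; only the call shape shown in the hunk is assumed:

```python
from frostfs_testlib.steps.http.http_gate import (
    attr_into_str_header_curl,
    upload_via_http_gate_curl,
)

EXPIRATION_TIMESTAMP_HEADER = "__SYSTEM__EXPIRATION_TIMESTAMP"


def upload_with_expiration(self, cid: str, file_path: str, timestamp: int) -> str:
    # attr_into_str_header_curl converts the attribute dict into curl headers.
    attributes = {EXPIRATION_TIMESTAMP_HEADER: timestamp}
    return upload_via_http_gate_curl(
        cid=cid,
        filepath=file_path,
        endpoint=self.cluster.default_http_gate_endpoint,
        headers=attr_into_str_header_curl(attributes),
    )
```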

@ -1,72 +1,67 @@
import allure
import pytest
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.s3_helper import assert_s3_acl, object_key_from_file_path
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
from pytest_tests.steps.s3_gate_base import TestS3GateBase
def pytest_generate_tests(metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.sanity
@pytest.mark.acl
@pytest.mark.s3_gate
class TestS3GateACL(TestS3GateBase):
class TestS3GateACL:
@allure.title("Test S3: Object ACL")
def test_s3_object_ACL(self, bucket, simple_object_size):
@pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
def test_s3_object_ACL(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put object into bucket, Check ACL is empty"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name)
s3_client.put_object(bucket, file_path)
obj_acl = s3_client.get_object_acl(bucket, file_name)
assert obj_acl == [], f"Expected ACL is empty, got {obj_acl}"
with allure.step("Put object ACL = public-read"):
s3_gate_object.put_object_acl_s3(self.s3_client, bucket, file_name, "public-read")
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name)
assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
s3_client.put_object_acl(bucket, file_name, "public-read")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
with allure.step("Put object ACL = private"):
s3_gate_object.put_object_acl_s3(self.s3_client, bucket, file_name, "private")
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name)
assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
s3_client.put_object_acl(bucket, file_name, "private")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="CanonicalUser")
with allure.step(
"Put object with grant-read uri=http://acs.amazonaws.com/groups/global/AllUsers"
):
s3_gate_object.put_object_acl_s3(
self.s3_client,
s3_client.put_object_acl(
bucket,
file_name,
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
obj_acl = s3_gate_object.get_object_acl_s3(self.s3_client, bucket, file_name)
assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
obj_acl = s3_client.get_object_acl(bucket, file_name)
s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
@allure.title("Test S3: Bucket ACL")
def test_s3_bucket_ACL(self):
@pytest.mark.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
def test_s3_bucket_ACL(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with ACL = public-read-write"):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True, acl="public-read-write")
bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket)
assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
bucket = s3_client.create_bucket(
object_lock_enabled_for_bucket=True, acl="public-read-write"
)
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
with allure.step("Change bucket ACL to private"):
s3_gate_bucket.put_bucket_acl_s3(self.s3_client, bucket, acl="private")
bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket)
assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")
s3_client.put_bucket_acl(bucket, acl="private")
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")
with allure.step(
"Change bucket acl to --grant-write uri=http://acs.amazonaws.com/groups/global/AllUsers"
):
s3_gate_bucket.put_bucket_acl_s3(
self.s3_client,
s3_client.put_bucket_acl(
bucket,
grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket)
assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")

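The wrapper-based style replaces `TestS3GateBase` and the `s3_gate_bucket`/`s3_gate_object` modules with methods on `S3ClientWrapper`, parametrized per test. A minimal sketch, assuming the suite's `bucket` and `simple_object_size` fixtures:

```python
import pytest

from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file


class TestObjectAclSketch:
    @pytest.mark.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
    def test_public_read(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
        file_path = generate_file(simple_object_size)
        file_name = s3_helper.object_key_from_file_path(file_path)
        s3_client.put_object(bucket, file_path)
        s3_client.put_object_acl(bucket, file_name, "public-read")
        obj_acl = s3_client.get_object_acl(bucket, file_name)
        s3_helper.assert_s3_acl(acl_grants=obj_acl, permitted_users="AllUsers")
```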

@ -2,142 +2,132 @@ from datetime import datetime, timedelta
import allure
import pytest
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.s3_helper import (
assert_object_lock_mode,
assert_s3_acl,
check_objects_in_bucket,
object_key_from_file_path,
)
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
from pytest_tests.steps.s3_gate_base import TestS3GateBase
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file
def pytest_generate_tests(metafunc):
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_bucket
class TestS3GateBucket(TestS3GateBase):
class TestS3GateBucket:
@allure.title("Test S3: Create Bucket with different ACL")
def test_s3_create_bucket_with_ACL(self):
def test_s3_create_bucket_with_ACL(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with ACL private"):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True, acl="private")
bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket)
assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True, acl="private")
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="CanonicalUser")
with allure.step("Create bucket with ACL = public-read"):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client, True, acl="public-read")
bucket_acl_1 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_1)
assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
bucket_1 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True, acl="public-read"
)
bucket_acl_1 = s3_client.get_bucket_acl(bucket_1)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
with allure.step("Create bucket with ACL public-read-write"):
bucket_2 = s3_gate_bucket.create_bucket_s3(
self.s3_client, True, acl="public-read-write"
bucket_2 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True, acl="public-read-write"
)
bucket_acl_2 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_2)
assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
with allure.step("Create bucket with ACL = authenticated-read"):
bucket_3 = s3_gate_bucket.create_bucket_s3(
self.s3_client, True, acl="authenticated-read"
bucket_3 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True, acl="authenticated-read"
)
bucket_acl_3 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_3)
assert_s3_acl(acl_grants=bucket_acl_3, permitted_users="AllUsers")
bucket_acl_3 = s3_client.get_bucket_acl(bucket_3)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_3, permitted_users="AllUsers")
@allure.title("Test S3: Create Bucket with different ACL by grand")
def test_s3_create_bucket_with_grands(self):
def test_s3_create_bucket_with_grands(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with --grant-read"):
bucket = s3_gate_bucket.create_bucket_s3(
self.s3_client,
True,
bucket = s3_client.create_bucket(
object_lock_enabled_for_bucket=True,
grant_read="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket)
assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
bucket_acl = s3_client.get_bucket_acl(bucket)
s3_helper.assert_s3_acl(acl_grants=bucket_acl, permitted_users="AllUsers")
with allure.step("Create bucket with --grant-wtite"):
bucket_1 = s3_gate_bucket.create_bucket_s3(
self.s3_client,
True,
bucket_1 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True,
grant_write="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl_1 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_1)
assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
bucket_acl_1 = s3_client.get_bucket_acl(bucket_1)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_1, permitted_users="AllUsers")
with allure.step("Create bucket with --grant-full-control"):
bucket_2 = s3_gate_bucket.create_bucket_s3(
self.s3_client,
True,
bucket_2 = s3_client.create_bucket(
object_lock_enabled_for_bucket=True,
grant_full_control="uri=http://acs.amazonaws.com/groups/global/AllUsers",
)
bucket_acl_2 = s3_gate_bucket.get_bucket_acl(self.s3_client, bucket_2)
assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
bucket_acl_2 = s3_client.get_bucket_acl(bucket_2)
s3_helper.assert_s3_acl(acl_grants=bucket_acl_2, permitted_users="AllUsers")
@allure.title("Test S3: create bucket with object lock")
def test_s3_bucket_object_lock(self, simple_object_size):
def test_s3_bucket_object_lock(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Create bucket with --no-object-lock-enabled-for-bucket"):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, False)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
date_obj = datetime.utcnow() + timedelta(days=1)
with pytest.raises(
Exception, match=r".*Object Lock configuration does not exist for this bucket.*"
):
# An error occurred (ObjectLockConfigurationNotFoundError) when calling the PutObject operation (reached max retries: 0):
# Object Lock configuration does not exist for this bucket
s3_gate_object.put_object_s3(
self.s3_client,
s3_client.put_object(
bucket,
file_path,
ObjectLockMode="COMPLIANCE",
ObjectLockRetainUntilDate=date_obj.strftime("%Y-%m-%dT%H:%M:%S"),
object_lock_mode="COMPLIANCE",
object_lock_retain_until_date=date_obj.strftime("%Y-%m-%dT%H:%M:%S"),
)
with allure.step("Create bucket with --object-lock-enabled-for-bucket"):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client, True)
bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
date_obj_1 = datetime.utcnow() + timedelta(days=1)
s3_gate_object.put_object_s3(
self.s3_client,
s3_client.put_object(
bucket_1,
file_path,
ObjectLockMode="COMPLIANCE",
ObjectLockRetainUntilDate=date_obj_1.strftime("%Y-%m-%dT%H:%M:%S"),
ObjectLockLegalHoldStatus="ON",
object_lock_mode="COMPLIANCE",
object_lock_retain_until_date=date_obj_1.strftime("%Y-%m-%dT%H:%M:%S"),
object_lock_legal_hold_status="ON",
)
assert_object_lock_mode(
self.s3_client, bucket_1, file_name, "COMPLIANCE", date_obj_1, "ON"
s3_helper.assert_object_lock_mode(
s3_client, bucket_1, file_name, "COMPLIANCE", date_obj_1, "ON"
)
@allure.title("Test S3: delete bucket")
def test_s3_delete_bucket(self, simple_object_size):
def test_s3_delete_bucket(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path_1 = generate_file(simple_object_size)
file_name_1 = object_key_from_file_path(file_path_1)
file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(simple_object_size)
file_name_2 = object_key_from_file_path(file_path_2)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
file_name_2 = s3_helper.object_key_from_file_path(file_path_2)
bucket = s3_client.create_bucket()
with allure.step("Put two objects into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_1)
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path_2)
check_objects_in_bucket(self.s3_client, bucket, [file_name_1, file_name_2])
s3_client.put_object(bucket, file_path_1)
s3_client.put_object(bucket, file_path_2)
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name_1, file_name_2])
with allure.step("Try to delete not empty bucket and get error"):
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket)
s3_client.delete_bucket(bucket)
with allure.step("Delete object in bucket"):
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name_1)
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name_2)
check_objects_in_bucket(self.s3_client, bucket, [])
s3_client.delete_object(bucket, file_name_1)
s3_client.delete_object(bucket, file_name_2)
s3_helper.check_objects_in_bucket(s3_client, bucket, [])
with allure.step(f"Delete empty bucket"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket)
with allure.step("Delete empty bucket"):
s3_client.delete_bucket(bucket)
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_gate_bucket.head_bucket(self.s3_client, bucket)
s3_client.head_bucket(bucket)

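Object-lock arguments move from boto-style `ObjectLockMode=` keywords to snake_case wrapper arguments. A sketch of a compliant put plus the helper assertion, with names taken from the hunks above:

```python
from datetime import datetime, timedelta

from frostfs_testlib.s3 import S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper


def put_locked_object(s3_client: S3ClientWrapper, bucket: str, file_path: str, file_name: str):
    retain_until = datetime.utcnow() + timedelta(days=1)
    s3_client.put_object(
        bucket,
        file_path,
        object_lock_mode="COMPLIANCE",
        object_lock_retain_until_date=retain_until.strftime("%Y-%m-%dT%H:%M:%S"),
        object_lock_legal_hold_status="ON",
    )
    s3_helper.assert_object_lock_mode(
        s3_client, bucket, file_name, "COMPLIANCE", retain_until, "ON"
    )
```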

@ -4,33 +4,26 @@ from random import choice, choices
import allure
import pytest
from pytest_tests.helpers.aws_cli_client import AwsCliClient
from pytest_tests.helpers.epoch import tick_epoch
from pytest_tests.helpers.file_helper import (
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.utils.file_utils import (
generate_file,
generate_file_with_content,
get_file_content,
get_file_hash,
split_file,
)
from pytest_tests.helpers.s3_helper import (
check_objects_in_bucket,
check_tags_by_bucket,
check_tags_by_object,
set_bucket_versioning,
try_to_get_objects_and_expect_error,
)
from pytest_tests.resources.common import ASSETS_DIR
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
from pytest_tests.steps.s3_gate_base import TestS3GateBase
logger = logging.getLogger("NeoLogger")
def pytest_generate_tests(metafunc):
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@allure.link(
@ -39,108 +32,121 @@ def pytest_generate_tests(metafunc):
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_base
class TestS3Gate(TestS3GateBase):
class TestS3Gate:
@allure.title("Test S3 Bucket API")
def test_s3_buckets(self, simple_object_size):
def test_s3_buckets(
self,
s3_client: S3ClientWrapper,
client_shell: Shell,
cluster: Cluster,
simple_object_size: int,
):
"""
Test base S3 Bucket API (Create/List/Head/Delete).
"""
file_path = generate_file(simple_object_size)
file_name = self.object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Create buckets"):
bucket_1 = s3_gate_bucket.create_bucket_s3(self.s3_client, True)
set_bucket_versioning(self.s3_client, bucket_1, s3_gate_bucket.VersioningStatus.ENABLED)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client)
bucket_1 = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
s3_helper.set_bucket_versioning(s3_client, bucket_1, VersioningStatus.ENABLED)
bucket_2 = s3_client.create_bucket()
with allure.step("Check buckets are presented in the system"):
buckets = s3_gate_bucket.list_buckets_s3(self.s3_client)
buckets = s3_client.list_buckets()
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 in buckets, f"Expected bucket {bucket_2} is in the list"
with allure.step("Bucket must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Check buckets are visible with S3 head command"):
s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
s3_gate_bucket.head_bucket(self.s3_client, bucket_2)
s3_client.head_bucket(bucket_1)
s3_client.head_bucket(bucket_2)
with allure.step("Check we can put/list object with S3 commands"):
version_id = s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
s3_gate_object.head_object_s3(self.s3_client, bucket_1, file_name)
version_id = s3_client.put_object(bucket_1, file_path)
s3_client.head_object(bucket_1, file_name)
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket_1)
bucket_objects = s3_client.list_objects(bucket_1)
assert (
file_name in bucket_objects
), f"Expected file {file_name} in objects list {bucket_objects}"
with allure.step("Try to delete not empty bucket and get error"):
with pytest.raises(Exception, match=r".*The bucket you tried to delete is not empty.*"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_1)
s3_client.delete_bucket(bucket_1)
s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
s3_client.head_bucket(bucket_1)
with allure.step(f"Delete empty bucket {bucket_2}"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_2)
tick_epoch(self.shell, self.cluster)
s3_client.delete_bucket(bucket_2)
tick_epoch(client_shell, cluster)
with allure.step(f"Check bucket {bucket_2} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_gate_bucket.head_bucket(self.s3_client, bucket_2)
s3_client.head_bucket(bucket_2)
buckets = s3_gate_bucket.list_buckets_s3(self.s3_client)
buckets = s3_client.list_buckets()
assert bucket_1 in buckets, f"Expected bucket {bucket_1} is in the list"
assert bucket_2 not in buckets, f"Expected bucket {bucket_2} is not in the list"
with allure.step(f"Delete object from {bucket_1}"):
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, file_name, version_id)
check_objects_in_bucket(self.s3_client, bucket_1, expected_objects=[])
s3_client.delete_object(bucket_1, file_name, version_id)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=[])
with allure.step(f"Delete bucket {bucket_1}"):
s3_gate_bucket.delete_bucket_s3(self.s3_client, bucket_1)
self.tick_epoch()
s3_client.delete_bucket(bucket_1)
tick_epoch(client_shell, cluster)
with allure.step(f"Check bucket {bucket_1} deleted"):
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
s3_client.head_bucket(bucket_1)
@allure.title("Test S3 Object API")
@pytest.mark.parametrize(
"file_type", ["simple", "large"], ids=["Simple object", "Large object"]
)
def test_s3_api_object(self, file_type, two_buckets, simple_object_size, complex_object_size):
def test_s3_api_object(
self,
s3_client: S3ClientWrapper,
file_type: str,
two_buckets: tuple[str, str],
simple_object_size: int,
complex_object_size: int,
):
"""
Test base S3 Object API (Put/Head/List) for simple and large objects.
"""
file_path = generate_file(
simple_object_size if file_type == "simple" else complex_object_size
)
file_name = self.object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_1, bucket_2 = two_buckets
for bucket in (bucket_1, bucket_2):
with allure.step("Bucket must be empty"):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
s3_gate_object.head_object_s3(self.s3_client, bucket, file_name)
s3_client.put_object(bucket, file_path)
s3_client.head_object(bucket, file_name)
bucket_objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
bucket_objects = s3_client.list_objects(bucket)
assert (
file_name in bucket_objects
), f"Expected file {file_name} in objects list {bucket_objects}"
with allure.step("Check object's attributes"):
for attrs in (["ETag"], ["ObjectSize", "StorageClass"]):
s3_gate_object.get_object_attributes(self.s3_client, bucket, file_name, *attrs)
s3_client.get_object_attributes(bucket, file_name, attrs)
@allure.title("Test S3 Sync directory")
def test_s3_sync_dir(self, bucket, simple_object_size):
def test_s3_sync_dir(self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
"""
Test checks syncing a directory with the AWS CLI utility.
"""
@ -148,29 +154,31 @@ class TestS3Gate(TestS3GateBase):
file_path_2 = os.path.join(os.getcwd(), ASSETS_DIR, "test_sync", "test_file_2")
key_to_path = {"test_file_1": file_path_1, "test_file_2": file_path_2}
if not isinstance(self.s3_client, AwsCliClient):
if not isinstance(s3_client, AwsCliClient):
pytest.skip("This test is not supported with boto3 client")
generate_file_with_content(simple_object_size, file_path=file_path_1)
generate_file_with_content(simple_object_size, file_path=file_path_2)
self.s3_client.sync(bucket_name=bucket, dir_path=os.path.dirname(file_path_1))
s3_client.sync(bucket=bucket, dir_path=os.path.dirname(file_path_1))
with allure.step("Check objects are synced"):
objects = s3_gate_object.list_objects_s3(self.s3_client, bucket)
objects = s3_client.list_objects(bucket)
with allure.step("Check these are the same objects"):
assert set(key_to_path.keys()) == set(
objects
), f"Expected all objects saved. Got {objects}"
for obj_key in objects:
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key)
got_object = s3_client.get_object(bucket, obj_key)
assert get_file_hash(got_object) == get_file_hash(
key_to_path.get(obj_key)
), "Expected hashes are the same"
@allure.title("Test S3 Object versioning")
def test_s3_api_versioning(self, bucket, simple_object_size):
def test_s3_api_versioning(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
"""
Test checks basic versioning functionality for S3 bucket.
"""
@ -178,17 +186,17 @@ class TestS3Gate(TestS3GateBase):
version_2_content = "Version 2"
file_name_simple = generate_file_with_content(simple_object_size, content=version_1_content)
obj_key = os.path.basename(file_name_simple)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
version_id_1 = s3_client.put_object(bucket, file_name_simple)
generate_file_with_content(
simple_object_size, file_path=file_name_simple, content=version_2_content
)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
version_id_2 = s3_client.put_object(bucket, file_name_simple)
with allure.step("Check bucket shows all versions"):
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
versions = s3_client.list_objects_versions(bucket)
obj_versions = {
version.get("VersionId") for version in versions if version.get("Key") == obj_key
}
@ -199,9 +207,7 @@ class TestS3Gate(TestS3GateBase):
with allure.step("Show information about particular version"):
for version_id in (version_id_1, version_id_2):
response = s3_gate_object.head_object_s3(
self.s3_client, bucket, obj_key, version_id=version_id
)
response = s3_client.head_object(bucket, obj_key, version_id=version_id)
assert "LastModified" in response, "Expected LastModified field"
assert "ETag" in response, "Expected ETag field"
assert (
@ -211,8 +217,8 @@ class TestS3Gate(TestS3GateBase):
with allure.step("Check object's attributes"):
for version_id in (version_id_1, version_id_2):
got_attrs = s3_gate_object.get_object_attributes(
self.s3_client, bucket, obj_key, "ETag", version_id=version_id
got_attrs = s3_client.get_object_attributes(
bucket, obj_key, ["ETag"], version_id=version_id
)
if got_attrs:
assert (
@ -220,31 +226,27 @@ class TestS3Gate(TestS3GateBase):
), f"Expected VersionId is {version_id}"
with allure.step("Delete object and check it was deleted"):
response = s3_gate_object.delete_object_s3(self.s3_client, bucket, obj_key)
response = s3_client.delete_object(bucket, obj_key)
version_id_delete = response.get("VersionId")
with pytest.raises(Exception, match=r".*Not Found.*"):
s3_gate_object.head_object_s3(self.s3_client, bucket, obj_key)
s3_client.head_object(bucket, obj_key)
with allure.step("Get content for all versions and check it is correct"):
for version, content in (
(version_id_2, version_2_content),
(version_id_1, version_1_content),
):
file_name = s3_gate_object.get_object_s3(
self.s3_client, bucket, obj_key, version_id=version
)
file_name = s3_client.get_object(bucket, obj_key, version_id=version)
got_content = get_file_content(file_name)
assert (
got_content == content
), f"Expected object content is\n{content}\nGot\n{got_content}"
with allure.step("Restore previous object version"):
s3_gate_object.delete_object_s3(
self.s3_client, bucket, obj_key, version_id=version_id_delete
)
s3_client.delete_object(bucket, obj_key, version_id=version_id_delete)
file_name = s3_gate_object.get_object_s3(self.s3_client, bucket, obj_key)
file_name = s3_client.get_object(bucket, obj_key)
got_content = get_file_content(file_name)
assert (
got_content == version_2_content
@ -252,7 +254,9 @@ class TestS3Gate(TestS3GateBase):
@pytest.mark.s3_gate_multipart
@allure.title("Test S3 Object Multipart API")
def test_s3_api_multipart(self, bucket, simple_object_size):
def test_s3_api_multipart(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
"""
Test checks S3 Multipart API (Create multipart upload/Abort multipart upload/List multipart upload/
Upload part/List parts/Complete multipart upload).
@ -261,18 +265,16 @@ class TestS3Gate(TestS3GateBase):
file_name_large = generate_file(
simple_object_size * 1024 * 6 * parts_count
) # 5Mb - min part
object_key = self.object_key_from_file_path(file_name_large)
object_key = s3_helper.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with allure.step("Create and abort multipart upload"):
upload_id = s3_gate_object.create_multipart_upload_s3(
self.s3_client, bucket, object_key
)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
upload_id = s3_client.create_multipart_upload(bucket, object_key)
uploads = s3_client.list_multipart_uploads(bucket)
assert uploads, f"Expected there one upload in bucket {bucket}"
assert (
uploads[0].get("Key") == object_key
@ -281,54 +283,50 @@ class TestS3Gate(TestS3GateBase):
uploads[0].get("UploadId") == upload_id
), f"Expected correct UploadId {upload_id} in upload {uploads}"
s3_gate_object.abort_multipart_upload_s3(self.s3_client, bucket, object_key, upload_id)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
s3_client.abort_multipart_upload(bucket, object_key, upload_id)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with allure.step("Create new multipart upload and upload several parts"):
upload_id = s3_gate_object.create_multipart_upload_s3(
self.s3_client, bucket, object_key
)
upload_id = s3_client.create_multipart_upload(bucket, object_key)
for part_id, file_path in enumerate(part_files, start=1):
etag = s3_gate_object.upload_part_s3(
self.s3_client, bucket, object_key, upload_id, part_id, file_path
)
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
with allure.step("Check all parts are visible in bucket"):
got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id)
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == len(
part_files
), f"Expected {parts_count} parts, got\n{got_parts}"
s3_gate_object.complete_multipart_upload_s3(
self.s3_client, bucket, object_key, upload_id, parts
)
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with allure.step("Check we can get whole object from bucket"):
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, object_key)
got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
self.check_object_attributes(bucket, object_key, parts_count)
self.check_object_attributes(s3_client, bucket, object_key, parts_count)
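The multipart sequence above condenses to the following hedged sketch in the wrapper style; per S3 semantics every part except the last must be at least 5 MB, which is why the test sizes the source file as `simple_object_size * 1024 * 6 * parts_count`:

```python
from frostfs_testlib.s3 import S3ClientWrapper
from frostfs_testlib.utils.file_utils import get_file_hash, split_file


def multipart_roundtrip(
    s3_client: S3ClientWrapper, bucket: str, file_path: str, key: str, parts_count: int
) -> None:
    part_files = split_file(file_path, parts_count)
    upload_id = s3_client.create_multipart_upload(bucket, key)
    parts = []
    for part_id, part_path in enumerate(part_files, start=1):
        etag = s3_client.upload_part(bucket, key, upload_id, part_id, part_path)
        parts.append((part_id, etag))
    assert len(s3_client.list_parts(bucket, key, upload_id)) == parts_count
    s3_client.complete_multipart_upload(bucket, key, upload_id, parts)
    downloaded = s3_client.get_object(bucket, key)
    assert get_file_hash(downloaded) == get_file_hash(file_path)
```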
@allure.title("Test S3 Bucket tagging API")
def test_s3_api_bucket_tagging(self, bucket):
def test_s3_api_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str):
"""
Test checks S3 Bucket tagging API (Put tag/Get tag).
"""
key_value_pair = [("some-key", "some-value"), ("some-key-2", "some-value-2")]
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, key_value_pair)
check_tags_by_bucket(self.s3_client, bucket, key_value_pair)
s3_client.put_bucket_tagging(bucket, key_value_pair)
s3_helper.check_tags_by_bucket(s3_client, bucket, key_value_pair)
s3_gate_bucket.delete_bucket_tagging(self.s3_client, bucket)
check_tags_by_bucket(self.s3_client, bucket, [])
s3_client.delete_bucket_tagging(bucket)
s3_helper.check_tags_by_bucket(s3_client, bucket, [])
@allure.title("Test S3 Object tagging API")
def test_s3_api_object_tagging(self, bucket, simple_object_size):
def test_s3_api_object_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
"""
Test checks S3 Object tagging API (Put tag/Get tag/Update tag).
"""
@ -339,26 +337,32 @@ class TestS3Gate(TestS3GateBase):
]
key_value_pair_obj_new = [("some-key-obj-new", "some-value-obj-new")]
file_name_simple = generate_file(simple_object_size)
obj_key = self.object_key_from_file_path(file_name_simple)
obj_key = s3_helper.object_key_from_file_path(file_name_simple)
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, key_value_pair_bucket)
s3_client.put_bucket_tagging(bucket, key_value_pair_bucket)
s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_simple)
s3_client.put_object(bucket, file_name_simple)
for tags in (key_value_pair_obj, key_value_pair_obj_new):
s3_gate_object.put_object_tagging(self.s3_client, bucket, obj_key, tags)
check_tags_by_object(
self.s3_client,
s3_client.put_object_tagging(bucket, obj_key, tags)
s3_helper.check_tags_by_object(
s3_client,
bucket,
obj_key,
tags,
)
s3_gate_object.delete_object_tagging(self.s3_client, bucket, obj_key)
check_tags_by_object(self.s3_client, bucket, obj_key, [])
s3_client.delete_object_tagging(bucket, obj_key)
s3_helper.check_tags_by_object(s3_client, bucket, obj_key, [])
@allure.title("Test S3: Delete object & delete objects S3 API")
def test_s3_api_delete(self, two_buckets, simple_object_size, complex_object_size):
def test_s3_api_delete(
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
simple_object_size: int,
complex_object_size: int,
):
"""
Check the delete_object and delete_objects S3 API operations. From the first bucket, some objects are deleted one by one.
From the second bucket, some objects are deleted all at once.
@ -377,15 +381,15 @@ class TestS3Gate(TestS3GateBase):
for bucket in (bucket_1, bucket_2):
with allure.step(f"Bucket {bucket} must be empty as it just created"):
objects_list = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket)
objects_list = s3_client.list_objects_v2(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
for file_path in file_paths:
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
put_objects.append(self.object_key_from_file_path(file_path))
s3_client.put_object(bucket, file_path)
put_objects.append(s3_helper.object_key_from_file_path(file_path))
with allure.step(f"Check all objects put in bucket {bucket} successfully"):
bucket_objects = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket)
bucket_objects = s3_client.list_objects_v2(bucket)
assert set(put_objects) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
@ -393,28 +397,38 @@ class TestS3Gate(TestS3GateBase):
with allure.step("Delete some objects from bucket_1 one by one"):
objects_to_delete_b1 = choices(put_objects, k=max_delete_objects)
for obj in objects_to_delete_b1:
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, obj)
s3_client.delete_object(bucket_1, obj)
with allure.step("Check deleted objects are not visible in bucket bucket_1"):
bucket_objects = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket_1)
bucket_objects = s3_client.list_objects_v2(bucket_1)
assert set(put_objects).difference(set(objects_to_delete_b1)) == set(
bucket_objects
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
try_to_get_objects_and_expect_error(self.s3_client, bucket_1, objects_to_delete_b1)
for object_key in objects_to_delete_b1:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_1, object_key)
with allure.step("Delete some objects from bucket_2 at once"):
objects_to_delete_b2 = choices(put_objects, k=max_delete_objects)
s3_gate_object.delete_objects_s3(self.s3_client, bucket_2, objects_to_delete_b2)
s3_client.delete_objects(bucket_2, objects_to_delete_b2)
with allure.step("Check deleted objects are not visible in bucket bucket_2"):
objects_list = s3_gate_object.list_objects_s3_v2(self.s3_client, bucket_2)
objects_list = s3_client.list_objects_v2(bucket_2)
assert set(put_objects).difference(set(objects_to_delete_b2)) == set(
objects_list
), f"Expected all objects {put_objects} in objects list {bucket_objects}"
try_to_get_objects_and_expect_error(self.s3_client, bucket_2, objects_to_delete_b2)
for object_key in objects_to_delete_b2:
with pytest.raises(Exception, match="The specified key does not exist"):
s3_client.get_object(bucket_2, object_key)
@allure.title("Test S3: Copy object to the same bucket")
def test_s3_copy_same_bucket(self, bucket, complex_object_size, simple_object_size):
def test_s3_copy_same_bucket(
self,
s3_client: S3ClientWrapper,
bucket: str,
complex_object_size: int,
simple_object_size: int,
):
"""
Test object can be copied to the same bucket.
#TODO: delete after test_s3_copy_object is merged
@ -422,43 +436,49 @@ class TestS3Gate(TestS3GateBase):
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_objects = [file_name_simple, file_name_large]
with allure.step("Bucket must be empty"):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put objects into bucket"):
for file_path in (file_path_simple, file_path_large):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
s3_client.put_object(bucket, file_path)
with allure.step("Copy one object into the same bucket"):
copy_obj_path = s3_gate_object.copy_object_s3(self.s3_client, bucket, file_name_simple)
copy_obj_path = s3_client.copy_object(bucket, file_name_simple)
bucket_objects.append(copy_obj_path)
check_objects_in_bucket(self.s3_client, bucket, bucket_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket, bucket_objects)
with allure.step("Check copied object has the same content"):
got_copied_file = s3_gate_object.get_object_s3(self.s3_client, bucket, copy_obj_path)
got_copied_file = s3_client.get_object(bucket, copy_obj_path)
assert get_file_hash(file_path_simple) == get_file_hash(
got_copied_file
), "Hashes must be the same"
with allure.step("Delete one object from bucket"):
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name_simple)
s3_client.delete_object(bucket, file_name_simple)
bucket_objects.remove(file_name_simple)
check_objects_in_bucket(
self.s3_client,
s3_helper.check_objects_in_bucket(
s3_client,
bucket,
expected_objects=bucket_objects,
unexpected_objects=[file_name_simple],
)
@allure.title("Test S3: Copy object to another bucket")
def test_s3_copy_to_another_bucket(self, two_buckets, complex_object_size, simple_object_size):
def test_s3_copy_to_another_bucket(
self,
s3_client: S3ClientWrapper,
two_buckets: tuple[str, str],
complex_object_size: int,
simple_object_size: int,
):
"""
Test object can be copied to another bucket.
#TODO: delete after test_s3_copy_object is merged
@ -466,55 +486,53 @@ class TestS3Gate(TestS3GateBase):
file_path_simple, file_path_large = generate_file(simple_object_size), generate_file(
complex_object_size
)
file_name_simple = self.object_key_from_file_path(file_path_simple)
file_name_large = self.object_key_from_file_path(file_path_large)
file_name_simple = s3_helper.object_key_from_file_path(file_path_simple)
file_name_large = s3_helper.object_key_from_file_path(file_path_large)
bucket_1_objects = [file_name_simple, file_name_large]
bucket_1, bucket_2 = two_buckets
with allure.step("Buckets must be empty"):
for bucket in (bucket_1, bucket_2):
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
objects_list = s3_client.list_objects(bucket)
assert not objects_list, f"Expected empty bucket, got {objects_list}"
with allure.step("Put objects into one bucket"):
for file_path in (file_path_simple, file_path_large):
s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path)
s3_client.put_object(bucket_1, file_path)
with allure.step("Copy object from first bucket into second"):
copy_obj_path_b2 = s3_gate_object.copy_object_s3(
self.s3_client, bucket_1, file_name_large, bucket_dst=bucket_2
)
check_objects_in_bucket(self.s3_client, bucket_1, expected_objects=bucket_1_objects)
check_objects_in_bucket(self.s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
copy_obj_path_b2 = s3_client.copy_object(bucket_1, file_name_large, bucket=bucket_2)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step("Check copied object has the same content"):
got_copied_file_b2 = s3_gate_object.get_object_s3(
self.s3_client, bucket_2, copy_obj_path_b2
)
got_copied_file_b2 = s3_client.get_object(bucket_2, copy_obj_path_b2)
assert get_file_hash(file_path_large) == get_file_hash(
got_copied_file_b2
), "Hashes must be the same"
with allure.step("Delete one object from first bucket"):
s3_gate_object.delete_object_s3(self.s3_client, bucket_1, file_name_simple)
s3_client.delete_object(bucket_1, file_name_simple)
bucket_1_objects.remove(file_name_simple)
check_objects_in_bucket(self.s3_client, bucket_1, expected_objects=bucket_1_objects)
check_objects_in_bucket(self.s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
s3_helper.check_objects_in_bucket(s3_client, bucket_1, expected_objects=bucket_1_objects)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[copy_obj_path_b2])
with allure.step("Delete one object from second bucket and check it is empty"):
s3_gate_object.delete_object_s3(self.s3_client, bucket_2, copy_obj_path_b2)
check_objects_in_bucket(self.s3_client, bucket_2, expected_objects=[])
s3_client.delete_object(bucket_2, copy_obj_path_b2)
s3_helper.check_objects_in_bucket(s3_client, bucket_2, expected_objects=[])
def check_object_attributes(self, bucket: str, object_key: str, parts_count: int):
if not isinstance(self.s3_client, AwsCliClient):
def check_object_attributes(
self, s3_client: S3ClientWrapper, bucket: str, object_key: str, parts_count: int
):
if not isinstance(s3_client, AwsCliClient):
logger.warning("Attributes check is not supported for boto3 implementation")
return
with allure.step("Check object's attributes"):
obj_parts = s3_gate_object.get_object_attributes(
self.s3_client, bucket, object_key, "ObjectParts", get_full_resp=False
obj_parts = s3_client.get_object_attributes(
bucket, object_key, ["ObjectParts"], full_output=False
)
assert (
obj_parts.get("TotalPartsCount") == parts_count
@@ -525,13 +543,12 @@ class TestS3Gate(TestS3GateBase):
with allure.step("Check object's attribute max-parts"):
max_parts = 2
obj_parts = s3_gate_object.get_object_attributes(
self.s3_client,
obj_parts = s3_client.get_object_attributes(
bucket,
object_key,
"ObjectParts",
["ObjectParts"],
max_parts=max_parts,
get_full_resp=False,
full_output=False,
)
assert (
obj_parts.get("TotalPartsCount") == parts_count
@@ -543,13 +560,12 @@ class TestS3Gate(TestS3GateBase):
with allure.step("Check object's attribute part-number-marker"):
part_number_marker = 3
obj_parts = s3_gate_object.get_object_attributes(
self.s3_client,
obj_parts = s3_client.get_object_attributes(
bucket,
object_key,
"ObjectParts",
["ObjectParts"],
part_number=part_number_marker,
get_full_resp=False,
full_output=False,
)
assert (
obj_parts.get("TotalPartsCount") == parts_count
@@ -558,7 +574,3 @@ class TestS3Gate(TestS3GateBase):
obj_parts.get("PartNumberMarker") == part_number_marker
), f"Expected PartNumberMarker is {part_number_marker}"
assert len(obj_parts.get("Parts")) == 1, f"Expected Parts count is {parts_count}"
@staticmethod
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)
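
The change repeated throughout this diff: free helpers in `pytest_tests.steps.s3_gate_object`/`s3_gate_bucket` that took `self.s3_client` become methods on an injected `S3ClientWrapper`, and shared utilities move into `frostfs_testlib`. A minimal sketch of the resulting test shape, assuming the `s3_client`, `bucket`, and `simple_object_size` fixtures are wired as in the tests above:

    import allure
    import pytest
    from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
    from frostfs_testlib.steps.s3 import s3_helper
    from frostfs_testlib.utils.file_utils import generate_file

    def pytest_generate_tests(metafunc: pytest.Metafunc):
        # Run every test in the module against both client implementations.
        if "s3_client" in metafunc.fixturenames:
            metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)

    def test_put_object_sketch(s3_client: S3ClientWrapper, bucket: str, simple_object_size: int):
        file_path = generate_file(simple_object_size)
        file_name = s3_helper.object_key_from_file_path(file_path)
        with allure.step("Put object and verify listing"):
            s3_client.put_object(bucket, file_path)
            s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])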


@@ -3,40 +3,36 @@ from datetime import datetime, timedelta
import allure
import pytest
from pytest_tests.helpers.file_helper import generate_file, generate_file_with_content
from pytest_tests.helpers.s3_helper import (
assert_object_lock_mode,
check_objects_in_bucket,
object_key_from_file_path,
)
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
from pytest_tests.steps.s3_gate_base import TestS3GateBase
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content
def pytest_generate_tests(metafunc):
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_locking
@pytest.mark.parametrize("version_id", [None, "second"])
class TestS3GateLocking(TestS3GateBase):
class TestS3GateLocking:
@allure.title("Test S3: Checking the operation of retention period & legal lock on the object")
def test_s3_object_locking(self, version_id, simple_object_size):
def test_s3_object_locking(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 2
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with allure.step("Put several versions of object into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
s3_client.put_object(bucket, file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
check_objects_in_bucket(self.s3_client, bucket, [file_name])
version_id_2 = s3_client.put_object(bucket, file_name_1)
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
if version_id:
version_id = version_id_2
@@ -46,51 +42,53 @@ class TestS3GateLocking(TestS3GateBase):
"Mode": "COMPLIANCE",
"RetainUntilDate": date_obj,
}
s3_gate_object.put_object_retention(
self.s3_client, bucket, file_name, retention, version_id
)
assert_object_lock_mode(
self.s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
s3_client.put_object_retention(bucket, file_name, retention, version_id)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
)
with allure.step(f"Put legal hold to object {file_name}"):
s3_gate_object.put_object_legal_hold(
self.s3_client, bucket, file_name, "ON", version_id
s3_client.put_object_legal_hold(bucket, file_name, "ON", version_id)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON"
)
assert_object_lock_mode(self.s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON")
with allure.step(f"Fail with deleting object with legal hold and retention period"):
with allure.step("Fail with deleting object with legal hold and retention period"):
if version_id:
with pytest.raises(Exception):
# An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied.
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name, version_id)
s3_client.delete_object(bucket, file_name, version_id)
with allure.step(f"Check retention period is no longer set on the uploaded object"):
with allure.step("Check retention period is no longer set on the uploaded object"):
time.sleep((retention_period + 1) * 60)
assert_object_lock_mode(self.s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON")
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "ON"
)
with allure.step(f"Fail with deleting object with legal hold and retention period"):
with allure.step("Fail with deleting object with legal hold and retention period"):
if version_id:
with pytest.raises(Exception):
# An error occurred (AccessDenied) when calling the DeleteObject operation (reached max retries: 0): Access Denied.
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name, version_id)
s3_client.delete_object(bucket, file_name, version_id)
else:
s3_gate_object.delete_object_s3(self.s3_client, bucket, file_name, version_id)
s3_client.delete_object(bucket, file_name, version_id)
@allure.title("Test S3: Checking the impossibility to change the retention mode COMPLIANCE")
def test_s3_mode_compliance(self, version_id, simple_object_size):
def test_s3_mode_compliance(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 2
retention_period_1 = 1
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with allure.step("Put object into bucket"):
obj_version = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
obj_version = s3_client.put_object(bucket, file_path)
if version_id:
version_id = obj_version
check_objects_in_bucket(self.s3_client, bucket, [file_name])
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with allure.step(f"Put retention period {retention_period}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period)
@@ -98,11 +96,9 @@ class TestS3GateLocking(TestS3GateBase):
"Mode": "COMPLIANCE",
"RetainUntilDate": date_obj,
}
s3_gate_object.put_object_retention(
self.s3_client, bucket, file_name, retention, version_id
)
assert_object_lock_mode(
self.s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
s3_client.put_object_retention(bucket, file_name, retention, version_id)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", date_obj, "OFF"
)
with allure.step(
@@ -114,25 +110,25 @@ class TestS3GateLocking(TestS3GateBase):
"RetainUntilDate": date_obj,
}
with pytest.raises(Exception):
s3_gate_object.put_object_retention(
self.s3_client, bucket, file_name, retention, version_id
)
s3_client.put_object_retention(bucket, file_name, retention, version_id)
@allure.title("Test S3: Checking the ability to change retention mode GOVERNANCE")
def test_s3_mode_governance(self, version_id, simple_object_size):
def test_s3_mode_governance(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
retention_period = 3
retention_period_1 = 2
retention_period_2 = 5
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with allure.step("Put object into bucket"):
obj_version = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
obj_version = s3_client.put_object(bucket, file_path)
if version_id:
version_id = obj_version
check_objects_in_bucket(self.s3_client, bucket, [file_name])
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with allure.step(f"Put retention period {retention_period}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period)
@@ -140,11 +136,9 @@ class TestS3GateLocking(TestS3GateBase):
"Mode": "GOVERNANCE",
"RetainUntilDate": date_obj,
}
s3_gate_object.put_object_retention(
self.s3_client, bucket, file_name, retention, version_id
)
assert_object_lock_mode(
self.s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
s3_client.put_object_retention(bucket, file_name, retention, version_id)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
)
with allure.step(
@@ -156,9 +150,7 @@ class TestS3GateLocking(TestS3GateBase):
"RetainUntilDate": date_obj,
}
with pytest.raises(Exception):
s3_gate_object.put_object_retention(
self.s3_client, bucket, file_name, retention, version_id
)
s3_client.put_object_retention(bucket, file_name, retention, version_id)
with allure.step(
f"Try to change retention period {retention_period_1}min to object {file_name}"
@@ -169,9 +161,7 @@ class TestS3GateLocking(TestS3GateBase):
"RetainUntilDate": date_obj,
}
with pytest.raises(Exception):
s3_gate_object.put_object_retention(
self.s3_client, bucket, file_name, retention, version_id
)
s3_client.put_object_retention(bucket, file_name, retention, version_id)
with allure.step(f"Put new retention period {retention_period_2}min to object {file_name}"):
date_obj = datetime.utcnow() + timedelta(minutes=retention_period_2)
@@ -179,55 +169,55 @@ class TestS3GateLocking(TestS3GateBase):
"Mode": "GOVERNANCE",
"RetainUntilDate": date_obj,
}
s3_gate_object.put_object_retention(
self.s3_client, bucket, file_name, retention, version_id, True
)
assert_object_lock_mode(
self.s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
s3_client.put_object_retention(bucket, file_name, retention, version_id, True)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "GOVERNANCE", date_obj, "OFF"
)
@allure.title("Test S3: Checking if an Object Cannot Be Locked")
def test_s3_legal_hold(self, version_id, simple_object_size):
def test_s3_legal_hold(
self, s3_client: S3ClientWrapper, version_id: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, False)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
with allure.step("Put object into bucket"):
obj_version = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
obj_version = s3_client.put_object(bucket, file_path)
if version_id:
version_id = obj_version
check_objects_in_bucket(self.s3_client, bucket, [file_name])
s3_helper.check_objects_in_bucket(s3_client, bucket, [file_name])
with allure.step(f"Put legal hold to object {file_name}"):
with pytest.raises(Exception):
s3_gate_object.put_object_legal_hold(
self.s3_client, bucket, file_name, "ON", version_id
)
s3_client.put_object_legal_hold(bucket, file_name, "ON", version_id)
@pytest.mark.s3_gate
class TestS3GateLockingBucket(TestS3GateBase):
class TestS3GateLockingBucket:
@allure.title("Test S3: Bucket Lock")
def test_s3_bucket_lock(self, simple_object_size):
def test_s3_bucket_lock(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
configuration = {"Rule": {"DefaultRetention": {"Mode": "COMPLIANCE", "Days": 1}}}
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with allure.step("PutObjectLockConfiguration with ObjectLockEnabled=False"):
s3_gate_bucket.put_object_lock_configuration(self.s3_client, bucket, configuration)
s3_client.put_object_lock_configuration(bucket, configuration)
with allure.step("PutObjectLockConfiguration with ObjectLockEnabled=True"):
configuration["ObjectLockEnabled"] = "Enabled"
s3_gate_bucket.put_object_lock_configuration(self.s3_client, bucket, configuration)
s3_client.put_object_lock_configuration(bucket, configuration)
with allure.step("GetObjectLockConfiguration"):
config = s3_gate_bucket.get_object_lock_configuration(self.s3_client, bucket)
config = s3_client.get_object_lock_configuration(bucket)
configuration["Rule"]["DefaultRetention"]["Years"] = 0
assert config == configuration, f"Configurations must be equal {configuration}"
with allure.step("Put object into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
assert_object_lock_mode(self.s3_client, bucket, file_name, "COMPLIANCE", None, "OFF", 1)
s3_client.put_object(bucket, file_path)
s3_helper.assert_object_lock_mode(
s3_client, bucket, file_name, "COMPLIANCE", None, "OFF", 1
)
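
All locking cases above share one setup: a lock-enabled bucket, a put, then retention and/or a legal hold. A compact sketch of that flow using the same wrapper API (object lock must be declared at bucket creation; the retention period here is illustrative):

    from datetime import datetime, timedelta

    from frostfs_testlib.s3 import S3ClientWrapper
    from frostfs_testlib.steps.s3 import s3_helper

    def lock_object(s3_client: S3ClientWrapper, file_path: str, retention_minutes: int = 2) -> str:
        # Object lock can only be enabled when the bucket is created
        bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
        version_id = s3_client.put_object(bucket, file_path)
        file_name = s3_helper.object_key_from_file_path(file_path)
        retention = {
            "Mode": "COMPLIANCE",  # unlike GOVERNANCE, cannot be shortened or overridden
            "RetainUntilDate": datetime.utcnow() + timedelta(minutes=retention_minutes),
        }
        s3_client.put_object_retention(bucket, file_name, retention, version_id)
        return bucket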


@@ -1,77 +1,71 @@
import logging
import allure
import pytest
import pytest_tests.helpers.container as container
from pytest_tests.helpers.file_helper import generate_file, get_file_hash, split_file
from pytest_tests.helpers.s3_helper import check_objects_in_bucket, object_key_from_file_path
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
from pytest_tests.steps.s3_gate_base import TestS3GateBase
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.cli.container import list_objects, search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split_file
PART_SIZE = 5 * 1024 * 1024
def pytest_generate_tests(metafunc):
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_multipart
class TestS3GateMultipart(TestS3GateBase):
class TestS3GateMultipart(ClusterTestBase):
NO_SUCH_UPLOAD = (
"The upload ID may be invalid, or the upload may have been aborted or completed."
)
@allure.title("Test S3 Object Multipart API")
@pytest.mark.parametrize("bucket", [s3_gate_bucket.VersioningStatus.ENABLED], indirect=True)
def test_s3_object_multipart(self, bucket):
@pytest.mark.parametrize("bucket", [VersioningStatus.ENABLED], indirect=True)
def test_s3_object_multipart(self, s3_client: S3ClientWrapper, bucket: str):
parts_count = 5
file_name_large = generate_file(PART_SIZE * parts_count) # 5Mb - min part
object_key = object_key_from_file_path(file_name_large)
object_key = s3_helper.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
with allure.step("Upload first part"):
upload_id = s3_gate_object.create_multipart_upload_s3(
self.s3_client, bucket, object_key
)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
etag = s3_gate_object.upload_part_s3(
self.s3_client, bucket, object_key, upload_id, 1, part_files[0]
)
upload_id = s3_client.create_multipart_upload(bucket, object_key)
uploads = s3_client.list_multipart_uploads(bucket)
etag = s3_client.upload_part(bucket, object_key, upload_id, 1, part_files[0])
parts.append((1, etag))
got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id)
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
assert len(got_parts) == 1, f"Expected {1} parts, got\n{got_parts}"
with allure.step("Upload last parts"):
for part_id, file_path in enumerate(part_files[1:], start=2):
etag = s3_gate_object.upload_part_s3(
self.s3_client, bucket, object_key, upload_id, part_id, file_path
)
etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
parts.append((part_id, etag))
got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id)
s3_gate_object.complete_multipart_upload_s3(
self.s3_client, bucket, object_key, upload_id, parts
)
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
assert len(got_parts) == len(
part_files
), f"Expected {parts_count} parts, got\n{got_parts}"
with allure.step("Check upload list is empty"):
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected there is no uploads in bucket {bucket}"
with allure.step("Check we can get whole object from bucket"):
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, object_key)
got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
@allure.title("Test S3 Multipart abort")
@pytest.mark.parametrize("bucket", [s3_gate_bucket.VersioningStatus.ENABLED], indirect=True)
@pytest.mark.parametrize("bucket", [VersioningStatus.ENABLED], indirect=True)
def test_s3_abort_multipart(
self, bucket: str, simple_object_size: int, complex_object_size: int
self,
s3_client: S3ClientWrapper,
default_wallet: str,
bucket: str,
simple_object_size: int,
complex_object_size: int,
):
complex_file = generate_file(complex_object_size)
simple_file = generate_file(simple_object_size)
@@ -80,85 +74,79 @@ class TestS3GateMultipart(TestS3GateBase):
upload_key = "multipart_abort"
with allure.step(f"Get related container_id for bucket '{bucket}'"):
container_id = container.search_container_by_name(
self.wallet, bucket, self.shell, self.cluster.default_rpc_endpoint
container_id = search_container_by_name(
default_wallet, bucket, self.shell, self.cluster.default_rpc_endpoint
)
with allure.step("Create multipart upload"):
upload_id = s3_gate_object.create_multipart_upload_s3(
self.s3_client, bucket, upload_key
)
upload_id = s3_client.create_multipart_upload(bucket, upload_key)
with allure.step(f"Upload {files_count} files to multipart upload"):
for i, file in enumerate(to_upload, 1):
s3_gate_object.upload_part_s3(
self.s3_client, bucket, upload_key, upload_id, i, file
)
s3_client.upload_part(bucket, upload_key, upload_id, i, file)
with allure.step(f"Check that we have {files_count} files in bucket"):
parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, upload_key, upload_id)
parts = s3_client.list_parts(bucket, upload_key, upload_id)
assert len(parts) == files_count, f"Expected {files_count} parts, got\n{parts}"
with allure.step(f"Check that we have {files_count} files in container '{container_id}'"):
objects = container.list_objects(
self.wallet, self.shell, container_id, self.cluster.default_rpc_endpoint
objects = list_objects(
default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint
)
assert (
len(objects) == files_count
), f"Expected {files_count} objects in container, got\n{objects}"
with allure.step("Abort multipart upload"):
s3_gate_object.abort_multipart_upload_s3(self.s3_client, bucket, upload_key, upload_id)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
s3_client.abort_multipart_upload(bucket, upload_key, upload_id)
uploads = s3_client.list_multipart_uploads(bucket)
assert not uploads, f"Expected no uploads in bucket {bucket}"
with allure.step("Check that we have no files in bucket since upload was aborted"):
with pytest.raises(Exception, match=self.NO_SUCH_UPLOAD):
s3_gate_object.list_parts_s3(self.s3_client, bucket, upload_key, upload_id)
s3_client.list_parts(bucket, upload_key, upload_id)
with allure.step("Check that we have no files in container since upload was aborted"):
objects = container.list_objects(
self.wallet, self.shell, container_id, self.cluster.default_rpc_endpoint
objects = list_objects(
default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint
)
assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"
@allure.title("Test S3 Upload Part Copy")
@pytest.mark.parametrize("bucket", [s3_gate_bucket.VersioningStatus.ENABLED], indirect=True)
def test_s3_multipart_copy(self, bucket):
@pytest.mark.parametrize("bucket", [VersioningStatus.ENABLED], indirect=True)
def test_s3_multipart_copy(self, s3_client: S3ClientWrapper, bucket: str):
parts_count = 3
file_name_large = generate_file(PART_SIZE * parts_count) # 5Mb - min part
object_key = object_key_from_file_path(file_name_large)
object_key = s3_helper.object_key_from_file_path(file_name_large)
part_files = split_file(file_name_large, parts_count)
parts = []
objs = []
with allure.step(f"Put {parts_count} objects in bucket"):
for part in part_files:
s3_gate_object.put_object_s3(self.s3_client, bucket, part)
objs.append(object_key_from_file_path(part))
check_objects_in_bucket(self.s3_client, bucket, objs)
s3_client.put_object(bucket, part)
objs.append(s3_helper.object_key_from_file_path(part))
s3_helper.check_objects_in_bucket(s3_client, bucket, objs)
with allure.step("Create multipart upload object"):
upload_id = s3_gate_object.create_multipart_upload_s3(
self.s3_client, bucket, object_key
)
uploads = s3_gate_object.list_multipart_uploads_s3(self.s3_client, bucket)
upload_id = s3_client.create_multipart_upload(bucket, object_key)
uploads = s3_client.list_multipart_uploads(bucket)
assert uploads, f"Expected there are uploads in bucket {bucket}"
with allure.step("Start multipart upload"):
with allure.step("Upload parts to multipart upload"):
for part_id, obj_key in enumerate(objs, start=1):
etag = s3_gate_object.upload_part_copy_s3(
self.s3_client, bucket, object_key, upload_id, part_id, f"{bucket}/{obj_key}"
etag = s3_client.upload_part_copy(
bucket, object_key, upload_id, part_id, f"{bucket}/{obj_key}"
)
parts.append((part_id, etag))
got_parts = s3_gate_object.list_parts_s3(self.s3_client, bucket, object_key, upload_id)
s3_gate_object.complete_multipart_upload_s3(
self.s3_client, bucket, object_key, upload_id, parts
)
got_parts = s3_client.list_parts(bucket, object_key, upload_id)
with allure.step("Complete multipart upload"):
s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
assert len(got_parts) == len(
part_files
), f"Expected {parts_count} parts, got\n{got_parts}"
with allure.step("Check we can get whole object from bucket"):
got_object = s3_gate_object.get_object_s3(self.s3_client, bucket, object_key)
got_object = s3_client.get_object(bucket, object_key)
assert get_file_hash(got_object) == get_file_hash(file_name_large)
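
For orientation, the happy path exercised above is: create the upload, send each part, collect `(part_number, etag)` pairs, then complete. A sketch with the wrapper API from this diff (every part except the last must meet the 5 MB S3 minimum, hence PART_SIZE):

    from frostfs_testlib.s3 import S3ClientWrapper
    from frostfs_testlib.utils.file_utils import generate_file, split_file

    PART_SIZE = 5 * 1024 * 1024  # minimum S3 part size

    def upload_multipart(s3_client: S3ClientWrapper, bucket: str, object_key: str, parts_count: int = 3) -> None:
        big_file = generate_file(PART_SIZE * parts_count)
        upload_id = s3_client.create_multipart_upload(bucket, object_key)
        parts = []
        for part_id, part_file in enumerate(split_file(big_file, parts_count), start=1):
            etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, part_file)
            parts.append((part_id, etag))
        # Completing the upload stitches the parts into a single object
        s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)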

File diff suppressed because it is too large


@@ -2,75 +2,73 @@ import os
import allure
import pytest
from pytest_tests.helpers.container import search_container_by_name
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.s3_helper import (
check_objects_in_bucket,
object_key_from_file_path,
set_bucket_versioning,
)
from pytest_tests.helpers.storage_policy import get_simple_object_copies
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
from pytest_tests.steps.s3_gate_base import TestS3GateBase
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.cli.container import search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.steps.storage_policy import get_simple_object_copies
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file
def pytest_generate_tests(metafunc):
def pytest_generate_tests(metafunc: pytest.Metafunc):
policy = f"{os.getcwd()}/pytest_tests/resources/files/policy.json"
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize(
"s3_client",
[("aws cli", policy), ("boto3", policy)],
"s3_client, s3_policy",
[(AwsCliClient, policy), (Boto3ClientWrapper, policy)],
indirect=True,
ids=["aws cli", "boto3"],
)
@pytest.mark.s3_gate
class TestS3GatePolicy(TestS3GateBase):
class TestS3GatePolicy(ClusterTestBase):
@allure.title("Test S3: Verify bucket creation with retention policy applied")
def test_s3_bucket_location(self, simple_object_size):
def test_s3_bucket_location(
self, default_wallet: str, s3_client: S3ClientWrapper, simple_object_size: int
):
file_path_1 = generate_file(simple_object_size)
file_name_1 = object_key_from_file_path(file_path_1)
file_name_1 = s3_helper.object_key_from_file_path(file_path_1)
file_path_2 = generate_file(simple_object_size)
file_name_2 = object_key_from_file_path(file_path_2)
file_name_2 = s3_helper.object_key_from_file_path(file_path_2)
with allure.step("Create two buckets with different bucket configuration"):
bucket_1 = s3_gate_bucket.create_bucket_s3(
self.s3_client, bucket_configuration="complex"
)
set_bucket_versioning(self.s3_client, bucket_1, s3_gate_bucket.VersioningStatus.ENABLED)
bucket_2 = s3_gate_bucket.create_bucket_s3(self.s3_client, bucket_configuration="rep-3")
set_bucket_versioning(self.s3_client, bucket_2, s3_gate_bucket.VersioningStatus.ENABLED)
list_buckets = s3_gate_bucket.list_buckets_s3(self.s3_client)
bucket_1 = s3_client.create_bucket(location_constraint="complex")
s3_helper.set_bucket_versioning(s3_client, bucket_1, VersioningStatus.ENABLED)
bucket_2 = s3_client.create_bucket(location_constraint="rep-3")
s3_helper.set_bucket_versioning(s3_client, bucket_2, VersioningStatus.ENABLED)
list_buckets = s3_client.list_buckets()
assert (
bucket_1 in list_buckets and bucket_2 in list_buckets
), f"Expected two buckets {bucket_1, bucket_2}, got {list_buckets}"
# with allure.step("Check head buckets"):
head_1 = s3_gate_bucket.head_bucket(self.s3_client, bucket_1)
head_2 = s3_gate_bucket.head_bucket(self.s3_client, bucket_2)
assert head_1 == {} or head_1.get("HEAD") == None, "Expected head is empty"
assert head_2 == {} or head_2.get("HEAD") == None, "Expected head is empty"
with allure.step("Check head buckets"):
with expect_not_raises():
s3_client.head_bucket(bucket_1)
s3_client.head_bucket(bucket_2)
with allure.step("Put objects into buckets"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket_1, file_path_1)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket_2, file_path_2)
check_objects_in_bucket(self.s3_client, bucket_1, [file_name_1])
check_objects_in_bucket(self.s3_client, bucket_2, [file_name_2])
version_id_1 = s3_client.put_object(bucket_1, file_path_1)
version_id_2 = s3_client.put_object(bucket_2, file_path_2)
s3_helper.check_objects_in_bucket(s3_client, bucket_1, [file_name_1])
s3_helper.check_objects_in_bucket(s3_client, bucket_2, [file_name_2])
with allure.step("Check bucket location"):
bucket_loc_1 = s3_gate_bucket.get_bucket_location(self.s3_client, bucket_1)
bucket_loc_2 = s3_gate_bucket.get_bucket_location(self.s3_client, bucket_2)
bucket_loc_1 = s3_client.get_bucket_location(bucket_1)
bucket_loc_2 = s3_client.get_bucket_location(bucket_2)
assert bucket_loc_1 == "complex"
assert bucket_loc_2 == "rep-3"
with allure.step("Check object policy"):
cid_1 = search_container_by_name(
self.wallet, bucket_1, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
default_wallet,
bucket_1,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
copies_1 = get_simple_object_copies(
wallet=self.wallet,
wallet=default_wallet,
cid=cid_1,
oid=version_id_1,
shell=self.shell,
@@ -78,10 +76,13 @@ class TestS3GatePolicy(TestS3GateBase):
)
assert copies_1 == 1
cid_2 = search_container_by_name(
self.wallet, bucket_2, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
default_wallet,
bucket_2,
shell=self.shell,
endpoint=self.cluster.default_rpc_endpoint,
)
copies_2 = get_simple_object_copies(
wallet=self.wallet,
wallet=default_wallet,
cid=cid_2,
oid=version_id_2,
shell=self.shell,
@@ -89,14 +90,20 @@ class TestS3GatePolicy(TestS3GateBase):
)
assert copies_2 == 3
@allure.title("Test S3: bucket with unexisting location constraint")
def test_s3_bucket_wrong_location(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with unexisting location constraint policy"):
with pytest.raises(Exception):
s3_client.create_bucket(location_constraint="UNEXISTING LOCATION CONSTRAINT")
@allure.title("Test S3: bucket policy ")
def test_s3_bucket_policy(self):
def test_s3_bucket_policy(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket with default policy"):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
bucket = s3_client.create_bucket()
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("GetBucketPolicy"):
s3_gate_bucket.get_bucket_policy(self.s3_client, bucket)
s3_client.get_bucket_policy(bucket)
with allure.step("Put new policy"):
custom_policy = f"file://{os.getcwd()}/pytest_tests/resources/files/bucket_policy.json"
@@ -114,19 +121,19 @@ class TestS3GatePolicy(TestS3GateBase):
],
}
s3_gate_bucket.put_bucket_policy(self.s3_client, bucket, custom_policy)
s3_client.put_bucket_policy(bucket, custom_policy)
with allure.step("GetBucketPolicy"):
policy_1 = s3_gate_bucket.get_bucket_policy(self.s3_client, bucket)
policy_1 = s3_client.get_bucket_policy(bucket)
print(policy_1)
@allure.title("Test S3: bucket policy ")
def test_s3_cors(self):
@allure.title("Test S3: bucket CORS")
def test_s3_cors(self, s3_client: S3ClientWrapper):
with allure.step("Create bucket without cors"):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
bucket = s3_client.create_bucket()
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with pytest.raises(Exception):
bucket_cors = s3_gate_bucket.get_bucket_cors(self.s3_client, bucket)
bucket_cors = s3_client.get_bucket_cors(bucket)
with allure.step("Put bucket cors"):
cors = {
@@ -146,14 +153,14 @@ class TestS3GatePolicy(TestS3GateBase):
},
]
}
s3_gate_bucket.put_bucket_cors(self.s3_client, bucket, cors)
bucket_cors = s3_gate_bucket.get_bucket_cors(self.s3_client, bucket)
s3_client.put_bucket_cors(bucket, cors)
bucket_cors = s3_client.get_bucket_cors(bucket)
assert bucket_cors == cors.get(
"CORSRules"
), f"Expected corsrules must be {cors.get('CORSRules')}"
), f"Expected CORSRules must be {cors.get('CORSRules')}"
with allure.step("delete bucket cors"):
s3_gate_bucket.delete_bucket_cors(self.s3_client, bucket)
s3_client.delete_bucket_cors(bucket)
with pytest.raises(Exception):
bucket_cors = s3_gate_bucket.get_bucket_cors(self.s3_client, bucket)
bucket_cors = s3_client.get_bucket_cors(bucket)
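
The location-constraint case is the core policy check: the constraint names a placement policy, and the replica count on the storage nodes must match it. A method sketch in the style of `TestS3GatePolicy` (the trailing `nodes=` argument of `get_simple_object_copies` is an assumption; the hunk above truncates before it):

    from frostfs_testlib.s3 import S3ClientWrapper
    from frostfs_testlib.steps.cli.container import search_container_by_name
    from frostfs_testlib.steps.storage_policy import get_simple_object_copies
    from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
    from frostfs_testlib.utils.file_utils import generate_file

    class PolicyCopiesSketch(ClusterTestBase):
        def check_rep3_copies(self, s3_client: S3ClientWrapper, default_wallet: str, object_size: int):
            bucket = s3_client.create_bucket(location_constraint="rep-3")
            version_id = s3_client.put_object(bucket, generate_file(object_size))
            # A bucket is backed by a container; resolve it to count replicas
            cid = search_container_by_name(
                default_wallet, bucket, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
            )
            copies = get_simple_object_copies(
                wallet=default_wallet,
                cid=cid,
                oid=version_id,
                shell=self.shell,
                nodes=self.cluster.storage_nodes,  # assumed final kwarg
            )
            assert copies == 3, f"Expected 3 copies for rep-3, got {copies}"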


@@ -4,26 +4,20 @@ from typing import Tuple
import allure
import pytest
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.s3_helper import (
check_tags_by_bucket,
check_tags_by_object,
object_key_from_file_path,
)
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
from pytest_tests.steps.s3_gate_base import TestS3GateBase
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file
def pytest_generate_tests(metafunc):
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_tagging
class TestS3GateTagging(TestS3GateBase):
class TestS3GateTagging:
@staticmethod
def create_tags(count: int) -> Tuple[list, list]:
tags = []
@@ -34,82 +28,84 @@ class TestS3GateTagging(TestS3GateBase):
return tags
@allure.title("Test S3: Object tagging")
def test_s3_object_tagging(self, bucket, simple_object_size):
def test_s3_object_tagging(
self, s3_client: S3ClientWrapper, bucket: str, simple_object_size: int
):
file_path = generate_file(simple_object_size)
file_name = object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
with allure.step("Put with 3 tags object into bucket"):
tag_1 = "Tag1=Value1"
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path, Tagging=tag_1)
got_tags = s3_gate_object.get_object_tagging(self.s3_client, bucket, file_name)
s3_client.put_object(bucket, file_path, tagging=tag_1)
got_tags = s3_client.get_object_tagging(bucket, file_name)
assert got_tags, f"Expected tags, got {got_tags}"
assert got_tags == [{"Key": "Tag1", "Value": "Value1"}], "Tags must be the same"
with allure.step("Put 10 new tags for object"):
tags_2 = self.create_tags(10)
s3_gate_object.put_object_tagging(self.s3_client, bucket, file_name, tags=tags_2)
check_tags_by_object(self.s3_client, bucket, file_name, tags_2, [("Tag1", "Value1")])
s3_client.put_object_tagging(bucket, file_name, tags=tags_2)
s3_helper.check_tags_by_object(
s3_client, bucket, file_name, tags_2, [("Tag1", "Value1")]
)
with allure.step("Put 10 extra new tags for object"):
tags_3 = self.create_tags(10)
s3_gate_object.put_object_tagging(self.s3_client, bucket, file_name, tags=tags_3)
check_tags_by_object(self.s3_client, bucket, file_name, tags_3, tags_2)
s3_client.put_object_tagging(bucket, file_name, tags=tags_3)
s3_helper.check_tags_by_object(s3_client, bucket, file_name, tags_3, tags_2)
with allure.step("Copy one object with tag"):
copy_obj_path_1 = s3_gate_object.copy_object_s3(
self.s3_client, bucket, file_name, tagging_directive="COPY"
)
check_tags_by_object(self.s3_client, bucket, copy_obj_path_1, tags_3, tags_2)
copy_obj_path_1 = s3_client.copy_object(bucket, file_name, tagging_directive="COPY")
s3_helper.check_tags_by_object(s3_client, bucket, copy_obj_path_1, tags_3, tags_2)
with allure.step("Put 11 new tags to object and expect an error"):
tags_4 = self.create_tags(11)
with pytest.raises(Exception, match=r".*Object tags cannot be greater than 10*"):
# An error occurred (BadRequest) when calling the PutObjectTagging operation: Object tags cannot be greater than 10
s3_gate_object.put_object_tagging(self.s3_client, bucket, file_name, tags=tags_4)
s3_client.put_object_tagging(bucket, file_name, tags=tags_4)
with allure.step("Put empty tag"):
tags_5 = []
s3_gate_object.put_object_tagging(self.s3_client, bucket, file_name, tags=tags_5)
check_tags_by_object(self.s3_client, bucket, file_name, [])
s3_client.put_object_tagging(bucket, file_name, tags=tags_5)
s3_helper.check_tags_by_object(s3_client, bucket, file_name, [])
with allure.step("Put 10 object tags"):
tags_6 = self.create_tags(10)
s3_gate_object.put_object_tagging(self.s3_client, bucket, file_name, tags=tags_6)
check_tags_by_object(self.s3_client, bucket, file_name, tags_6)
s3_client.put_object_tagging(bucket, file_name, tags=tags_6)
s3_helper.check_tags_by_object(s3_client, bucket, file_name, tags_6)
with allure.step("Delete tags by delete-object-tagging"):
s3_gate_object.delete_object_tagging(self.s3_client, bucket, file_name)
check_tags_by_object(self.s3_client, bucket, file_name, [])
s3_client.delete_object_tagging(bucket, file_name)
s3_helper.check_tags_by_object(s3_client, bucket, file_name, [])
@allure.title("Test S3: bucket tagging")
def test_s3_bucket_tagging(self, bucket):
def test_s3_bucket_tagging(self, s3_client: S3ClientWrapper, bucket: str):
with allure.step("Put 10 bucket tags"):
tags_1 = self.create_tags(10)
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_1)
check_tags_by_bucket(self.s3_client, bucket, tags_1)
s3_client.put_bucket_tagging(bucket, tags_1)
s3_helper.check_tags_by_bucket(s3_client, bucket, tags_1)
with allure.step("Put new 10 bucket tags"):
tags_2 = self.create_tags(10)
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_2)
check_tags_by_bucket(self.s3_client, bucket, tags_2, tags_1)
s3_client.put_bucket_tagging(bucket, tags_2)
s3_helper.check_tags_by_bucket(s3_client, bucket, tags_2, tags_1)
with allure.step("Put 11 new tags to bucket and expect an error"):
tags_3 = self.create_tags(11)
with pytest.raises(Exception, match=r".*Object tags cannot be greater than 10.*"):
# An error occurred (BadRequest) when calling the PutBucketTagging operation (reached max retries: 0): Object tags cannot be greater than 10
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_3)
s3_client.put_bucket_tagging(bucket, tags_3)
with allure.step("Put empty tag"):
tags_4 = []
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_4)
check_tags_by_bucket(self.s3_client, bucket, tags_4)
s3_client.put_bucket_tagging(bucket, tags_4)
s3_helper.check_tags_by_bucket(s3_client, bucket, tags_4)
with allure.step("Put new 10 bucket tags"):
tags_5 = self.create_tags(10)
s3_gate_bucket.put_bucket_tagging(self.s3_client, bucket, tags_5)
check_tags_by_bucket(self.s3_client, bucket, tags_5, tags_2)
s3_client.put_bucket_tagging(bucket, tags_5)
s3_helper.check_tags_by_bucket(s3_client, bucket, tags_5, tags_2)
with allure.step("Delete tags by delete-bucket-tagging"):
s3_gate_bucket.delete_bucket_tagging(self.s3_client, bucket)
check_tags_by_bucket(self.s3_client, bucket, [])
s3_client.delete_bucket_tagging(bucket)
s3_helper.check_tags_by_bucket(s3_client, bucket, [])
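
Object and bucket tagging enforce the same cap seen in both tests: ten tags are accepted, an eleventh is rejected with BadRequest. A boundary sketch (the tag pair format is assumed to match the `create_tags` helper above):

    import pytest

    def check_tag_limit(s3_client, bucket: str) -> None:
        # Key/value pair format assumed from the create_tags helper above
        s3_client.put_bucket_tagging(bucket, [(f"key{i}", f"value{i}") for i in range(10)])
        with pytest.raises(Exception, match=r".*Object tags cannot be greater than 10.*"):
            s3_client.put_bucket_tagging(bucket, [(f"key{i}", f"value{i}") for i in range(11)])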


@@ -2,48 +2,41 @@ import os
import allure
import pytest
from pytest_tests.helpers.file_helper import generate_file, generate_file_with_content
from pytest_tests.helpers.s3_helper import set_bucket_versioning
from pytest_tests.steps import s3_gate_bucket, s3_gate_object
from pytest_tests.steps.s3_gate_base import TestS3GateBase
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content
def pytest_generate_tests(metafunc):
def pytest_generate_tests(metafunc: pytest.Metafunc):
if "s3_client" in metafunc.fixturenames:
metafunc.parametrize("s3_client", ["aws cli", "boto3"], indirect=True)
metafunc.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
@pytest.mark.sanity
@pytest.mark.s3_gate
@pytest.mark.s3_gate_versioning
class TestS3GateVersioning(TestS3GateBase):
@staticmethod
def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)
class TestS3GateVersioning:
@allure.title("Test S3: try to disable versioning")
def test_s3_version_off(self):
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, True)
def test_s3_version_off(self, s3_client: S3ClientWrapper):
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=True)
with pytest.raises(Exception):
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.SUSPENDED)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED)
@allure.title("Test S3: Enable and disable versioning")
def test_s3_version(self, simple_object_size):
def test_s3_version(self, s3_client: S3ClientWrapper, simple_object_size: int):
file_path = generate_file(simple_object_size)
file_name = self.object_key_from_file_path(file_path)
file_name = s3_helper.object_key_from_file_path(file_path)
bucket_objects = [file_name]
bucket = s3_gate_bucket.create_bucket_s3(self.s3_client, False)
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.SUSPENDED)
bucket = s3_client.create_bucket(object_lock_enabled_for_bucket=False)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.SUSPENDED)
with allure.step("Put object into bucket"):
s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
objects_list = s3_gate_object.list_objects_s3(self.s3_client, bucket)
s3_client.put_object(bucket, file_path)
objects_list = s3_client.list_objects(bucket)
assert (
objects_list == bucket_objects
), f"Expected list with single objects in bucket, got {objects_list}"
object_version = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
object_version = s3_client.list_objects_versions(bucket)
actual_version = [
version.get("VersionId")
for version in object_version
@@ -52,20 +45,20 @@ class TestS3GateVersioning(TestS3GateBase):
assert actual_version == [
"null"
], f"Expected version is null in list-object-versions, got {object_version}"
object_0 = s3_gate_object.head_object_s3(self.s3_client, bucket, file_name)
object_0 = s3_client.head_object(bucket, file_name)
assert (
object_0.get("VersionId") == "null"
), f"Expected version is null in head-object, got {object_0.get('VersionId')}"
set_bucket_versioning(self.s3_client, bucket, s3_gate_bucket.VersioningStatus.ENABLED)
s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
with allure.step("Put several versions of object into bucket"):
version_id_1 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_path)
version_id_1 = s3_client.put_object(bucket, file_path)
file_name_1 = generate_file_with_content(simple_object_size, file_path=file_path)
version_id_2 = s3_gate_object.put_object_s3(self.s3_client, bucket, file_name_1)
version_id_2 = s3_client.put_object(bucket, file_name_1)
with allure.step("Check bucket shows all versions"):
versions = s3_gate_object.list_objects_versions_s3(self.s3_client, bucket)
versions = s3_client.list_objects_versions(bucket)
obj_versions = [
version.get("VersionId") for version in versions if version.get("Key") == file_name
]
@@ -74,25 +67,19 @@ class TestS3GateVersioning(TestS3GateBase):
), f"Expected object has versions: {version_id_1, version_id_2, 'null'}"
with allure.step("Get object"):
object_1 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, full_output=True
)
object_1 = s3_client.get_object(bucket, file_name, full_output=True)
assert (
object_1.get("VersionId") == version_id_2
), f"Get object with version {version_id_2}"
with allure.step("Get first version of object"):
object_2 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, version_id_1, full_output=True
)
object_2 = s3_client.get_object(bucket, file_name, version_id_1, full_output=True)
assert (
object_2.get("VersionId") == version_id_1
), f"Get object with version {version_id_1}"
with allure.step("Get second version of object"):
object_3 = s3_gate_object.get_object_s3(
self.s3_client, bucket, file_name, version_id_2, full_output=True
)
object_3 = s3_client.get_object(bucket, file_name, version_id_2, full_output=True)
assert (
object_3.get("VersionId") == version_id_2
), f"Get object with version {version_id_2}"
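
The versioning flow above reduces to: enable versioning, put the same key twice, then address each revision by version id. A sketch with the same wrapper calls:

    from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
    from frostfs_testlib.steps.s3 import s3_helper
    from frostfs_testlib.utils.file_utils import generate_file, generate_file_with_content

    def put_two_versions(s3_client: S3ClientWrapper, bucket: str, size: int) -> tuple[str, str]:
        s3_helper.set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
        file_path = generate_file(size)
        version_1 = s3_client.put_object(bucket, file_path)
        # Overwriting the same key with new content creates a second version
        version_2 = s3_client.put_object(bucket, generate_file_with_content(size, file_path=file_path))
        return version_1, version_2

Each version then stays addressable: `s3_client.get_object(bucket, key, version_1, full_output=True)` returns metadata whose `VersionId` equals `version_1`, which is exactly what the assertions above verify.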


@@ -1,4 +1,5 @@
import logging
import os
from http import HTTPStatus
from re import match
@@ -6,9 +7,11 @@ import allure
import pytest
import requests
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.utils.env_utils import read_env_properties, save_env_properties
from frostfs_testlib.utils.version_utils import get_remote_binaries_versions
from pytest import FixtureRequest
from pytest_tests.helpers.binary_version import get_remote_binaries_versions
from pytest_tests.helpers.env_properties import read_env_properties, save_env_properties
from pytest_tests.resources.common import BIN_VERSIONS_FILE
logger = logging.getLogger("NeoLogger")
@@ -18,7 +21,7 @@ logger = logging.getLogger("NeoLogger")
@pytest.mark.sanity
@pytest.mark.check_binaries
@pytest.mark.skip("Skipped due to https://j.yadro.com/browse/OBJECT-628")
def test_binaries_versions(request, hosting: Hosting):
def test_binaries_versions(request: FixtureRequest, hosting: Hosting):
"""
Compare binaries versions from external source (url) and deployed on servers.
"""
@@ -29,7 +32,9 @@ def test_binaries_versions(request, hosting: Hosting):
with allure.step("Get binaries versions from servers"):
got_versions = get_remote_binaries_versions(hosting)
env_properties = read_env_properties(request.config)
environment_dir = request.config.getoption("--alluredir") or ASSETS_DIR
env_file = os.path.join(environment_dir, "environment.properties")
env_properties = read_env_properties(env_file)
# compare versions from servers and file
failed_versions = {}
@@ -45,7 +50,7 @@ def test_binaries_versions(request, hosting: Hosting):
additional_env_properties[binary] = actual_version
if env_properties and additional_env_properties:
save_env_properties(request.config, additional_env_properties)
save_env_properties(env_file, additional_env_properties)
# create clear beautiful error with aggregation info
if failed_versions:


@@ -1,10 +1,9 @@
import pytest
from pytest_tests.helpers.wallet import WalletFactory, WalletFile
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
@pytest.fixture(scope="module")
def owner_wallet(wallet_factory: WalletFactory) -> WalletFile:
def owner_wallet(wallet_factory: WalletFactory) -> WalletInfo:
"""
Returns wallet which owns containers and objects
"""
@@ -12,7 +11,7 @@ def owner_wallet(wallet_factory: WalletFactory) -> WalletFile:
@pytest.fixture(scope="module")
def user_wallet(wallet_factory: WalletFactory) -> WalletFile:
def user_wallet(wallet_factory: WalletFactory) -> WalletInfo:
"""
Returns wallet which will use objects from owner via static session
"""
@@ -20,7 +19,7 @@ def user_wallet(wallet_factory: WalletFactory) -> WalletFile:
@pytest.fixture(scope="module")
def stranger_wallet(wallet_factory: WalletFactory) -> WalletFile:
def stranger_wallet(wallet_factory: WalletFactory) -> WalletInfo:
"""
Returns stranger wallet which should fail to obtain data
"""


@@ -2,15 +2,14 @@ import random
import allure
import pytest
from frostfs_testlib.resources.common import SESSION_NOT_FOUND
from frostfs_testlib.resources.common import DEFAULT_WALLET_PASS
from frostfs_testlib.resources.error_patterns import SESSION_NOT_FOUND
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import delete_object, put_object, put_object_to_random_node
from frostfs_testlib.steps.session_token import create_session_token
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils import wallet_utils
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.frostfs_verbs import delete_object, put_object, put_object_to_random_node
from pytest_tests.resources.common import WALLET_PASS
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.steps.session_token import create_session_token
from frostfs_testlib.utils.file_utils import generate_file
@pytest.mark.sanity
@@ -41,18 +40,16 @@ class TestDynamicObjectSession(ClusterTestBase):
address = wallet_utils.get_last_address_from_wallet(wallet, "")
with allure.step("Nodes Settlements"):
(
session_token_node,
container_node,
non_container_node,
) = random.sample(self.cluster.storage_nodes, 3)
session_token_node, container_node, non_container_node = random.sample(
self.cluster.storage_nodes, 3
)
with allure.step("Create Session Token"):
session_token = create_session_token(
shell=self.shell,
owner=address,
wallet_path=wallet,
wallet_password=WALLET_PASS,
wallet_password=DEFAULT_WALLET_PASS,
rpc_endpoint=session_token_node.get_rpc_endpoint(),
)
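
The settlement step above samples three distinct nodes so that token issuance, container placement, and the uninvolved node are independent; the token itself is requested from the first. The call shape, as a method sketch on a `ClusterTestBase` subclass:

    import random

    from frostfs_testlib.resources.common import DEFAULT_WALLET_PASS
    from frostfs_testlib.steps.session_token import create_session_token
    from frostfs_testlib.utils import wallet_utils

    def issue_session_token(self, wallet: str) -> str:
        address = wallet_utils.get_last_address_from_wallet(wallet, "")
        token_node, container_node, non_container_node = random.sample(self.cluster.storage_nodes, 3)
        return create_session_token(
            shell=self.shell,
            owner=address,
            wallet_path=wallet,
            wallet_password=DEFAULT_WALLET_PASS,
            rpc_endpoint=token_node.get_rpc_endpoint(),
        )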


@@ -2,20 +2,15 @@ import logging
import allure
import pytest
from frostfs_testlib.resources.common import (
from frostfs_testlib.resources.error_patterns import (
EXPIRED_SESSION_TOKEN,
MALFORMED_REQUEST,
OBJECT_ACCESS_DENIED,
OBJECT_NOT_FOUND,
)
from frostfs_testlib.shell import Shell
from pytest import FixtureRequest
from pytest_tests.helpers.cluster import Cluster
from pytest_tests.helpers.container import create_container
from pytest_tests.helpers.epoch import ensure_fresh_epoch
from pytest_tests.helpers.file_helper import generate_file
from pytest_tests.helpers.frostfs_verbs import (
from frostfs_testlib.steps.cli.container import create_container
from frostfs_testlib.steps.cli.object import (
delete_object,
get_object,
get_object_from_random_node,
@@ -25,11 +20,8 @@ from pytest_tests.helpers.frostfs_verbs import (
put_object_to_random_node,
search_object,
)
from pytest_tests.helpers.storage_object_info import StorageObjectInfo
from pytest_tests.helpers.test_control import expect_not_raises
from pytest_tests.helpers.wallet import WalletFile
from pytest_tests.steps.cluster_test_base import ClusterTestBase
from pytest_tests.steps.session_token import (
from frostfs_testlib.steps.epoch import ensure_fresh_epoch
from frostfs_testlib.steps.session_token import (
INVALID_SIGNATURE,
UNRELATED_CONTAINER,
UNRELATED_KEY,
@@ -41,7 +33,14 @@ from pytest_tests.steps.session_token import (
get_object_signed_token,
sign_session_token,
)
from pytest_tests.steps.storage_object import delete_objects
from frostfs_testlib.steps.storage_object import delete_objects
from frostfs_testlib.storage.cluster import Cluster
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.testing.test_control import expect_not_raises
from frostfs_testlib.utils.file_utils import generate_file
from pytest import FixtureRequest
logger = logging.getLogger("NeoLogger")
@@ -50,7 +49,7 @@ RANGE_OFFSET_FOR_COMPLEX_OBJECT = 200
@pytest.fixture(scope="module")
def storage_containers(
owner_wallet: WalletFile, client_shell: Shell, cluster: Cluster
owner_wallet: WalletInfo, client_shell: Shell, cluster: Cluster
) -> list[str]:
cid = create_container(
owner_wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint
@@ -68,7 +67,7 @@ def storage_containers(
scope="module",
)
def storage_objects(
owner_wallet: WalletFile,
owner_wallet: WalletInfo,
client_shell: Shell,
storage_containers: list[str],
cluster: Cluster,
@@ -124,8 +123,8 @@ def get_ranges(
@pytest.fixture(scope="module")
def static_sessions(
owner_wallet: WalletFile,
user_wallet: WalletFile,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
client_shell: Shell,
@@ -161,7 +160,7 @@ class TestObjectStaticSession(ClusterTestBase):
)
def test_static_session_read(
self,
user_wallet: WalletFile,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
method_under_test,
@@ -193,7 +192,7 @@ class TestObjectStaticSession(ClusterTestBase):
)
def test_static_session_range(
self,
user_wallet: WalletFile,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
method_under_test,
@@ -228,7 +227,7 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session with search operation")
def test_static_session_search(
self,
user_wallet: WalletFile,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
@@ -253,7 +252,7 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session with object id not in session")
def test_static_session_unrelated_object(
self,
user_wallet: WalletFile,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
@@ -277,7 +276,7 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session with user id not in session")
def test_static_session_head_unrelated_user(
self,
stranger_wallet: WalletFile,
stranger_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
@@ -303,7 +302,7 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session with wrong verb in session")
def test_static_session_head_wrong_verb(
self,
user_wallet: WalletFile,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
@@ -329,7 +328,7 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session with container id not in session")
def test_static_session_unrelated_container(
self,
user_wallet: WalletFile,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
storage_containers: list[str],
static_sessions: dict[ObjectVerb, str],
@@ -356,9 +355,9 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session which signed by another wallet")
def test_static_session_signed_by_other(
self,
owner_wallet: WalletFile,
user_wallet: WalletFile,
stranger_wallet: WalletFile,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
stranger_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
@@ -394,8 +393,8 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session which signed for another container")
def test_static_session_signed_for_other_container(
self,
owner_wallet: WalletFile,
user_wallet: WalletFile,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
@@ -432,8 +431,8 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session which wasn't signed")
def test_static_session_without_sign(
self,
owner_wallet: WalletFile,
user_wallet: WalletFile,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
@@ -468,8 +467,8 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session which expires at next epoch")
def test_static_session_expiration_at_next(
self,
owner_wallet: WalletFile,
user_wallet: WalletFile,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
@ -539,8 +538,8 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session which is valid starting from next epoch")
def test_static_session_start_at_next(
self,
owner_wallet: WalletFile,
user_wallet: WalletFile,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
@ -624,8 +623,8 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session which is already expired")
def test_static_session_already_expired(
self,
owner_wallet: WalletFile,
user_wallet: WalletFile,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,
@ -667,7 +666,7 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Delete verb should be restricted for static session")
def test_static_session_delete_verb(
self,
user_wallet: WalletFile,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
@ -692,7 +691,7 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Put verb should be restricted for static session")
def test_static_session_put_verb(
self,
user_wallet: WalletFile,
user_wallet: WalletInfo,
storage_objects: list[StorageObjectInfo],
static_sessions: dict[ObjectVerb, str],
request: FixtureRequest,
@ -717,8 +716,8 @@ class TestObjectStaticSession(ClusterTestBase):
@allure.title("Validate static session which is issued in future epoch")
def test_static_session_invalid_issued_epoch(
self,
owner_wallet: WalletFile,
user_wallet: WalletFile,
owner_wallet: WalletInfo,
user_wallet: WalletInfo,
storage_containers: list[str],
storage_objects: list[StorageObjectInfo],
temp_directory: str,

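Every hunk in this file is the same mechanical rename: the local `pytest_tests.helpers.wallet.WalletFile` type gives way to `WalletInfo` from `frostfs_testlib.storage.dataclasses.wallet`. A minimal fixture sketch against the new dataclass follows; the constructor fields and the `default_wallet` fixture are assumptions for illustration, not a verbatim quote of the frostfs-testlib API.

```python
# Sketch only: WalletInfo's exact constructor lives in frostfs-testlib and may
# differ; `default_wallet` is a hypothetical fixture that yields a wallet path.
import pytest
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo


@pytest.fixture(scope="module")
def user_wallet(default_wallet: str) -> WalletInfo:
    # A single dataclass now carries the wallet path and password, so test
    # signatures take one WalletInfo argument instead of loose parameters.
    return WalletInfo(path=default_wallet, password="")
```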
View file

@@ -1,28 +1,21 @@
 import allure
 import pytest
-from frostfs_testlib.resources.common import PUBLIC_ACL
+from frostfs_testlib.resources.wellknown_acl import PUBLIC_ACL
 from frostfs_testlib.shell import Shell
-from pytest_tests.helpers.acl import (
-    EACLAccess,
-    EACLOperation,
-    EACLRole,
-    EACLRule,
-    create_eacl,
-    set_eacl,
-    wait_for_cache_expired,
-)
-from pytest_tests.helpers.container import (
+from frostfs_testlib.steps.acl import create_eacl, set_eacl, wait_for_cache_expired
+from frostfs_testlib.steps.cli.container import (
     create_container,
     delete_container,
     get_container,
     list_containers,
 )
-from pytest_tests.helpers.file_helper import generate_file
+from frostfs_testlib.steps.session_token import ContainerVerb, get_container_signed_token
+from frostfs_testlib.storage.dataclasses.acl import EACLAccess, EACLOperation, EACLRole, EACLRule
+from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
+from frostfs_testlib.utils.file_utils import generate_file
 from pytest_tests.helpers.object_access import can_put_object
-from pytest_tests.helpers.wallet import WalletFile
-from pytest_tests.steps.cluster_test_base import ClusterTestBase
-from pytest_tests.steps.session_token import ContainerVerb, get_container_signed_token


 @pytest.mark.static_session_container
@@ -30,8 +23,8 @@ class TestSessionTokenContainer(ClusterTestBase):
     @pytest.fixture(scope="module")
     def static_sessions(
         self,
-        owner_wallet: WalletFile,
-        user_wallet: WalletFile,
+        owner_wallet: WalletInfo,
+        user_wallet: WalletInfo,
         client_shell: Shell,
         temp_directory: str,
     ) -> dict[ContainerVerb, str]:
@@ -47,8 +40,8 @@ class TestSessionTokenContainer(ClusterTestBase):
     def test_static_session_token_container_create(
         self,
-        owner_wallet: WalletFile,
-        user_wallet: WalletFile,
+        owner_wallet: WalletInfo,
+        user_wallet: WalletInfo,
         static_sessions: dict[ContainerVerb, str],
     ):
         """
@@ -77,7 +70,7 @@ class TestSessionTokenContainer(ClusterTestBase):
     def test_static_session_token_container_create_with_other_verb(
         self,
-        user_wallet: WalletFile,
+        user_wallet: WalletInfo,
         static_sessions: dict[ContainerVerb, str],
     ):
         """
@@ -96,7 +89,7 @@ class TestSessionTokenContainer(ClusterTestBase):
     def test_static_session_token_container_create_with_other_wallet(
         self,
-        stranger_wallet: WalletFile,
+        stranger_wallet: WalletInfo,
         static_sessions: dict[ContainerVerb, str],
     ):
         """
@@ -114,8 +107,8 @@ class TestSessionTokenContainer(ClusterTestBase):
     def test_static_session_token_container_delete(
         self,
-        owner_wallet: WalletFile,
-        user_wallet: WalletFile,
+        owner_wallet: WalletInfo,
+        user_wallet: WalletInfo,
         static_sessions: dict[ContainerVerb, str],
     ):
         """
@@ -144,9 +137,9 @@ class TestSessionTokenContainer(ClusterTestBase):
     def test_static_session_token_container_set_eacl(
         self,
-        owner_wallet: WalletFile,
-        user_wallet: WalletFile,
-        stranger_wallet: WalletFile,
+        owner_wallet: WalletInfo,
+        user_wallet: WalletInfo,
+        stranger_wallet: WalletInfo,
         static_sessions: dict[ContainerVerb, str],
         simple_object_size,
     ):
@@ -163,7 +156,7 @@ class TestSessionTokenContainer(ClusterTestBase):
         file_path = generate_file(simple_object_size)
         assert can_put_object(stranger_wallet.path, cid, file_path, self.shell, self.cluster)

-        with allure.step(f"Deny all operations for other via eACL"):
+        with allure.step("Deny all operations for other via eACL"):
             eacl_deny = [
                 EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
                 for op in EACLOperation

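The import rework above splits the old `pytest_tests.helpers.acl` module in two: the rule dataclasses now live in `frostfs_testlib.storage.dataclasses.acl`, while the operational steps (`create_eacl`, `set_eacl`, `wait_for_cache_expired`) live in `frostfs_testlib.steps.acl`. Under that layout, the deny-all rule set from the last hunk is built like this (a sketch reusing only names visible in the diff):

```python
from frostfs_testlib.storage.dataclasses.acl import (
    EACLAccess,
    EACLOperation,
    EACLRole,
    EACLRule,
)

# Deny every eACL operation for the OTHERS role, mirroring the test body above.
eacl_deny = [
    EACLRule(access=EACLAccess.DENY, role=EACLRole.OTHERS, operation=op)
    for op in EACLOperation
]
```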
View file

@@ -9,9 +9,9 @@ import pytest
 import yaml
 from configobj import ConfigObj
 from frostfs_testlib.cli import FrostfsCli
-from pytest_tests.helpers.cluster import Cluster, StorageNode
-from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT, WALLET_CONFIG
+from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
+from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
+from frostfs_testlib.storage.cluster import Cluster, StorageNode

 SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
 BLOBSTOR_PREFIX = "_BLOBSTOR_"
@@ -137,7 +137,7 @@ class TestControlShard:
         cli_config = node.host.get_cli_config("frostfs-cli")

-        cli = FrostfsCli(node.host.get_shell(), cli_config.exec_path, WALLET_CONFIG)
+        cli = FrostfsCli(node.host.get_shell(), cli_config.exec_path, DEFAULT_WALLET_CONFIG)
         result = cli.shards.list(
             endpoint=control_endpoint,
             wallet=wallet_path,

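Two constant moves drive this file: `CLI_DEFAULT_TIMEOUT` now comes from `frostfs_testlib.resources.cli`, and the wallet config constant is `DEFAULT_WALLET_CONFIG` in `frostfs_testlib.resources.common`. A sketch of the updated call follows; the diff is truncated after `wallet=wallet_path`, so the `timeout` keyword and the omission of further arguments are assumptions rather than the exact test code.

```python
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.storage.cluster import StorageNode


def list_shards(node: StorageNode, control_endpoint: str, wallet_path: str):
    # DEFAULT_WALLET_CONFIG replaces the old local WALLET_CONFIG constant.
    cli_config = node.host.get_cli_config("frostfs-cli")
    cli = FrostfsCli(node.host.get_shell(), cli_config.exec_path, DEFAULT_WALLET_CONFIG)
    return cli.shards.list(
        endpoint=control_endpoint,
        wallet=wallet_path,
        timeout=CLI_DEFAULT_TIMEOUT,  # assumed keyword, see note above
    )
```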
View file

@@ -4,8 +4,7 @@ from datetime import datetime

 import allure
 import pytest
-
-from pytest_tests.steps.cluster_test_base import ClusterTestBase
+from frostfs_testlib.testing.cluster_test_base import ClusterTestBase


 class TestLogs(ClusterTestBase):

View file

@@ -1,10 +1,10 @@
-allure-pytest==2.9.45
-allure-python-commons==2.9.45
+allure-pytest==2.13.2
+allure-python-commons==2.13.2
 base58==2.1.0
 boto3==1.16.33
 botocore==1.19.33
 configobj==5.0.6
-frostfs-testlib==1.3.1
+frostfs-testlib>=2.0.1
 neo-mamba==1.0.0
 pexpect==4.8.0
 pyyaml==6.0
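
The pin changes here are what force the import moves throughout this compare: `frostfs-testlib` goes from a hard `==1.3.1` pin to a `>=2.0.1` floor, consistent with the new `steps`/`storage.dataclasses` layout used in the test diffs, and both allure packages move to 2.13.2. A quick local check that an environment meets the new floor, as a convenience sketch outside the suite itself:

```python
from importlib.metadata import version

from packaging.version import Version

# Fails loudly if the installed testlib predates the 2.x layout used above.
installed = Version(version("frostfs-testlib"))
assert installed >= Version("2.0.1"), f"frostfs-testlib {installed} is too old"
```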