Compare commits

10 commits

Author SHA1 Message Date
180907b154 [#226] Restore invalid_obj check
Signed-off-by: a.berezin <a.berezin@yadro.com>
2024-05-16 09:57:13 +00:00
3dc29d59f6 [#212] Return response in complete_multipart_upload function 2024-04-23 23:57:32 +03:00
aa8c83b682 port fix fill_percent
Signed-off-by: m.malygina <m.malygina@yadro.com>
2024-03-28 14:54:39 +03:00
e765276c3f [#187] Add total bytes to report to 0.38
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2024-02-27 12:03:20 +03:00
b464591153 [TrueCloudLab/xk6-frostfs#125] Add acl option
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2024-02-05 15:50:38 +00:00
c978f55e90 [#170] Update metrics
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2024-02-05 15:42:37 +00:00
2255ee465f [#173] Add flag to remove registry file
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2024-02-05 12:41:29 +03:00
2ec24f4cd1 [#168] Strip components for new xk6 archive and update unit tests for 0.38
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2024-01-26 13:36:44 +03:00
2da1a4583f [#165] Add local flag to preset in load
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2024-01-22 19:08:30 +03:00
cda3773fa8 [#163] Refactor frostfs-cli functional
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-01-22 14:26:25 +03:00
83 changed files with 1244 additions and 3061 deletions

View file

@ -1,108 +0,0 @@
hosts:
- address: localhost
attributes:
sudo_shell: false
plugin_name: docker
healthcheck_plugin_name: basic
attributes:
skip_readiness_check: True
force_transactions: True
services:
- name: frostfs-storage_01
attributes:
container_name: s01
config_path: /etc/frostfs/storage/config.yml
wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
local_wallet_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
wallet_password: ""
volume_name: storage_storage_s01
endpoint_data0: s01.frostfs.devenv:8080
control_endpoint: s01.frostfs.devenv:8081
un_locode: "RU MOW"
- name: frostfs-storage_02
attributes:
container_name: s02
config_path: /etc/frostfs/storage/config.yml
wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
local_wallet_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
wallet_password: ""
volume_name: storage_storage_s02
endpoint_data0: s02.frostfs.devenv:8080
control_endpoint: s02.frostfs.devenv:8081
un_locode: "RU LED"
- name: frostfs-storage_03
attributes:
container_name: s03
config_path: /etc/frostfs/storage/config.yml
wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
local_wallet_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
wallet_password: ""
volume_name: storage_storage_s03
endpoint_data0: s03.frostfs.devenv:8080
control_endpoint: s03.frostfs.devenv:8081
un_locode: "SE STO"
- name: frostfs-storage_04
attributes:
container_name: s04
config_path: /etc/frostfs/storage/config.yml
wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
local_wallet_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
wallet_password: ""
volume_name: storage_storage_s04
endpoint_data0: s04.frostfs.devenv:8080
control_endpoint: s04.frostfs.devenv:8081
un_locode: "FI HEL"
- name: frostfs-s3_01
attributes:
container_name: s3_gate
config_path: ../frostfs-dev-env/services/s3_gate/.s3.env
wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
local_wallet_config_path: ./TemporaryDir/password-s3.yml
local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
wallet_password: "s3"
endpoint_data0: https://s3.frostfs.devenv:8080
- name: frostfs-http_01
attributes:
container_name: http_gate
config_path: ../frostfs-dev-env/services/http_gate/.http.env
wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
local_wallet_config_path: ./TemporaryDir/password-other.yml
local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
wallet_password: "one"
endpoint_data0: http://http.frostfs.devenv
- name: frostfs-ir_01
attributes:
container_name: ir01
config_path: ../frostfs-dev-env/services/ir/.ir.env
wallet_path: ../frostfs-dev-env/services/ir/az.json
local_wallet_config_path: ./TemporaryDir/password-other.yml
local_wallet_path: ../frostfs-dev-env/services/ir/az.json
wallet_password: "one"
- name: neo-go_01
attributes:
container_name: morph_chain
config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml
wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
local_wallet_config_path: ./TemporaryDir/password-other.yml
local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
wallet_password: "one"
endpoint_internal0: http://morph-chain.frostfs.devenv:30333
- name: main-chain_01
attributes:
container_name: main_chain
config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml
wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
local_wallet_config_path: ./TemporaryDir/password-other.yml
local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
wallet_password: "one"
endpoint_internal0: http://main-chain.frostfs.devenv:30333
- name: coredns_01
attributes:
container_name: coredns
clis:
- name: frostfs-cli
exec_path: frostfs-cli
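
The removed block above is the hosting layout consumed by the testlib. A minimal sketch of how such a config is loaded, mirroring the hosting fixture that appears later in this diff (the file name is an assumption):

import yaml

from frostfs_testlib.hosting.hosting import Hosting

# Hypothetical path to a hosts config like the one above
with open("hosting.yaml", "r") as file:
    hosting_config = yaml.full_load(file)

hosting_instance = Hosting()
hosting_instance.configure(hosting_config)  # registers hosts, their services and CLIs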

View file

@ -1,21 +0,0 @@
name: DCO action
on: [pull_request]
jobs:
dco:
name: DCO
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Setup Go
uses: actions/setup-go@v3
with:
go-version: '1.21'
- name: Run commit format checker
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
with:
from: 'origin/${{ github.event.pull_request.base.ref }}'

1 .github/CODEOWNERS vendored Normal file
View file

@ -0,0 +1 @@
* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny

21 .github/workflows/dco.yml vendored Normal file
View file

@ -0,0 +1,21 @@
name: DCO check
on:
pull_request:
branches:
- master
jobs:
commits_check_job:
runs-on: ubuntu-latest
name: Commits Check
steps:
- name: Get PR Commits
id: 'get-pr-commits'
uses: tim-actions/get-pr-commits@master
with:
token: ${{ secrets.GITHUB_TOKEN }}
- name: DCO Check
uses: tim-actions/dco@master
with:
commits: ${{ steps.get-pr-commits.outputs.commits }}

View file

@ -1 +0,0 @@
* @JuliaKovshova @abereziny @d.zayakin @anikeev-yadro @anurindm @ylukoyan @i.niyazov

View file

@ -51,26 +51,19 @@ basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"
config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager"
[project.entry-points."frostfs.testlib.services"]
frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
[project.entry-points."frostfs.testlib.credentials_providers"]
authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider"
wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"
[project.entry-points."frostfs.testlib.bucket_cid_resolver"]
frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver"
s = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
s3-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
http-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
morph-chain = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
[tool.isort]
profile = "black"
src_paths = ["src", "tests"]
line_length = 140
line_length = 120
[tool.black]
line-length = 140
line-length = 120
target-version = ["py310"]
[tool.bumpver]
@ -89,7 +82,4 @@ push = false
filterwarnings = [
"ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning",
]
testpaths = ["tests"]
[project.entry-points.pytest11]
testlib = "frostfs_testlib"
testpaths = ["tests"]
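
The entry-point groups above are how the testlib discovers pluggable services, healthchecks and credential providers at runtime. A minimal discovery sketch for the services group, mirroring the configure_testlib fixture shown later in this diff:

from importlib.metadata import entry_points

from frostfs_testlib.storage import get_service_registry

registry = get_service_registry()
for svc in entry_points(group="frostfs.testlib.services"):
    # e.g. the "frostfs-storage" entry loads the StorageNode class
    registry.register_service(svc.name, svc.load())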

View file

@ -1,3 +1 @@
__version__ = "2.0.1"
from .fixtures import configure_testlib, hosting

View file

@ -1,5 +1,5 @@
from frostfs_testlib.analytics import test_case
from frostfs_testlib.analytics.test_case import TestCasePriority
from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector
from frostfs_testlib.analytics.test_exporter import TСExporter
from frostfs_testlib.analytics.test_exporter import TestExporter
from frostfs_testlib.analytics.testrail_exporter import TestrailExporter

View file

@ -3,8 +3,7 @@ from abc import ABC, abstractmethod
from frostfs_testlib.analytics.test_collector import TestCase
# TODO: REMOVE ME
class TСExporter(ABC):
class TestExporter(ABC):
test_cases_cache = []
test_suites_cache = []
@ -47,7 +46,9 @@ class TСExporter(ABC):
"""
@abstractmethod
def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
def update_test_case(
self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
) -> None:
"""
Update test case in TMS
"""
@ -59,7 +60,9 @@ class TСExporter(ABC):
for test_case in test_cases:
test_suite = self.get_or_create_test_suite(test_case.suite_name)
test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name)
test_section = self.get_or_create_suite_section(
test_suite, test_case.suite_section_name
)
test_case_in_tms = self.search_test_case_id(test_case.id)
steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()]

View file

@ -1,10 +1,10 @@
from testrail_api import TestRailAPI
from frostfs_testlib.analytics.test_collector import TestCase
from frostfs_testlib.analytics.test_exporter import TСExporter
from frostfs_testlib.analytics.test_exporter import TestExporter
class TestrailExporter(TСExporter):
class TestrailExporter(TestExporter):
def __init__(
self,
tr_url: str,
@ -62,13 +62,19 @@ class TestrailExporter(TСExporter):
It's help do not call TMS each time then we search test case
"""
for test_suite in self.test_suites_cache:
self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]))
self.test_cases_cache.extend(
self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])
)
def search_test_case_id(self, test_case_id: str) -> object:
"""
Find test cases in TestRail (cache) by ID
"""
test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id]
test_cases = [
test_case
for test_case in self.test_cases_cache
if test_case["custom_autotest_name"] == test_case_id
]
if len(test_cases) > 1:
raise RuntimeError(f"Too many results found in test rail for id {test_case_id}")
@ -81,7 +87,9 @@ class TestrailExporter(TСExporter):
"""
Get suite name with exact name from Testrail or create if not exist
"""
test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name]
test_rail_suites = [
suite for suite in self.test_suites_cache if suite["name"] == test_suite_name
]
if not test_rail_suites:
test_rail_suite = self.api.suites.add_suite(
@ -94,13 +102,17 @@ class TestrailExporter(TСExporter):
elif len(test_rail_suites) == 1:
return test_rail_suites.pop()
else:
raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}")
raise RuntimeError(
f"Too many results found in test rail for suite name {test_suite_name}"
)
def get_or_create_suite_section(self, test_rail_suite, section_name) -> object:
"""
Get suite section with exact name from Testrail or create new one if not exist
"""
test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name]
test_rail_sections = [
section for section in test_rail_suite["sections"] if section["name"] == section_name
]
if not test_rail_sections:
test_rail_section = self.api.sections.add_section(
@ -116,7 +128,9 @@ class TestrailExporter(TСExporter):
elif len(test_rail_sections) == 1:
return test_rail_sections.pop()
else:
raise RuntimeError(f"Too many results found in test rail for section name {section_name}")
raise RuntimeError(
f"Too many results found in test rail for section name {section_name}"
)
def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict:
"""
@ -150,7 +164,9 @@ class TestrailExporter(TСExporter):
self.api.cases.add_case(**request_body)
def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
def update_test_case(
self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
) -> None:
"""
Update test case in Testrail
"""

View file

@ -1,5 +1,4 @@
from frostfs_testlib.cli.frostfs_adm import FrostfsAdm
from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
from frostfs_testlib.cli.frostfs_cli import FrostfsCli
from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.cli.neogo import NeoGo, NetworkType

View file

@ -110,7 +110,7 @@ class FrostfsAdmMorph(CliCommand):
**{param: param_value for param, param_value in locals().items() if param not in ["self"]},
)
def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult:
def dump_hashes(self, rpc_endpoint: str) -> CommandResult:
"""Dump deployed contract hashes.
Args:

View file

@ -1,70 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliApeManager(CliCommand):
"""Operations with APE manager."""
def add(
self,
rpc_endpoint: str,
chain_id: Optional[str] = None,
chain_id_hex: Optional[str] = None,
path: Optional[str] = None,
rule: Optional[str] | Optional[list[str]] = None,
target_name: Optional[str] = None,
target_type: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Add rule chain for a target."""
return self._execute(
"ape-manager add",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list(
self,
rpc_endpoint: str,
target_name: Optional[str] = None,
target_type: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Generate APE override by target and APE chains. Util command.
Generated APE override can be dumped to a file in JSON format that is passed to
"create" command.
"""
return self._execute(
"ape-manager list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def remove(
self,
rpc_endpoint: str,
chain_id: Optional[str] = None,
chain_id_hex: Optional[str] = None,
target_name: Optional[str] = None,
target_type: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Generate APE override by target and APE chains. Util command.
Generated APE override can be dumped to a file in JSON format that is passed to
"create" command.
"""
return self._execute(
"ape-manager remove",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -1,54 +0,0 @@
from typing import Optional
from frostfs_testlib.cli.cli_command import CliCommand
from frostfs_testlib.shell import CommandResult
class FrostfsCliBearer(CliCommand):
def create(
self,
rpc_endpoint: str,
out: str,
issued_at: Optional[str] = None,
expire_at: Optional[str] = None,
not_valid_before: Optional[str] = None,
ape: Optional[str] = None,
eacl: Optional[str] = None,
owner: Optional[str] = None,
json: Optional[bool] = False,
impersonate: Optional[bool] = False,
wallet: Optional[str] = None,
address: Optional[str] = None,
) -> CommandResult:
"""Create bearer token.
All epoch flags can be specified relative to the current epoch with the +n syntax.
In this case --rpc-endpoint flag should be specified and the epoch in bearer token
is set to current epoch + n.
"""
return self._execute(
"bearer create",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def generate_ape_override(
self,
chain_id: Optional[str] = None,
chain_id_hex: Optional[str] = None,
cid: Optional[str] = None,
output: Optional[str] = None,
path: Optional[str] = None,
rule: Optional[str] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
) -> CommandResult:
"""Generate APE override by target and APE chains. Util command.
Generated APE override can be dumped to a file in JSON format that is passed to
"create" command.
"""
return self._execute(
"bearer generate-ape-override",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
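
A usage sketch for the relative-epoch syntax described in the docstring above; the shell, endpoint and paths are illustrative, and the constructor arguments are inferred from the FrostfsCli diff below:

cli = FrostfsCli(shell, "frostfs-cli", config_file="/etc/frostfs-cli/config.yml")
cli.bearer.create(
    rpc_endpoint="s01.frostfs.devenv:8080",  # required when +n epochs are used
    out="/tmp/bearer.json",
    expire_at="+100",                        # current epoch + 100
    impersonate=True,
)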

View file

@ -2,8 +2,6 @@ from typing import Optional
from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting
from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL
from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager
from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer
from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer
from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl
from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap
@ -43,5 +41,3 @@ class FrostfsCli:
self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file)
self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file)
self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file)

View file

@ -8,9 +8,7 @@ class FrostfsCliContainer(CliCommand):
def create(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
nns_zone: Optional[str] = None,
nns_name: Optional[str] = None,
wallet: str,
address: Optional[str] = None,
attributes: Optional[dict] = None,
basic_acl: Optional[str] = None,
@ -47,8 +45,6 @@ class FrostfsCliContainer(CliCommand):
wallet: WIF (NEP-2) string or path to the wallet or binary key.
xhdr: Dict with request X-Headers.
timeout: Timeout for the operation (default 15s).
nns_zone: Container nns zone attribute.
nns_name: Container nns name attribute.
Returns:
Command's result.
@ -61,8 +57,8 @@ class FrostfsCliContainer(CliCommand):
def delete(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
await_mode: bool = False,
session: Optional[str] = None,
@ -97,8 +93,8 @@ class FrostfsCliContainer(CliCommand):
def get(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
await_mode: bool = False,
to: Optional[str] = None,
@ -133,8 +129,8 @@ class FrostfsCliContainer(CliCommand):
def get_eacl(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
await_mode: bool = False,
to: Optional[str] = None,
@ -170,7 +166,7 @@ class FrostfsCliContainer(CliCommand):
def list(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet: str,
address: Optional[str] = None,
owner: Optional[str] = None,
ttl: Optional[int] = None,
@ -201,8 +197,8 @@ class FrostfsCliContainer(CliCommand):
def list_objects(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
@ -231,8 +227,8 @@ class FrostfsCliContainer(CliCommand):
def set_eacl(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
await_mode: bool = False,
table: Optional[str] = None,
@ -268,8 +264,8 @@ class FrostfsCliContainer(CliCommand):
def search_node(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
ttl: Optional[int] = None,
from_file: Optional[str] = None,

View file

@ -39,12 +39,14 @@ class FrostfsCliControl(CliCommand):
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Health check for FrostFS storage nodes
"""Set status of the storage node in FrostFS network map
Args:
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
force: Force turning to local maintenance
status: New netmap status keyword ('online', 'offline', 'maintenance')
timeout: Timeout for an operation (default 15s)
Returns:
@ -54,179 +56,3 @@ class FrostfsCliControl(CliCommand):
"control healthcheck",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def drop_objects(
self,
endpoint: str,
objects: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
wallet: Path to the wallet or binary key
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
objects: List of object addresses to be removed in string format
timeout: Timeout for an operation (default 15s)
Returns:
Command`s result.
"""
return self._execute(
"control drop-objects",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def add_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
rule: Optional[list[str]] = None,
path: Optional[str] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
path: Path to encoded chain in JSON or binary format
rule: Rule statement
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control add-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def get_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address string Address of wallet account
chain-id string Chain id
chain-id-hex Flag to parse chain ID as hex
endpoint string Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name string Resource name in APE resource name format
target-type string Resource type(container/namespace)
timeout duration Timeout for an operation (default 15s)
wallet string Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control get-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_rules(
self,
endpoint: str,
target_name: str,
target_type: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control list-rules",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list_targets(
self,
endpoint: str,
chain_name: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
chain-name: Chain name(ingress|s3)
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control list-targets",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def remove_rule(
self,
endpoint: str,
chain_id: str,
target_name: str,
target_type: str,
all: Optional[bool] = None,
chain_id_hex: Optional[bool] = None,
wallet: Optional[str] = None,
address: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Drop objects from the node's local storage
Args:
address: Address of wallet account
all: Remove all chains
chain-id: Assign ID to the parsed chain
chain-id-hex: Flag to parse chain ID as hex
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
target-name: Resource name in APE resource name format
target-type: Resource type(container/namespace)
timeout: Timeout for an operation (default 15s)
wallet: Path to the wallet or binary key
Returns:
Command`s result.
"""
return self._execute(
"control remove-rule",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -8,7 +8,7 @@ class FrostfsCliNetmap(CliCommand):
def epoch(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet: str,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
@ -38,7 +38,7 @@ class FrostfsCliNetmap(CliCommand):
def netinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet: str,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,
@ -68,7 +68,7 @@ class FrostfsCliNetmap(CliCommand):
def nodeinfo(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet: str,
address: Optional[str] = None,
generate_key: bool = False,
json: bool = False,
@ -100,7 +100,7 @@ class FrostfsCliNetmap(CliCommand):
def snapshot(
self,
rpc_endpoint: str,
wallet: Optional[str] = None,
wallet: str,
address: Optional[str] = None,
generate_key: bool = False,
ttl: Optional[int] = None,

View file

@ -8,9 +8,9 @@ class FrostfsCliObject(CliCommand):
def delete(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
@ -44,9 +44,9 @@ class FrostfsCliObject(CliCommand):
def get(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
file: Optional[str] = None,
@ -88,9 +88,9 @@ class FrostfsCliObject(CliCommand):
def hash(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
range: Optional[str] = None,
@ -130,9 +130,9 @@ class FrostfsCliObject(CliCommand):
def head(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
file: Optional[str] = None,
@ -176,9 +176,9 @@ class FrostfsCliObject(CliCommand):
def lock(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
oid: str,
wallet: Optional[str] = None,
lifetime: Optional[int] = None,
expire_at: Optional[int] = None,
address: Optional[str] = None,
@ -216,9 +216,9 @@ class FrostfsCliObject(CliCommand):
def put(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
file: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
attributes: Optional[dict] = None,
bearer: Optional[str] = None,
@ -267,10 +267,10 @@ class FrostfsCliObject(CliCommand):
def range(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
oid: str,
range: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
file: Optional[str] = None,
@ -311,8 +311,8 @@ class FrostfsCliObject(CliCommand):
def search(
self,
rpc_endpoint: str,
wallet: str,
cid: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
filters: Optional[list] = None,
@ -357,12 +357,11 @@ class FrostfsCliObject(CliCommand):
wallet: Optional[str] = None,
address: Optional[str] = None,
bearer: Optional[str] = None,
generate_key: Optional[bool] = None,
generate_key: Optional = None,
oid: Optional[str] = None,
trace: bool = False,
root: bool = False,
verify_presence_all: bool = False,
json: bool = False,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = None,

View file

@ -9,6 +9,7 @@ class FrostfsCliSession(CliCommand):
self,
rpc_endpoint: str,
wallet: str,
wallet_password: str,
out: str,
lifetime: Optional[int] = None,
address: Optional[str] = None,
@ -29,7 +30,12 @@ class FrostfsCliSession(CliCommand):
Returns:
Command's result.
"""
return self._execute(
return self._execute_with_password(
"session create",
**{param: value for param, value in locals().items() if param not in ["self"]},
wallet_password,
**{
param: value
for param, value in locals().items()
if param not in ["self", "wallet_password"]
},
)
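
All of these wrappers rely on the same idiom: every named parameter of the method is forwarded via locals(), and _execute / _execute_with_password render them as CLI flags (the password is passed separately and excluded from the forwarded params, since it feeds the prompt rather than a flag). A hedged sketch of the rendering; the real logic lives in frostfs_testlib.cli.cli_command and may differ:

def _render_flags(command: str, **params) -> str:
    parts = [command]
    for name, value in params.items():
        if value is None:
            continue                            # unset optional flags are omitted
        flag = name.replace("_", "-")           # rpc_endpoint -> rpc-endpoint
        if isinstance(value, bool):
            if value:
                parts.append(f"--{flag}")       # booleans become bare flags
        else:
            parts.append(f"--{flag} '{value}'")
    return " ".join(parts)

# _render_flags("session create", rpc_endpoint="s01.frostfs.devenv:8080", out="/tmp/token", lifetime=None)
# -> "session create --rpc-endpoint 's01.frostfs.devenv:8080' --out '/tmp/token'"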

View file

@ -39,10 +39,10 @@ class FrostfsCliShards(CliCommand):
def set_mode(
self,
endpoint: str,
wallet: str,
wallet_password: str,
mode: str,
id: Optional[list[str]],
wallet: Optional[str] = None,
wallet_password: Optional[str] = None,
address: Optional[str] = None,
all: bool = False,
clear_errors: bool = False,
@ -65,11 +65,6 @@ class FrostfsCliShards(CliCommand):
Returns:
Command's result.
"""
if not wallet_password:
return self._execute(
"control shards set-mode",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
return self._execute_with_password(
"control shards set-mode",
wallet_password,
@ -142,4 +137,3 @@ class FrostfsCliShards(CliCommand):
wallet_password,
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
)

View file

@ -27,27 +27,3 @@ class FrostfsCliTree(CliCommand):
"tree healthcheck",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def list(
self,
cid: str,
rpc_endpoint: Optional[str] = None,
wallet: Optional[str] = None,
timeout: Optional[str] = None,
) -> CommandResult:
"""Get Tree List
Args:
cid: Container ID.
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
wallet: WIF (NEP-2) string or path to the wallet or binary key.
timeout: duration Timeout for the operation (default 15 s)
Returns:
Command's result.
"""
return self._execute(
"tree list",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -6,12 +6,12 @@ from frostfs_testlib.shell import CommandResult
class FrostfsCliUtil(CliCommand):
def sign_bearer_token(
self,
from_file: str,
to_file: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
json: Optional[bool] = False,
self,
wallet: str,
from_file: str,
to_file: str,
address: Optional[str] = None,
json: Optional[bool] = False,
) -> CommandResult:
"""
Sign bearer token to use it in requests.
@ -33,9 +33,9 @@ class FrostfsCliUtil(CliCommand):
def sign_session_token(
self,
wallet: str,
from_file: str,
to_file: str,
wallet: Optional[str] = None,
address: Optional[str] = None,
) -> CommandResult:
"""
@ -54,11 +54,3 @@ class FrostfsCliUtil(CliCommand):
"util sign session-token",
**{param: value for param, value in locals().items() if param not in ["self"]},
)
def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False):
"""Convert representation of extended ACL table."""
return self._execute(
"util convert eacl",
**{param: value for param, value in locals().items() if param not in ["self"]},
)

View file

@ -1,30 +0,0 @@
from typing import Optional
from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.shell.interfaces import CommandOptions, Shell
class GenericCli(object):
def __init__(self, cli_name: str, host: Host) -> None:
self.host = host
self.cli_name = cli_name
def __call__(
self,
args: Optional[str] = "",
pipes: Optional[str] = "",
shell: Optional[Shell] = None,
options: Optional[CommandOptions] = None,
):
if not shell:
shell = self.host.get_shell()
cli_config = self.host.get_cli_config(self.cli_name, True)
extra_args = ""
exec_path = self.cli_name
if cli_config:
extra_args = " ".join(cli_config.extra_args)
exec_path = cli_config.exec_path
cmd = f"{exec_path} {args} {extra_args} {pipes}"
return shell.exec(cmd, options)
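
A usage sketch for GenericCli above; host is any frostfs_testlib Host and the command text is illustrative:

frostfs_cli = GenericCli("frostfs-cli", host)
result = frostfs_cli("container list --rpc-endpoint s01.frostfs.devenv:8080")
# Runs "<exec_path> container list ... <extra_args>" through the host's shell,
# taking exec_path and extra_args from the host's CLI config when one exists.
print(result.stdout)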

View file

@ -1,7 +1,7 @@
import re
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo
class NetmapParser:
@ -15,8 +15,6 @@ class NetmapParser:
"epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)",
"inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)",
"maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)",
"maximum_count_of_data_shards": r"Maximum count of data shards: (?P<maximum_count_of_data_shards>\d+)",
"maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P<maximum_count_of_parity_shards>\d+)",
"withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
"homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
"maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
@ -46,7 +44,7 @@ class NetmapParser:
regexes = {
"node_id": r"\d+: (?P<node_id>\w+)",
"node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
"node_status": r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)",
"node_status": r"(?P<node_status>ONLINE|OFFLINE)",
"cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
"continent": r"Continent: (?P<continent>\w+)",
"country": r"Country: (?P<country>\w+)",
@ -64,17 +62,14 @@ class NetmapParser:
for node in netmap_nodes:
for key, regex in regexes.items():
search_result = re.search(regex, node, flags=re.MULTILINE)
if search_result == None:
result_netmap[key] = None
continue
if key == "node_data_ips":
result_netmap[key] = search_result[key].strip().split(" ")
continue
if key == "external_address":
result_netmap[key] = search_result[key].strip().split(",")
continue
if key == "node_status":
result_netmap[key] = NodeStatus(search_result[key].strip().lower())
if search_result == None:
result_netmap[key] = None
continue
result_netmap[key] = search_result[key].strip()
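
An illustrative pass through the parsing loop above; the sample line only mimics frostfs-cli netmap output and is an assumption:

import re

node_text = "1: 02b1abc ONLINE /ip4/10.78.69.115/tcp/8080"
match = re.search(r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)", node_text, flags=re.MULTILINE)
# On the side of the diff that keeps NodeStatus, the raw string is normalized:
# NodeStatus(match["node_status"].strip().lower())
print(match["node_status"])  # ONLINE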

View file

@ -1,47 +0,0 @@
import re
from datetime import datetime
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.shell import LocalShell
from frostfs_testlib.steps.cli.container import list_containers
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
class AuthmateS3CredentialsProvider(S3CredentialsProvider):
@reporter.step("Init S3 Credentials using Authmate CLI")
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes
shell = LocalShell()
wallet = user.wallet
endpoint = cluster_node.storage_node.get_rpc_endpoint()
gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
# unique short bucket name
bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}"
frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate.secret.issue(
wallet=wallet.path,
peer=endpoint,
gate_public_key=gate_public_keys,
wallet_password=wallet.password,
container_policy=location_constraints,
container_friendly_name=bucket,
).stdout
aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id"))
aws_secret_access_key = str(
re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key")
)
cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
containers_list = list_containers(wallet, shell, endpoint)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key)
return user.s3_credentials

View file

@ -1,51 +0,0 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Optional
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
@dataclass
class S3Credentials:
access_key: str
secret_key: str
@dataclass
class User:
name: str
attributes: dict[str, Any] = field(default_factory=dict)
wallet: WalletInfo | None = None
s3_credentials: S3Credentials | None = None
class S3CredentialsProvider(ABC):
def __init__(self, cluster: Cluster) -> None:
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None, **kwargs) -> S3Credentials:
raise NotImplementedError("Directly called abstract class?")
class GrpcCredentialsProvider(ABC):
def __init__(self, cluster: Cluster) -> None:
self.cluster = cluster
@abstractmethod
def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo:
raise NotImplementedError("Directly called abstract class?")
class CredentialsProvider(object):
S3: S3CredentialsProvider
GRPC: GrpcCredentialsProvider
def __init__(self, cluster: Cluster) -> None:
config = cluster.cluster_nodes[0].host.config
s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name)
self.S3 = s3_cls(cluster)
grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name)
self.GRPC = grpc_cls(cluster)
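
A usage sketch for the provider facade above; the Cluster construction is assumed to have happened elsewhere:

provider = CredentialsProvider(cluster)
user = User(name="load-user")

wallet = provider.GRPC.provide(user, cluster.cluster_nodes[0])   # also fills user.wallet
s3_creds = provider.S3.provide(user, cluster.cluster_nodes[0])   # also fills user.s3_credentials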

View file

@ -1,14 +0,0 @@
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
class WalletFactoryProvider(GrpcCredentialsProvider):
@reporter.step("Init gRPC Credentials using wallet generation")
def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
wallet_factory = WalletFactory(ASSETS_DIR, LocalShell())
user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS)
return user.wallet

View file

@ -1,35 +0,0 @@
import logging
import os
from importlib.metadata import entry_points
import pytest
import yaml
from frostfs_testlib import reporter
from frostfs_testlib.hosting.hosting import Hosting
from frostfs_testlib.resources.common import HOSTING_CONFIG_FILE
from frostfs_testlib.storage import get_service_registry
@pytest.fixture(scope="session")
def configure_testlib():
reporter.get_reporter().register_handler(reporter.AllureHandler())
reporter.get_reporter().register_handler(reporter.StepsLogger())
logging.getLogger("paramiko").setLevel(logging.INFO)
# Register Services for cluster
registry = get_service_registry()
services = entry_points(group="frostfs.testlib.services")
for svc in services:
registry.register_service(svc.name, svc.load())
@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
with open(HOSTING_CONFIG_FILE, "r") as file:
hosting_config = yaml.full_load(file)
hosting_instance = Hosting()
hosting_instance.configure(hosting_config)
return hosting_instance

View file

@ -47,14 +47,6 @@ class BasicHealthcheck(Healthcheck):
self._perform(cluster_node, checks)
@wait_for_success(900, 30, title="Wait for tree healthcheck on {cluster_node}")
def tree_healthcheck(self, cluster_node: ClusterNode) -> str | None:
checks = {
self._tree_healthcheck: {},
}
self._perform(cluster_node, checks)
@wait_for_success(120, 5, title="Wait for service healthcheck on {cluster_node}")
def services_healthcheck(self, cluster_node: ClusterNode):
svcs_to_check = cluster_node.services

View file

@ -19,7 +19,3 @@ class Healthcheck(ABC):
@abstractmethod
def services_healthcheck(self, cluster_node: ClusterNode):
"""Perform service status check on target cluster node"""
@abstractmethod
def tree_healthcheck(self, cluster_node: ClusterNode):
"""Perform tree healthcheck on target cluster node"""

View file

@ -10,7 +10,9 @@ class ParsedAttributes:
def parse(cls, attributes: dict[str, Any]):
# Pick attributes supported by the class
field_names = set(field.name for field in fields(cls))
supported_attributes = {key: value for key, value in attributes.items() if key in field_names}
supported_attributes = {
key: value for key, value in attributes.items() if key in field_names
}
return cls(**supported_attributes)
@ -27,7 +29,6 @@ class CLIConfig:
name: str
exec_path: str
attributes: dict[str, str] = field(default_factory=dict)
extra_args: list[str] = field(default_factory=list)
@dataclass
@ -62,9 +63,6 @@ class HostConfig:
plugin_name: str
healthcheck_plugin_name: str
address: str
s3_creds_plugin_name: str = field(default="authmate")
grpc_creds_plugin_name: str = field(default="wallet_factory")
product: str = field(default="frostfs")
services: list[ServiceConfig] = field(default_factory=list)
clis: list[CLIConfig] = field(default_factory=list)
attributes: dict[str, str] = field(default_factory=dict)
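
A sketch of ParsedAttributes.parse with a hypothetical subclass; the attribute names echo the hosting config at the top of this diff:

from dataclasses import dataclass

@dataclass
class StorageServiceAttributes(ParsedAttributes):
    container_name: str
    wallet_password: str = ""

attrs = StorageServiceAttributes.parse(
    {"container_name": "s01", "wallet_password": "", "volume_name": "ignored"}
)
assert attrs.container_name == "s01"  # volume_name is dropped: not a declared field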

View file

@ -240,7 +240,6 @@ class DockerHost(Host):
until: Optional[datetime] = None,
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None
) -> str:
client = self._get_docker_client()
filtered_logs = ""

View file

@ -54,7 +54,7 @@ class Host(ABC):
raise ValueError(f"Unknown service name: '{service_name}'")
return service_config
def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig:
def get_cli_config(self, cli_name: str) -> CLIConfig:
"""Returns config of CLI tool with specified name.
The CLI must be located on this host.
@ -66,7 +66,7 @@ class Host(ABC):
Config of the CLI tool.
"""
cli_config = self._cli_config_by_name.get(cli_name)
if cli_config is None and not allow_empty:
if cli_config is None:
raise ValueError(f"Unknown CLI name: '{cli_name}'")
return cli_config
@ -297,7 +297,6 @@ class Host(ABC):
until: Optional[datetime] = None,
unit: Optional[str] = None,
exclude_filter: Optional[str] = None,
priority: Optional[str] = None
) -> str:
"""Get logs from host filtered by regex.
@ -306,8 +305,6 @@ class Host(ABC):
since: If set, limits the time from which logs should be collected. Must be in UTC.
until: If set, limits the time until which logs should be collected. Must be in UTC.
unit: required unit.
priority: logs level, 0 - emergency, 7 - debug. All messages with that code and higher.
For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
Returns:
Found entries as str if any found.

View file

@ -86,7 +86,7 @@ class SummarizedStats:
target.latencies.by_node[node_key] = operation.latency
target.throughput += operation.throughput
target.errors.threshold = load_params.error_threshold
target.total_bytes += operation.total_bytes
target.total_bytes = operation.total_bytes
if operation.failed_iterations:
target.errors.by_node[node_key] = operation.failed_iterations

View file

@ -10,13 +10,13 @@ from typing import Any
from urllib.parse import urlparse
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import User
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
from frostfs_testlib.processes.remote_process import RemoteProcess
from frostfs_testlib.resources.common import STORAGE_USER_NAME
from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.test_control import wait_for_success
EXIT_RESULT_CODE = 0
@ -43,16 +43,16 @@ class K6:
k6_dir: str,
shell: Shell,
loader: Loader,
user: User,
wallet: WalletInfo,
):
if load_params.scenario is None:
raise RuntimeError("Scenario should not be none")
self.load_params = load_params
self.load_params: LoadParams = load_params
self.endpoints = endpoints
self.loader = loader
self.shell = shell
self.user = user
self.loader: Loader = loader
self.shell: Shell = shell
self.wallet = wallet
self.preset_output: str = ""
self.summary_json: str = os.path.join(
self.load_params.working_dir,
@ -62,15 +62,21 @@ class K6:
self._k6_dir: str = k6_dir
command = (
f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} "
f"{self._k6_dir}/k6 run {self._generate_env_variables()} "
f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
)
remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify"
self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id)
user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
process_id = (
self.load_params.load_id
if self.load_params.scenario != LoadScenario.VERIFY
else f"{self.load_params.load_id}_verify"
)
self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id)
def _get_fill_percents(self):
fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n")
fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split(
"\n"
)
return [line.split() for line in fill_percents][:-1]
def check_fill_percent(self):
@ -100,8 +106,8 @@ class K6:
preset_grpc: [
preset_grpc,
f"--endpoint {','.join(self.endpoints)}",
f"--wallet {self.user.wallet.path} ",
f"--config {self.user.wallet.config_path} ",
f"--wallet {self.wallet.path} ",
f"--config {self.wallet.config_path} ",
],
preset_s3: [
preset_s3,
@ -122,9 +128,9 @@ class K6:
self.preset_output = result.stdout.strip("\n")
return self.preset_output
@reporter.step("Generate K6 variables")
def _generate_k6_variables(self) -> str:
env_vars = self.load_params.get_k6_vars()
@reporter.step("Generate K6 command")
def _generate_env_variables(self) -> str:
env_vars = self.load_params.get_env_vars()
env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
env_vars["SUMMARY_JSON"] = self.summary_json
@ -132,14 +138,6 @@ class K6:
reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables")
return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None])
@reporter.step("Generate env variables")
def _generate_env_variables(self) -> str:
env_vars = self.load_params.get_env_vars()
if not env_vars:
return ""
reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables")
return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " "
def get_start_time(self) -> datetime:
return datetime.fromtimestamp(self._k6_process.start_time())
@ -164,7 +162,9 @@ class K6:
remaining_time = timeout - working_time
setup_teardown_time = (
int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip())
int(K6_TEARDOWN_PERIOD)
+ self.load_params.get_init_time()
+ int(self.load_params.setup_timeout.replace("s", "").strip())
)
remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time
timeout = remaining_time_including_setup_and_teardown
@ -196,7 +196,9 @@ class K6:
if not self.load_params.fill_percent is None:
with reporter.step(f"Check the percentage of filling of all data disks on the node"):
if self.check_fill_percent():
logger.info(f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%")
logger.info(
f"Stopping load on because disks is filled more then {self.load_params.fill_percent}%"
)
event.set()
self.stop()
return
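
Putting the pieces above together, on the side of the diff that separates shell env variables from k6 -e variables, the constructor assembles roughly the following command (paths and values are illustrative, not real defaults):

env_prefix = "STREAMING='1' "  # from _generate_env_variables(); may be empty
k6_vars = (
    "-e DURATION='300' "
    "-e GRPC_ENDPOINTS='s01.frostfs.devenv:8080' "
    "-e SUMMARY_JSON='/var/log/load_1_summary.json'"
)
command = f"{env_prefix}/opt/k6/k6 run {k6_vars} /opt/k6/scenarios/grpc.js"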

View file

@ -25,16 +25,6 @@ def convert_time_to_seconds(time: int | str | None) -> int:
return seconds
def force_list(input: str | list[str]):
if input is None:
return None
if isinstance(input, list):
return list(map(str.strip, input))
return [input.strip()]
class LoadType(Enum):
gRPC = "grpc"
S3 = "s3"
@ -104,18 +94,16 @@ def metadata_field(
string_repr: Optional[bool] = True,
distributed: Optional[bool] = False,
formatter: Optional[Callable] = None,
env_variable: Optional[str] = None,
):
return field(
default=None,
metadata={
"applicable_scenarios": applicable_scenarios,
"preset_argument": preset_param,
"scenario_variable": scenario_variable,
"env_variable": scenario_variable,
"string_repr": string_repr,
"distributed": distributed,
"formatter": formatter,
"env_variable": env_variable,
},
)
@ -129,8 +117,6 @@ class NodesSelectionStrategy(Enum):
ALL_EXCEPT_UNDER_TEST = "ALL_EXCEPT_UNDER_TEST"
# Select ONE random node except under test (useful for failover).
RANDOM_SINGLE_EXCEPT_UNDER_TEST = "RANDOM_SINGLE_EXCEPT_UNDER_TEST"
# Select node under test
NODE_UNDER_TEST = "NODE_UNDER_TEST"
class EndpointSelectionStrategy(Enum):
@ -152,29 +138,8 @@ class K6ProcessAllocationStrategy(Enum):
PER_ENDPOINT = "PER_ENDPOINT"
class MetaConfig:
def _get_field_formatter(self, field_name: str) -> Callable | None:
data_fields = fields(self)
formatters = [
field.metadata["formatter"]
for field in data_fields
if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None
]
if formatters:
return formatters[0]
return None
def __setattr__(self, field_name, value):
formatter = self._get_field_formatter(field_name)
if formatter:
value = formatter(value)
super().__setattr__(field_name, value)
@dataclass
class Preset(MetaConfig):
class Preset:
# ------ COMMON ------
# Amount of objects which should be created
objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False)
@ -189,13 +154,13 @@ class Preset(MetaConfig):
# Amount of containers which should be created
containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False)
# Container placement policy for containers for gRPC
container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list)
container_placement_policy: Optional[str] = metadata_field(grpc_preset_scenarios, "policy", None, False)
# ------ S3 ------
# Amount of buckets which should be created
buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False)
# S3 region (AKA placement policy for S3 buckets)
s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list)
s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False)
# Delay between containers creation and object upload for preset
object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False)
@ -208,17 +173,7 @@ class Preset(MetaConfig):
@dataclass
class PrometheusParams(MetaConfig):
# Prometheus server URL
server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False)
# Prometheus trend stats
trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False)
# Additional tags
metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False)
@dataclass
class LoadParams(MetaConfig):
class LoadParams:
# ------- CONTROL PARAMS -------
# Load type can be gRPC, HTTP, S3.
load_type: LoadType
@ -266,18 +221,12 @@ class LoadParams(MetaConfig):
)
# Percentage of filling of all data disks on all nodes
fill_percent: Optional[float] = None
# if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved.
max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB")
# if set, the payload is generated on the fly and is not read into memory fully.
streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
# Output format
output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False)
# Prometheus params
prometheus: Optional[PrometheusParams] = None
# ------- COMMON SCENARIO PARAMS -------
# Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds)
load_time: Optional[int] = metadata_field(
all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds
)
# Object size in KB for load and preset.
object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False)
# For read operations, controls from which set get objects to read
@ -295,7 +244,9 @@ class LoadParams(MetaConfig):
# sleep for the remainder of the time until the specified minimum duration is reached.
min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False)
# Prepare/cut objects locally on client before sending
prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False)
prepare_locally: Optional[bool] = metadata_field(
[LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False
)
# Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios
# https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout
setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False)
@ -325,25 +276,35 @@ class LoadParams(MetaConfig):
delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True)
# Amount of preAllocatedVUs for write operations.
preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True)
preallocated_writers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True
)
# Amount of maxVUs for write operations.
max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True)
# Amount of preAllocatedVUs for read operations.
preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True)
preallocated_readers: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True
)
# Amount of maxVUs for read operations.
max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True)
# Amount of preAllocatedVUs for read operations.
preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True)
preallocated_deleters: Optional[int] = metadata_field(
constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True
)
# Amount of maxVUs for delete operations.
max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True)
# Multipart
# Number of parts to upload in parallel
writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True)
writers_multipart: Optional[int] = metadata_field(
[LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True
)
# part size must be greater than (5 MB)
write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False)
write_object_part_size: Optional[int] = metadata_field(
[LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False
)
# Period of time to apply the rate value.
time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False)
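# A worked example of how the constant-arrival-rate fields above combine
# (hypothetical values, shown only for illustration): k6 starts `rate`
# iterations per `time_unit`, keeps the preallocated VUs warm, and scales
# up toward the max only if iterations start to queue.
example_env = {
    "WRITE_RATE": 100,        # start 100 write iterations ...
    "TIME_UNIT": "1s",        # ... per second
    "PRE_ALLOC_WRITERS": 20,  # VUs kept ready up front
    "MAX_WRITERS": 50,        # hard VU ceiling if the target rate lags
}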
@@ -358,7 +319,7 @@ class LoadParams(MetaConfig):
# Config file location (filled automatically)
config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False)
# Config directory location (filled automatically)
config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False)
config_dir: Optional[str] = metadata_field([LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False)
def set_id(self, load_id):
self.load_id = load_id
@@ -376,17 +337,6 @@ class LoadParams(MetaConfig):
if self.preset:
self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json")
def get_k6_vars(self):
env_vars = {
meta_field.metadata["scenario_variable"]: meta_field.value
for meta_field in self._get_meta_fields(self)
if self.scenario in meta_field.metadata["applicable_scenarios"]
and meta_field.metadata["scenario_variable"]
and meta_field.value is not None
}
return env_vars
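# A standalone sketch of the selection rule in get_k6_vars (the metadata
# dicts here are illustrative, not the real testlib field metadata): a field
# is exported only when the active scenario is applicable, the field names a
# scenario variable, and a value was actually set.
def pick_k6_vars(fields_meta: list[dict], scenario: str) -> dict:
    return {
        meta["scenario_variable"]: meta["value"]
        for meta in fields_meta
        if scenario in meta["applicable_scenarios"] and meta["scenario_variable"] and meta["value"] is not None
    }

assert pick_k6_vars([{"scenario_variable": "DURATION", "applicable_scenarios": ["grpc"], "value": 300}], "grpc") == {"DURATION": 300}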
def get_env_vars(self):
env_vars = {
meta_field.metadata["env_variable"]: meta_field.value
@@ -443,11 +393,6 @@ class LoadParams(MetaConfig):
# For preset calls, bool values are passed with just --<argument_name> if the value is True
return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else ""
if isinstance(meta_field.value, list):
return (
" ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else ""
)
return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'"
@staticmethod
@@ -467,6 +412,25 @@ class LoadParams(MetaConfig):
return fields_with_data or []
def _get_field_formatter(self, field_name: str) -> Callable | None:
data_fields = fields(self)
formatters = [
field.metadata["formatter"]
for field in data_fields
if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] is not None
]
if formatters:
return formatters[0]
return None
def __setattr__(self, field_name, value):
formatter = self._get_field_formatter(field_name)
if formatter:
value = formatter(value)
super().__setattr__(field_name, value)
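# A sketch of the formatter hook in action. convert_time_to_seconds is
# assumed to parse "5m"/"1h"-style strings into whole seconds; the stub
# below stands in for it so the example is self-contained.
def convert_time_to_seconds_stub(value) -> int:
    if isinstance(value, int):
        return value
    units = {"s": 1, "m": 60, "h": 3600}
    return int(value[:-1]) * units[value[-1]]

# With such a formatter registered for load_time, the assignment
# `load_params.load_time = "5m"` passes through __setattr__ and stores 300,
# so k6 always receives DURATION in seconds.
assert convert_time_to_seconds_stub("5m") == 300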
def __str__(self) -> str:
load_type_str = self.scenario.value if self.scenario else self.load_type.value
# TODO: migrate load_params defaults to testlib
@@ -477,7 +441,9 @@ class LoadParams(MetaConfig):
static_params = [f"{load_type_str}"]
dynamic_params = [
f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"]
f"{meta_field.name}={meta_field.value}"
for meta_field in self._get_applicable_fields()
if meta_field.metadata["string_repr"]
]
params = ", ".join(static_params + dynamic_params)


@@ -1,20 +1,24 @@
import copy
import itertools
import math
import re
import time
from dataclasses import fields
from threading import Event
from typing import Optional
from urllib.parse import urlparse
import yaml
from frostfs_testlib import reporter
from frostfs_testlib.credentials.interfaces import S3Credentials, User
from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate
from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
from frostfs_testlib.resources import optionals
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.common import STORAGE_USER_NAME
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES
from frostfs_testlib.shell.command_inspectors import SuInspector
@@ -22,6 +26,7 @@ from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel, run_optionally
from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils import datetime_utils
@@ -52,17 +57,17 @@ class RunnerBase(ScenarioRunner):
class DefaultRunner(RunnerBase):
loaders: list[Loader]
user: User
loaders_wallet: WalletInfo
def __init__(
self,
user: User,
loaders_wallet: WalletInfo,
load_ip_list: Optional[list[str]] = None,
) -> None:
if load_ip_list is None:
load_ip_list = LOAD_NODES
self.loaders = RemoteLoader.from_ip_list(load_ip_list)
self.user = user
self.loaders_wallet = loaders_wallet
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps")
@@ -81,27 +86,55 @@ class DefaultRunner(RunnerBase):
return
with reporter.step("Init s3 client on loaders"):
s3_credentials = self.user.s3_credentials
parallel(self._aws_configure_on_loader, self.loaders, s3_credentials)
storage_node = nodes_under_load[0].service(StorageNode)
s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
grpc_peer = storage_node.get_rpc_endpoint()
parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)
def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
with reporter.step(f"Forcing fresh registry on {loader.ip}"):
shell = loader.get_shell()
shell.exec(f"rm -f {load_params.registry_file}")
def _aws_configure_on_loader(
def _prepare_loader(
self,
loader: Loader,
s3_credentials: S3Credentials,
load_params: LoadParams,
grpc_peer: str,
s3_public_keys: list[str],
k6_dir: str,
):
with reporter.step(f"Aws configure on {loader.ip}"):
with reporter.step(f"Init s3 client on {loader.ip}"):
shell = loader.get_shell()
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=self.loaders_wallet.path,
peer=grpc_peer,
gate_public_key=s3_public_keys,
container_placement_policy=load_params.preset.container_placement_policy,
container_policy=f"{k6_dir}/scenarios/files/policy.json",
wallet_password=self.loaders_wallet.password,
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
"aws_access_key_id"
)
)
aws_secret_access_key = str(
re.search(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
issue_secret_output,
).group("aws_secret_access_key")
)
configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key),
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""),
]
loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input))
shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
@@ -143,10 +176,12 @@ class DefaultRunner(RunnerBase):
k6_dir,
shell,
loader,
self.user,
self.loaders_wallet,
)
def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]:
def _get_distributed_load_params_list(
self, original_load_params: LoadParams, workers_count: int
) -> list[LoadParams]:
divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
distributed_load_params: list[LoadParams] = []
@@ -231,20 +266,18 @@ class LocalRunner(RunnerBase):
loaders: list[Loader]
cluster_state_controller: ClusterStateController
file_keeper: FileKeeper
user: User
wallet: WalletInfo
def __init__(
self,
cluster_state_controller: ClusterStateController,
file_keeper: FileKeeper,
nodes_under_load: list[ClusterNode],
user: User,
) -> None:
self.cluster_state_controller = cluster_state_controller
self.file_keeper = file_keeper
self.loaders = [NodeLoader(node) for node in nodes_under_load]
self.nodes_under_load = nodes_under_load
self.user = user
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@reporter.step("Preparation steps")
@@ -293,9 +326,11 @@ class LocalRunner(RunnerBase):
shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
shell.exec(f"sudo chmod -R 777 {k6_dir}")
with reporter.step("chmod 777 wallet related files on loader"):
shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}")
shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}")
with reporter.step("Create empty_passwd"):
self.wallet = WalletInfo(f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml")
content = yaml.dump({"password": ""})
shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}')
shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}")
@reporter.step("Init k6 instances")
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
@@ -328,7 +363,7 @@ class LocalRunner(RunnerBase):
k6_dir,
shell,
loader,
self.user,
self.wallet,
)
def start(self):
@@ -418,7 +453,7 @@ class S3LocalRunner(LocalRunner):
k6_dir,
shell,
loader,
self.user,
self.wallet,
)
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
@@ -431,10 +466,17 @@ class S3LocalRunner(LocalRunner):
k6_dir: str,
):
self.k6_dir = k6_dir
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes)
with reporter.step("Init s3 client on loaders"):
storage_node = nodes_under_load[0].service(StorageNode)
s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
grpc_peer = storage_node.get_rpc_endpoint()
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)
@reporter.step("Prepare node {cluster_node}")
def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]):
def prepare_node(
self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str
):
LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
self.endpoints = cluster_node.s3_gate.get_all_endpoints()
shell = cluster_node.host.get_shell()
@@ -455,9 +497,29 @@ class S3LocalRunner(LocalRunner):
shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")
with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=self.wallet.path,
peer=grpc_peer,
gate_public_key=s3_public_keys,
container_placement_policy=load_params.preset.container_placement_policy,
container_policy=f"{k6_dir}/scenarios/files/policy.json",
wallet_password=self.wallet.password,
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
"aws_access_key_id"
)
)
aws_secret_access_key = str(
re.search(
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
issue_secret_output,
).group("aws_secret_access_key")
)
configure_input = [
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key),
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
InteractiveInput(prompt_pattern=r".*", input=""),
InteractiveInput(prompt_pattern=r".*", input=""),
]


@@ -46,8 +46,3 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file:
MAX_REQUEST_ATTEMPTS = 5
RETRY_MODE = "standard"
CREDENTIALS_CREATE_TIMEOUT = "1m"
HOSTING_CONFIG_FILE = os.getenv(
"HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml"))
)


@@ -23,9 +23,6 @@ INVALID_RANGE_OVERFLOW = "invalid '{range}' range: uint64 overflow"
INVALID_OFFSET_SPECIFIER = "invalid '{range}' range offset specifier"
INVALID_LENGTH_SPECIFIER = "invalid '{range}' range length specifier"
S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs"
S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema."
RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
S3_MALFORMED_XML_REQUEST = (
"The XML you provided was not well-formed or did not validate against our published schema."
)


@@ -1,9 +0,0 @@
ALL_USERS_GROUP_URI = "http://acs.amazonaws.com/groups/global/AllUsers"
ALL_USERS_GROUP_WRITE_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "WRITE"}
ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROUP_URI}, "Permission": "READ"}
CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"}
# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
PRIVATE_GRANTS = []
PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT]
PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT]


@@ -1,6 +1,7 @@
import json
import logging
import os
import uuid
from datetime import datetime
from time import sleep
from typing import Literal, Optional, Union
@@ -10,11 +11,9 @@ from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, R
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.utils import string_utils
# TODO: Refactor this code to use shell instead of _cmd_run
from frostfs_testlib.utils.cli_utils import _configure_aws_cli
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger")
command_options = CommandOptions(timeout=480)
@@ -30,17 +29,13 @@ class AwsCliClient(S3ClientWrapper):
@reporter.step("Configure S3 client (aws cli)")
def __init__(
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default"
) -> None:
self.s3gate_endpoint = s3gate_endpoint
self.access_key_id: str = access_key_id
self.secret_access_key: str = secret_access_key
self.profile = profile
self.local_shell = LocalShell()
self.region = region
self.iam_endpoint = None
try:
_configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region)
_configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key)
self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}")
self.local_shell.exec(
f"aws configure set retry_mode {RETRY_MODE} --profile {profile}",
@@ -48,14 +43,10 @@ class AwsCliClient(S3ClientWrapper):
except Exception as err:
raise RuntimeError("Error while configuring AwsCliClient") from err
@reporter.step("Set S3 endpoint to {s3gate_endpoint}")
@reporter.step("Set endpoint S3 to {s3gate_endpoint}")
def set_endpoint(self, s3gate_endpoint: str):
self.s3gate_endpoint = s3gate_endpoint
@reporter.step("Set IAM endpoint to {iam_endpoint}")
def set_iam_endpoint(self, iam_endpoint: str):
self.iam_endpoint = iam_endpoint
@reporter.step("Create bucket S3")
def create_bucket(
self,
@@ -68,7 +59,7 @@ class AwsCliClient(S3ClientWrapper):
location_constraint: Optional[str] = None,
) -> str:
if bucket is None:
bucket = string_utils.unique_name("bucket-")
bucket = str(uuid.uuid4())
if object_lock_enabled_for_bucket is None:
object_lock = ""
@@ -91,6 +82,7 @@ class AwsCliClient(S3ClientWrapper):
if location_constraint:
cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}"
self.local_shell.exec(cmd)
sleep(S3_SYNC_WAIT_TIME)
return bucket
@@ -105,6 +97,7 @@ class AwsCliClient(S3ClientWrapper):
def delete_bucket(self, bucket: str) -> None:
cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
self.local_shell.exec(cmd, command_options)
sleep(S3_SYNC_WAIT_TIME)
@reporter.step("Head bucket S3")
def head_bucket(self, bucket: str) -> None:
@@ -152,7 +145,8 @@ class AwsCliClient(S3ClientWrapper):
@reporter.step("Get bucket acl")
def get_bucket_acl(self, bucket: str) -> list:
cmd = (
f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
@@ -170,7 +164,10 @@ class AwsCliClient(S3ClientWrapper):
@reporter.step("List objects S3")
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
cmd = (
f"aws {self.common_flags} s3api list-objects --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
@@ -229,7 +226,7 @@ class AwsCliClient(S3ClientWrapper):
if bucket is None:
bucket = source_bucket
if key is None:
key = string_utils.unique_name("copy-object-")
key = os.path.join(os.getcwd(), str(uuid.uuid4()))
copy_source = f"{source_bucket}/{source_key}"
cmd = (
@@ -314,18 +311,18 @@ class AwsCliClient(S3ClientWrapper):
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> dict | TestFile:
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-")))
) -> Union[dict, str]:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
version = f" --version-id {version_id}" if version_id else ""
cmd = (
f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} "
f"{version} {test_file} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
f"{version} {file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
if object_range:
cmd += f" --range bytes={object_range[0]}-{object_range[1]}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response if full_output else test_file
return response if full_output else file_path
@reporter.step("Get object ACL")
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
@@ -395,6 +392,7 @@ class AwsCliClient(S3ClientWrapper):
)
output = self.local_shell.exec(cmd, command_options).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME)
return response
@reporter.step("Delete object S3")
@@ -405,6 +403,7 @@ class AwsCliClient(S3ClientWrapper):
f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
output = self.local_shell.exec(cmd, command_options).stdout
sleep(S3_SYNC_WAIT_TIME)
return self._to_json(output)
@reporter.step("Delete object versions S3")
@@ -431,6 +430,7 @@ class AwsCliClient(S3ClientWrapper):
f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
output = self.local_shell.exec(cmd, command_options).stdout
sleep(S3_SYNC_WAIT_TIME)
return self._to_json(output)
@reporter.step("Delete object versions S3 without delete markers")
@@ -481,16 +481,6 @@ class AwsCliClient(S3ClientWrapper):
response = self._to_json(output)
return response.get("Policy")
@reporter.step("Delete bucket policy")
def delete_bucket_policy(self, bucket: str) -> dict:
cmd = (
f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} "
f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Put bucket policy")
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
# Leaving it as it was in the test repo. json.dumps is applied twice to escape the resulting string.
@@ -575,13 +565,12 @@ class AwsCliClient(S3ClientWrapper):
self.local_shell.exec(cmd)
@reporter.step("Put object tagging")
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None:
def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
version = f" --version-id {version_id}" if version_id else ""
cmd = (
f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} "
f"{version} --tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}"
f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
self.local_shell.exec(cmd)
@@ -597,11 +586,10 @@ class AwsCliClient(S3ClientWrapper):
return response.get("TagSet")
@reporter.step("Delete object tagging")
def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None:
version = f" --version-id {version_id}" if version_id else ""
def delete_object_tagging(self, bucket: str, key: str) -> None:
cmd = (
f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} "
f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
f"--key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
)
self.local_shell.exec(cmd)
@@ -614,7 +602,8 @@ class AwsCliClient(S3ClientWrapper):
metadata: Optional[dict] = None,
) -> dict:
cmd = (
f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} "
f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
)
if metadata:
cmd += " --metadata"
@@ -764,495 +753,3 @@ class AwsCliClient(S3ClientWrapper):
json_output = json.loads(output[output.index("{") :])
return json_output
# IAM METHODS #
# Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.)
@reporter.step("Adds the specified user to the specified group")
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
cmd = f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Attaches the specified managed policy to the specified IAM group")
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
cmd = f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Attaches the specified managed policy to the specified user")
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
cmd = f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
def iam_create_access_key(self, user_name: Optional[str] = None) -> tuple[str, str]:
cmd = f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
if user_name:
cmd += f" --user-name {user_name}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
access_key_id = response["AccessKey"].get("AccessKeyId")
secret_access_key = response["AccessKey"].get("SecretAccessKey")
assert access_key_id, f"Expected AccessKeyId in response:\n{response}"
assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
return access_key_id, secret_access_key
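# Usage sketch with a hypothetical response shape mirroring the parsing
# above: the method returns the key pair rather than the raw dict, so
# callers unpack it directly.
response = {"AccessKey": {"AccessKeyId": "AKIAEXAMPLE", "SecretAccessKey": "wJalrEXAMPLEKEY"}}
access_key_id, secret_access_key = (response["AccessKey"]["AccessKeyId"], response["AccessKey"]["SecretAccessKey"])
assert access_key_id and secret_access_key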
@reporter.step("Creates a new group")
def iam_create_group(self, group_name: str) -> dict:
cmd = f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("Group"), f"Expected Group in response:\n{response}"
assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
return response
@reporter.step("Creates a new managed policy for your AWS account")
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
cmd = (
f"aws {self.common_flags} iam create-policy --endpoint {self.iam_endpoint}"
f" --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'"
)
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
return response
@reporter.step("Creates a new IAM user for your AWS account")
def iam_create_user(self, user_name: str) -> dict:
cmd = f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("User"), f"Expected User in response:\n{response}"
assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
return response
@reporter.step("Deletes the access key pair associated with the specified IAM user")
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Deletes the specified IAM group")
def iam_delete_group(self, group_name: str) -> dict:
cmd = f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
cmd = f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Deletes the specified managed policy")
def iam_delete_policy(self, policy_arn: str) -> dict:
cmd = f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Deletes the specified IAM user")
def iam_delete_user(self, user_name: str) -> dict:
cmd = f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
cmd = f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Removes the specified managed policy from the specified IAM group")
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified managed policy from the specified user")
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
cmd = f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
def iam_get_group(self, group_name: str) -> dict:
cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert "Users" in response.keys(), f"Expected Users in response:\n{response}"
assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
cmd = f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Retrieves information about the specified managed policy")
def iam_get_policy(self, policy_arn: str) -> dict:
cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
return response
@reporter.step("Retrieves information about the specified version of the specified managed policy")
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}"
assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}"
return response
@reporter.step("Retrieves information about the specified IAM user")
def iam_get_user(self, user_name: str) -> dict:
cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("User"), f"Expected User in response:\n{response}"
assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
cmd = f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("UserName"), f"Expected User in response:\n{response}"
return response
@reporter.step("Returns information about the access key IDs associated with the specified IAM user")
def iam_list_access_keys(self, user_name: str) -> dict:
cmd = f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM group")
def iam_list_attached_group_policies(self, group_name: str) -> dict:
cmd = f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
def iam_list_attached_user_policies(self, user_name: str) -> dict:
cmd = f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
cmd = f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}"
assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}"
return response
@reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
def iam_list_group_policies(self, group_name: str) -> dict:
cmd = f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM groups")
def iam_list_groups(self) -> dict:
cmd = f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
def iam_list_groups_for_user(self, user_name: str) -> dict:
cmd = f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists all the managed policies that are available in your AWS account")
def iam_list_policies(self) -> dict:
cmd = f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert "Policies" in response.keys(), f"Expected Policies in response:\n{response}"
return response
@reporter.step("Lists information about the versions of the specified managed policy")
def iam_list_policy_versions(self, policy_arn: str) -> dict:
cmd = f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("Versions"), f"Expected Versions in response:\n{response}"
return response
@reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
def iam_list_user_policies(self, user_name: str) -> dict:
cmd = f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM users")
def iam_list_users(self) -> dict:
cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
assert "Users" in response.keys(), f"Expected Users in response:\n{response}"
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
cmd = (
f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}"
f" --group-name {group_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'"
)
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
cmd = (
f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}"
f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'"
)
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified user from the specified group")
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
cmd = (
f"aws {self.common_flags} iam remove-user-from-group --endpoint {self.iam_endpoint}"
f" --group-name {group_name} --user-name {user_name}"
)
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Updates the name and/or the path of the specified IAM group")
def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}"
if new_name:
cmd += f" --new-group-name {new_name}"
if new_path:
cmd += f" --new-path {new_path}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Updates the name and/or the path of the specified IAM user")
def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}"
if new_name:
cmd += f" --new-user-name {new_name}"
if new_path:
cmd += f" --new-path {new_path}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Adds one or more tags to an IAM user")
def iam_tag_user(self, user_name: str, tags: list) -> dict:
tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
cmd = (
f"aws {self.common_flags} iam tag-user --user-name {user_name} --tags '{json.dumps(tags_json)}' --endpoint {self.iam_endpoint}"
)
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("List tags of IAM user")
def iam_list_user_tags(self, user_name: str) -> dict:
cmd = f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response
@reporter.step("Removes the specified tags from the user")
def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
tag_keys_joined = " ".join(tag_keys)
cmd = f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}"
if self.profile:
cmd += f" --profile {self.profile}"
output = self.local_shell.exec(cmd).stdout
response = self._to_json(output)
return response


@@ -1,6 +1,7 @@
import json
import logging
import os
import uuid
from datetime import datetime
from functools import wraps
from time import sleep
@@ -15,11 +16,7 @@ from mypy_boto3_s3 import S3Client
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.utils import string_utils
# TODO: Refactor this code to use shell instead of _cmd_run
from frostfs_testlib.utils.cli_utils import log_command_execution
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger")
@@ -34,15 +31,7 @@ def report_error(func):
try:
return func(*a, **kw)
except ClientError as err:
url = None
params = {"args": a, "kwargs": kw}
if isinstance(a[0], Boto3ClientWrapper):
client: Boto3ClientWrapper = a[0]
url = client.s3gate_endpoint
params = {"args": a[1:], "kwargs": kw}
log_command_execution(url, f"Failed {err.operation_name}", err.response, params)
log_command_execution("Result", str(err))
raise
return deco
@@ -54,11 +43,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("Configure S3 client (boto3)")
@report_error
def __init__(
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default"
) -> None:
self.boto3_client: S3Client = None
self.session = boto3.Session()
self.region = region
self.session = boto3.Session(profile_name=profile)
self.config = Config(
retries={
"max_attempts": MAX_REQUEST_ATTEMPTS,
@@ -68,7 +56,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
self.access_key_id: str = access_key_id
self.secret_access_key: str = secret_access_key
self.s3gate_endpoint: str = ""
self.boto3_iam_client: S3Client = None
self.set_endpoint(s3gate_endpoint)
@reporter.step("Set endpoint S3 to {s3gate_endpoint}")
@@ -82,23 +69,12 @@ class Boto3ClientWrapper(S3ClientWrapper):
service_name="s3",
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
region_name=self.region,
config=self.config,
endpoint_url=s3gate_endpoint,
verify=False,
)
@reporter.step("Set endpoint IAM to {iam_endpoint}")
def set_iam_endpoint(self, iam_endpoint: str):
self.boto3_iam_client = self.session.client(
service_name="iam",
aws_access_key_id=self.access_key_id,
aws_secret_access_key=self.secret_access_key,
endpoint_url=iam_endpoint,
verify=False,
)
def _to_s3_param(self, param: str) -> str:
def _to_s3_param(self, param: str):
replacement_map = {
"Acl": "ACL",
"Cors": "CORS",
@@ -109,11 +85,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
result = result.replace(find, replace)
return result
def _convert_to_s3_params(self, scope: dict, exclude: Optional[list[str]] = None) -> dict:
if not exclude:
exclude = ["self"]
return {self._to_s3_param(param): value for param, value in scope if param not in exclude and value is not None}
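# A standalone sketch of the conversion above, assuming the part of
# _to_s3_param elided by the hunk title-cases snake_case names before
# applying the replacement map (shown here as an illustrative subset):
REPLACEMENT_MAP = {"Acl": "ACL"}

def to_s3_param(param: str) -> str:
    result = param.title().replace("_", "")
    for find, replace in REPLACEMENT_MAP.items():
        result = result.replace(find, replace)
    return result

scope = {"self": None, "bucket": "my-bucket", "version_id": None, "acl": "private"}.items()
params = {to_s3_param(param): value for param, value in scope if param not in ["self"] and value is not None}
assert params == {"Bucket": "my-bucket", "ACL": "private"}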
# BUCKET METHODS #
@reporter.step("Create bucket S3")
@report_error
@@ -128,7 +99,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
location_constraint: Optional[str] = None,
) -> str:
if bucket is None:
bucket = string_utils.unique_name("bucket-")
bucket = str(uuid.uuid4())
params = {"Bucket": bucket}
if object_lock_enabled_for_bucket is not None:
@@ -146,7 +117,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}})
s3_bucket = self.boto3_client.create_bucket(**params)
log_command_execution(self.s3gate_endpoint, f"Created S3 bucket {bucket}", s3_bucket, params)
log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
sleep(S3_SYNC_WAIT_TIME)
return bucket
@reporter.step("List buckets S3")
@@ -155,7 +127,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
found_buckets = []
response = self.boto3_client.list_buckets()
log_command_execution(self.s3gate_endpoint, "S3 List buckets result", response)
log_command_execution("S3 List buckets result", response)
for bucket in response["Buckets"]:
found_buckets.append(bucket["Name"])
@@ -166,27 +138,29 @@ class Boto3ClientWrapper(S3ClientWrapper):
@report_error
def delete_bucket(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 Delete bucket result", response, {"Bucket": bucket})
log_command_execution("S3 Delete bucket result", response)
sleep(S3_SYNC_WAIT_TIME)
@reporter.step("Head bucket S3")
@report_error
def head_bucket(self, bucket: str) -> None:
response = self.boto3_client.head_bucket(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 Head bucket result", response, {"Bucket": bucket})
log_command_execution("S3 Head bucket result", response)
@reporter.step("Put bucket versioning status")
@report_error
def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
params = {"Bucket": bucket, "VersioningConfiguration": {"Status": status.value}}
response = self.boto3_client.put_bucket_versioning(**params)
log_command_execution(self.s3gate_endpoint, "S3 Set bucket versioning to", response, params)
response = self.boto3_client.put_bucket_versioning(
Bucket=bucket, VersioningConfiguration={"Status": status.value}
)
log_command_execution("S3 Set bucket versioning to", response)
@reporter.step("Get bucket versioning status")
@report_error
def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
response = self.boto3_client.get_bucket_versioning(Bucket=bucket)
status = response.get("Status")
log_command_execution(self.s3gate_endpoint, "S3 Got bucket versioning status", response, {"Bucket": bucket})
log_command_execution("S3 Got bucket versioning status", response)
return status
@reporter.step("Put bucket tagging")
@@ -194,29 +168,28 @@ class Boto3ClientWrapper(S3ClientWrapper):
def put_bucket_tagging(self, bucket: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"])
response = self.boto3_client.put_bucket_tagging(**params)
log_command_execution(self.s3gate_endpoint, "S3 Put bucket tagging", response, params)
response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging)
log_command_execution("S3 Put bucket tagging", response)
@reporter.step("Get bucket tagging")
@report_error
def get_bucket_tagging(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_tagging(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 Get bucket tagging", response, {"Bucket": bucket})
log_command_execution("S3 Get bucket tagging", response)
return response.get("TagSet")
@reporter.step("Get bucket acl")
@report_error
def get_bucket_acl(self, bucket: str) -> list:
response = self.boto3_client.get_bucket_acl(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 Get bucket acl", response, {"Bucket": bucket})
log_command_execution("S3 Get bucket acl", response)
return response.get("Grants")
@reporter.step("Delete bucket tagging")
@report_error
def delete_bucket_tagging(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_tagging(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 Delete bucket tagging", response, {"Bucket": bucket})
log_command_execution("S3 Delete bucket tagging", response)
@reporter.step("Put bucket ACL")
@report_error
@@ -227,74 +200,68 @@ class Boto3ClientWrapper(S3ClientWrapper):
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> None:
params = self._convert_to_s3_params(locals().items())
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.put_bucket_acl(**params)
log_command_execution(self.s3gate_endpoint, "S3 ACL bucket result", response, params)
log_command_execution("S3 ACL bucket result", response)
@reporter.step("Put object lock configuration")
@report_error
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
params = {"Bucket": bucket, "ObjectLockConfiguration": configuration}
response = self.boto3_client.put_object_lock_configuration(**params)
log_command_execution(self.s3gate_endpoint, "S3 put_object_lock_configuration result", response, params)
response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration)
log_command_execution("S3 put_object_lock_configuration result", response)
return response
@reporter.step("Get object lock configuration")
@report_error
def get_object_lock_configuration(self, bucket: str) -> dict:
response = self.boto3_client.get_object_lock_configuration(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 get_object_lock_configuration result", response, {"Bucket": bucket})
log_command_execution("S3 get_object_lock_configuration result", response)
return response.get("ObjectLockConfiguration")
@reporter.step("Get bucket policy")
@report_error
def get_bucket_policy(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_policy(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 get_bucket_policy result", response, {"Bucket": bucket})
log_command_execution("S3 get_bucket_policy result", response)
return response.get("Policy")
@reporter.step("Delete bucket policy")
@report_error
def delete_bucket_policy(self, bucket: str) -> str:
response = self.boto3_client.delete_bucket_policy(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_policy result", response, {"Bucket": bucket})
return response
@reporter.step("Put bucket policy")
@report_error
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
params = {"Bucket": bucket, "Policy": json.dumps(policy)}
response = self.boto3_client.put_bucket_policy(**params)
log_command_execution(self.s3gate_endpoint, "S3 put_bucket_policy result", response, params)
response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
log_command_execution("S3 put_bucket_policy result", response)
return response
@reporter.step("Get bucket cors")
@report_error
def get_bucket_cors(self, bucket: str) -> dict:
response = self.boto3_client.get_bucket_cors(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 get_bucket_cors result", response, {"Bucket": bucket})
log_command_execution("S3 get_bucket_cors result", response)
return response.get("CORSRules")
@reporter.step("Get bucket location")
@report_error
def get_bucket_location(self, bucket: str) -> str:
response = self.boto3_client.get_bucket_location(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 get_bucket_location result", response, {"Bucket": bucket})
log_command_execution("S3 get_bucket_location result", response)
return response.get("LocationConstraint")
@reporter.step("Put bucket cors")
@report_error
def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.put_bucket_cors(**params)
log_command_execution(self.s3gate_endpoint, "S3 put_bucket_cors result", response, params)
response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration)
log_command_execution("S3 put_bucket_cors result", response)
return response
@reporter.step("Delete bucket cors")
@report_error
def delete_bucket_cors(self, bucket: str) -> None:
response = self.boto3_client.delete_bucket_cors(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 delete_bucket_cors result", response, {"Bucket": bucket})
log_command_execution("S3 delete_bucket_cors result", response)
# END OF BUCKET METHODS #
# OBJECT METHODS #
@@ -302,9 +269,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("List objects S3 v2")
@report_error
def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.list_objects_v2(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 v2 List objects result", response, params)
log_command_execution("S3 v2 List objects result", response)
obj_list = [obj["Key"] for obj in response.get("Contents", [])]
logger.info(f"Found s3 objects: {obj_list}")
@@ -314,9 +280,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("List objects S3")
@report_error
def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.list_objects(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 List objects result", response, params)
log_command_execution("S3 List objects result", response)
obj_list = [obj["Key"] for obj in response.get("Contents", [])]
logger.info(f"Found s3 objects: {obj_list}")
@@ -326,17 +291,15 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("List objects versions S3")
@report_error
def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.list_object_versions(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 List objects versions result", response, params)
log_command_execution("S3 List objects versions result", response)
return response if full_output else response.get("Versions", [])
@reporter.step("List objects delete markers S3")
@report_error
def list_delete_markers(self, bucket: str, full_output: bool = False) -> list:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.list_object_versions(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 List objects delete markers result", response, params)
log_command_execution("S3 List objects delete markers result", response)
return response if full_output else response.get("DeleteMarkers", [])
@reporter.step("Put object S3")
@@ -361,36 +324,49 @@ class Boto3ClientWrapper(S3ClientWrapper):
with open(filepath, "rb") as put_file:
body = put_file.read()
params = self._convert_to_s3_params(locals().items(), exclude=["self", "filepath", "put_file", "body"])
response = self.boto3_client.put_object(Body=body, **params)
log_command_execution(self.s3gate_endpoint, "S3 Put object result", response, params)
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self", "filepath", "put_file"] and value is not None
}
response = self.boto3_client.put_object(**params)
log_command_execution("S3 Put object result", response)
return response.get("VersionId")
@reporter.step("Head object S3")
@report_error
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = self._convert_to_s3_params(locals().items())
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.head_object(**params)
log_command_execution(self.s3gate_endpoint, "S3 Head object result", response, params)
log_command_execution("S3 Head object result", response)
return response
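# Illustrative only: a put/head round-trip through the two wrappers above,
# assuming put_object(bucket, key, filepath) as its visible body suggests;
# `client`, `bucket`, `key` and `filepath` are placeholders.
version_id = client.put_object(bucket, key, filepath)
assert client.head_object(bucket, key, version_id=version_id)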
@reporter.step("Delete object S3")
@report_error
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
params = self._convert_to_s3_params(locals().items())
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.delete_object(**params)
log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params)
log_command_execution("S3 Delete object result", response)
sleep(S3_SYNC_WAIT_TIME)
return response
@reporter.step("Delete objects S3")
@report_error
def delete_objects(self, bucket: str, keys: list[str]) -> dict:
params = {"Bucket": bucket, "Delete": _make_objs_dict(keys)}
response = self.boto3_client.delete_objects(**params)
log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params)
response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys))
log_command_execution("S3 Delete objects result", response)
assert (
"Errors" not in response
), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError messages: {[err_info["Message"] for err_info in response["Errors"]]}'
sleep(S3_SYNC_WAIT_TIME)
return response
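# Illustrative sketch of `_make_objs_dict` (declared in the interfaces module
# further down): boto3's delete_objects expects {"Objects": [{"Key": ...}, ...]}.
def _make_objs_dict(key_names):
    return {"Objects": [{"Key": key} for key in key_names]}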
@reporter.step("Delete object versions S3")
@ -406,9 +382,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
for object_version in object_versions
]
}
params = {"Bucket": bucket, "Delete": delete_list}
response = self.boto3_client.delete_objects(**params)
log_command_execution(self.s3gate_endpoint, "S3 Delete objects result", response, params)
response = self.boto3_client.delete_objects(Bucket=bucket, Delete=delete_list)
log_command_execution("S3 Delete objects result", response)
return response
@reporter.step("Delete object versions S3 without delete markers")
@ -416,9 +391,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
# Delete objects without creating delete markers
for object_version in object_versions:
params = {"Bucket": bucket, "Key": object_version["Key"], "VersionId": object_version["VersionId"]}
response = self.boto3_client.delete_object(**params)
log_command_execution(self.s3gate_endpoint, "S3 Delete object result", response, params)
response = self.boto3_client.delete_object(
Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"]
)
log_command_execution("S3 Delete object result", response)
@reporter.step("Put object ACL")
@report_error
@ -430,17 +406,19 @@ class Boto3ClientWrapper(S3ClientWrapper):
grant_write: Optional[str] = None,
grant_read: Optional[str] = None,
) -> list:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.put_object_acl(**params)
log_command_execution(self.s3gate_endpoint, "S3 put object ACL", response, params)
return response.get("Grants")
# pytest.skip("Method put_object_acl is not supported by boto3 client")
raise NotImplementedError("Unsupported for boto3 client")
@reporter.step("Get object ACL")
@report_error
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = self._convert_to_s3_params(locals().items())
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.get_object_acl(**params)
log_command_execution(self.s3gate_endpoint, "S3 ACL objects result", response, params)
log_command_execution("S3 ACL objects result", response)
return response.get("Grants")
@reporter.step("Copy object S3")
@ -460,12 +438,16 @@ class Boto3ClientWrapper(S3ClientWrapper):
if bucket is None:
bucket = source_bucket
if key is None:
key = string_utils.unique_name("copy-object-")
key = os.path.join(os.getcwd(), str(uuid.uuid4()))
copy_source = f"{source_bucket}/{source_key}"
params = self._convert_to_s3_params(locals().items(), exclude=["self", "source_bucket", "source_key"])
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self", "source_bucket", "source_key"] and value is not None
}
response = self.boto3_client.copy_object(**params)
log_command_execution(self.s3gate_endpoint, "S3 Copy objects result", response, params)
log_command_execution("S3 Copy objects result", response)
return key
@reporter.step("Get object S3")
@ -477,35 +459,32 @@ class Boto3ClientWrapper(S3ClientWrapper):
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> dict | TestFile:
) -> Union[dict, str]:
filename = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
range_str = None
if object_range:
range_str = f"bytes={object_range[0]}-{object_range[1]}"
params = self._convert_to_s3_params(
{**locals(), **{"Range": range_str}}.items(),
exclude=["self", "object_range", "full_output", "range_str"],
)
params = {
self._to_s3_param(param): value
for param, value in {**locals(), **{"Range": range_str}}.items()
if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None
}
response = self.boto3_client.get_object(**params)
log_command_execution(self.s3gate_endpoint, "S3 Get objects result", response, params)
log_command_execution("S3 Get objects result", response)
if full_output:
return response
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-")))
with open(test_file, "wb") as file:
with open(f"{filename}", "wb") as get_file:
chunk = response["Body"].read(1024)
while chunk:
file.write(chunk)
get_file.write(chunk)
chunk = response["Body"].read(1024)
return test_file
return response if full_output else filename
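# Illustrative only: the Range header built above is inclusive on both ends,
# so object_range=(0, 1023) reads exactly the first 1024 bytes.
object_range = (0, 1023)
assert f"bytes={object_range[0]}-{object_range[1]}" == "bytes=0-1023"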
@reporter.step("Create multipart upload S3")
@report_error
def create_multipart_upload(self, bucket: str, key: str) -> str:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.create_multipart_upload(**params)
log_command_execution(self.s3gate_endpoint, "S3 Created multipart upload", response, params)
response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key)
log_command_execution("S3 Created multipart upload", response)
assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
return response["UploadId"]
@ -514,16 +493,15 @@ class Boto3ClientWrapper(S3ClientWrapper):
@report_error
def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
response = self.boto3_client.list_multipart_uploads(Bucket=bucket)
log_command_execution(self.s3gate_endpoint, "S3 List multipart upload", response, {"Bucket": bucket})
log_command_execution("S3 List multipart upload", response)
return response.get("Uploads")
@reporter.step("Abort multipart upload S3")
@report_error
def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.abort_multipart_upload(**params)
log_command_execution(self.s3gate_endpoint, "S3 Abort multipart upload", response, params)
response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
log_command_execution("S3 Abort multipart upload", response)
@reporter.step("Upload part S3")
@report_error
@ -531,10 +509,14 @@ class Boto3ClientWrapper(S3ClientWrapper):
with open(filepath, "rb") as put_file:
body = put_file.read()
params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath", "body"])
params["PartNumber"] = part_num
response = self.boto3_client.upload_part(Body=body, **params)
log_command_execution(self.s3gate_endpoint, "S3 Upload part", response, params)
response = self.boto3_client.upload_part(
UploadId=upload_id,
Bucket=bucket,
Key=key,
PartNumber=part_num,
Body=body,
)
log_command_execution("S3 Upload part", response)
assert response.get("ETag"), f"Expected ETag in response:\n{response}"
return response["ETag"]
@ -542,10 +524,14 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("Upload copy part S3")
@report_error
def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
params = self._convert_to_s3_params(locals().items(), exclude=["self", "put_file", "part_num", "filepath"])
params["PartNumber"] = part_num
response = self.boto3_client.upload_part_copy(**params)
log_command_execution(self.s3gate_endpoint, "S3 Upload copy part", response, params)
response = self.boto3_client.upload_part_copy(
UploadId=upload_id,
Bucket=bucket,
Key=key,
PartNumber=part_num,
CopySource=copy_source,
)
log_command_execution("S3 Upload copy part", response)
assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}"
return response["CopyPartResult"]["ETag"]
@ -553,9 +539,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
@reporter.step("List parts S3")
@report_error
def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.list_parts(**params)
log_command_execution(self.s3gate_endpoint, "S3 List part", response, params)
response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key)
log_command_execution("S3 List part", response)
assert response.get("Parts"), f"Expected Parts in response:\n{response}"
return response["Parts"]
@ -564,10 +549,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
@report_error
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
params = self._convert_to_s3_params(locals().items(), exclude=["self", "parts"])
params["MultipartUpload"] = {"Parts": parts}
response = self.boto3_client.complete_multipart_upload(**params)
log_command_execution(self.s3gate_endpoint, "S3 Complete multipart upload", response, params)
response = self.boto3_client.complete_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts}
)
log_command_execution("S3 Complete multipart upload", response)
return response
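# Illustrative only: the multipart flow these wrappers expose, assuming every
# part except the last meets the 5 MiB S3 minimum; `client`, `bucket`, `key`
# and `part_files` are placeholders.
upload_id = client.create_multipart_upload(bucket, key)
parts = []
for part_num, part_path in enumerate(part_files, start=1):
    etag = client.upload_part(bucket, key, upload_id, part_num, part_path)
    parts.append((part_num, etag))
client.complete_multipart_upload(bucket, key, upload_id, parts)  # takes (part_num, etag) tuples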
@ -581,9 +566,13 @@ class Boto3ClientWrapper(S3ClientWrapper):
version_id: Optional[str] = None,
bypass_governance_retention: Optional[bool] = None,
) -> None:
params = self._convert_to_s3_params(locals().items())
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.put_object_retention(**params)
log_command_execution(self.s3gate_endpoint, "S3 Put object retention ", response, params)
log_command_execution("S3 Put object retention ", response)
@reporter.step("Put object legal hold")
@report_error
@ -595,33 +584,39 @@ class Boto3ClientWrapper(S3ClientWrapper):
version_id: Optional[str] = None,
) -> None:
legal_hold = {"Status": legal_hold_status}
params = self._convert_to_s3_params(locals().items(), exclude=["self", "legal_hold_status"])
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self", "legal_hold_status"] and value is not None
}
response = self.boto3_client.put_object_legal_hold(**params)
log_command_execution(self.s3gate_endpoint, "S3 Put object legal hold ", response, params)
log_command_execution("S3 Put object legal hold ", response)
@reporter.step("Put object tagging")
@report_error
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None:
def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
tagging = {"TagSet": tags}
params = self._convert_to_s3_params(locals().items(), exclude=["self", "tags"])
response = self.boto3_client.put_object_tagging(**params)
log_command_execution(self.s3gate_endpoint, "S3 Put object tagging", response, params)
response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging)
log_command_execution("S3 Put object tagging", response)
@reporter.step("Get object tagging")
@report_error
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
params = self._convert_to_s3_params(locals().items())
params = {
self._to_s3_param(param): value
for param, value in locals().items()
if param not in ["self"] and value is not None
}
response = self.boto3_client.get_object_tagging(**params)
log_command_execution(self.s3gate_endpoint, "S3 Get object tagging", response, params)
log_command_execution("S3 Get object tagging", response)
return response.get("TagSet")
@reporter.step("Delete object tagging")
@report_error
def delete_object_tagging(self, bucket: str, key: str) -> None:
params = self._convert_to_s3_params(locals().items())
response = self.boto3_client.delete_object_tagging(**params)
log_command_execution(self.s3gate_endpoint, "S3 Delete object tagging", response, params)
response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key)
log_command_execution("S3 Delete object tagging", response)
@reporter.step("Get object attributes")
@report_error
@ -661,270 +656,3 @@ class Boto3ClientWrapper(S3ClientWrapper):
raise NotImplementedError("Cp is not supported for boto3 client")
# END OBJECT METHODS #
# IAM METHODS #
# Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.)
@reporter.step("Adds the specified user to the specified group")
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name)
return response
@reporter.step("Attaches the specified managed policy to the specified IAM group")
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Attaches the specified managed policy to the specified user")
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
def iam_create_access_key(self, user_name: str) -> tuple[str, str]:
response = self.boto3_iam_client.create_access_key(UserName=user_name)
access_key_id = response["AccessKey"].get("AccessKeyId")
secret_access_key = response["AccessKey"].get("SecretAccessKey")
assert access_key_id, f"Expected AccessKeyId in response:\n{response}"
assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
return access_key_id, secret_access_key
@reporter.step("Creates a new group")
def iam_create_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.create_group(GroupName=group_name)
assert response.get("Group"), f"Expected Group in response:\n{response}"
assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
return response
@reporter.step("Creates a new managed policy for your AWS account")
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
return response
@reporter.step("Creates a new IAM user for your AWS account")
def iam_create_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.create_user(UserName=user_name)
assert response.get("User"), f"Expected User in response:\n{response}"
assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
return response
@reporter.step("Deletes the access key pair associated with the specified IAM user")
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name)
return response
@reporter.step("Deletes the specified IAM group")
def iam_delete_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.delete_group(GroupName=group_name)
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name)
return response
@reporter.step("Deletes the specified managed policy")
def iam_delete_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn)
return response
@reporter.step("Deletes the specified IAM user")
def iam_delete_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.delete_user(UserName=user_name)
return response
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name)
return response
@reporter.step("Removes the specified managed policy from the specified IAM group")
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified managed policy from the specified user")
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
def iam_get_group(self, group_name: str) -> dict:
response = self.boto3_iam_client.get_group(GroupName=group_name)
assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name)
return response
@reporter.step("Retrieves information about the specified managed policy")
def iam_get_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn)
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
return response
@reporter.step("Retrieves information about the specified version of the specified managed policy")
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id)
assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}"
assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}"
return response
@reporter.step("Retrieves information about the specified IAM user")
def iam_get_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.get_user(UserName=user_name)
assert response.get("User"), f"Expected User in response:\n{response}"
assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
return response
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name)
assert response.get("UserName"), f"Expected UserName in response:\n{response}"
return response
@reporter.step("Returns information about the access key IDs associated with the specified IAM user")
def iam_list_access_keys(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_access_keys(UserName=user_name)
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM group")
def iam_list_attached_group_policies(self, group_name: str) -> dict:
response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
def iam_list_attached_user_policies(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name)
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
return response
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn)
assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}"
assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}"
return response
@reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
def iam_list_group_policies(self, group_name: str) -> dict:
response = self.boto3_iam_client.list_group_policies(GroupName=group_name)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM groups")
def iam_list_groups(self) -> dict:
response = self.boto3_iam_client.list_groups()
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
def iam_list_groups_for_user(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_groups_for_user(UserName=user_name)
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
return response
@reporter.step("Lists all the managed policies that are available in your AWS account")
def iam_list_policies(self) -> dict:
response = self.boto3_iam_client.list_policies()
assert response.get("Policies"), f"Expected Policies in response:\n{response}"
return response
@reporter.step("Lists information about the versions of the specified managed policy")
def iam_list_policy_versions(self, policy_arn: str) -> dict:
response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn)
assert response.get("Versions"), f"Expected Versions in response:\n{response}"
return response
@reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
def iam_list_user_policies(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_user_policies(UserName=user_name)
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
return response
@reporter.step("Lists the IAM users")
def iam_list_users(self) -> dict:
response = self.boto3_iam_client.list_users()
assert response.get("Users"), f"Expected Users in response:\n{response}"
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.put_group_policy(
GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
response = self.boto3_iam_client.put_user_policy(
UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
)
sleep(S3_SYNC_WAIT_TIME * 10)
return response
@reporter.step("Removes the specified user from the specified group")
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name)
return response
@reporter.step("Updates the name and/or the path of the specified IAM group")
def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath=new_path or "/")
return response
@reporter.step("Updates the name and/or the path of the specified IAM user")
def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath=new_path or "/")
return response
@reporter.step("Adds one or more tags to an IAM user")
def iam_tag_user(self, user_name: str, tags: list) -> dict:
tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
response = self.boto3_iam_client.tag_user(UserName=user_name, Tags=tags_json)
return response
@reporter.step("List tags of IAM user")
def iam_list_user_tags(self, user_name: str) -> dict:
response = self.boto3_iam_client.list_user_tags(UserName=user_name)
return response
@reporter.step("Removes the specified tags from the user")
def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
response = self.boto3_iam_client.untag_user(UserName=user_name, TagKeys=tag_keys)
return response
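# Illustrative only: how these IAM wrappers compose in a test. The Policy "Arn"
# field is standard in IAM create_policy responses; names are placeholders.
client.iam_create_user("alice")
policy = client.iam_create_policy(
    "allow-s3",
    {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "*"}]},
)
client.iam_attach_user_policy("alice", policy["Policy"]["Arn"])
access_key_id, secret_access_key = client.iam_create_access_key("alice")
# ... exercise S3 with the new credentials, then tear down in reverse order:
client.iam_detach_user_policy("alice", policy["Policy"]["Arn"])
client.iam_delete_access_key(access_key_id, "alice")
client.iam_delete_user("alice")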


@ -1,16 +0,0 @@
import re
from frostfs_testlib.cli.generic_cli import GenericCli
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.storage.cluster import ClusterNode
class CurlBucketContainerResolver(BucketContainerResolver):
def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
curl = GenericCli("curl", node.host)
output = curl(f"-I http://127.0.0.1:8084/{bucket_name}")
pattern = r"X-Container-Id: (\S+)"
cid = re.findall(pattern, output.stdout)
if cid:
return cid[0]
return None
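# Illustrative only: resolving a bucket to its container ID on a node whose
# HTTP gate listens on 127.0.0.1:8084, as hard-coded above; `cluster_node`
# is a placeholder ClusterNode.
resolver = CurlBucketContainerResolver()
cid = resolver.resolve(cluster_node, "my-bucket")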


@ -1,10 +1,8 @@
from abc import ABC, abstractmethod
from abc import abstractmethod
from datetime import datetime
from typing import Literal, Optional, Union
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
from frostfs_testlib.utils.file_utils import TestFile
def _make_objs_dict(key_names):
@ -33,25 +31,9 @@ ACL_COPY = [
]
class BucketContainerResolver(ABC):
@abstractmethod
def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
"""
Resolve Container ID from bucket name
Args:
node: node from where we want to resolve
bucket_name: name of the bucket
**kwargs: any other required params
Returns: Container ID
"""
raise NotImplementedError("Call from abstract class")
class S3ClientWrapper(HumanReadableABC):
@abstractmethod
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None:
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str) -> None:
pass
@abstractmethod
@ -153,10 +135,6 @@ class S3ClientWrapper(HumanReadableABC):
def get_bucket_policy(self, bucket: str) -> str:
"""Returns the policy of a specified bucket."""
@abstractmethod
def delete_bucket_policy(self, bucket: str) -> str:
"""Deletes the policy of a specified bucket."""
@abstractmethod
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
"""Applies S3 bucket policy to an S3 bucket."""
@ -290,7 +268,7 @@ class S3ClientWrapper(HumanReadableABC):
version_id: Optional[str] = None,
object_range: Optional[tuple[int, int]] = None,
full_output: bool = False,
) -> dict | TestFile:
) -> Union[dict, str]:
"""Retrieves objects from S3."""
@abstractmethod
@ -318,11 +296,15 @@ class S3ClientWrapper(HumanReadableABC):
abort a given multipart upload multiple times in order to completely free all storage consumed by all parts."""
@abstractmethod
def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
def upload_part(
self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
) -> str:
"""Uploads a part in a multipart upload."""
@abstractmethod
def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
def upload_part_copy(
self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
) -> str:
"""Uploads a part by copying data from an existing object as data source."""
@abstractmethod
@ -400,165 +382,3 @@ class S3ClientWrapper(HumanReadableABC):
"""cp directory TODO: Add proper description"""
# END OF OBJECT METHODS #
# IAM METHODS #
@abstractmethod
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
"""Adds the specified user to the specified group"""
@abstractmethod
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
"""Attaches the specified managed policy to the specified IAM group"""
@abstractmethod
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
"""Attaches the specified managed policy to the specified user"""
@abstractmethod
def iam_create_access_key(self, user_name: str) -> dict:
"""Creates a new AWS secret access key and access key ID for the specified user"""
@abstractmethod
def iam_create_group(self, group_name: str) -> dict:
"""Creates a new group"""
@abstractmethod
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
"""Creates a new managed policy for your AWS account"""
@abstractmethod
def iam_create_user(self, user_name: str) -> dict:
"""Creates a new IAM user for your AWS account"""
@abstractmethod
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
"""Deletes the access key pair associated with the specified IAM user"""
@abstractmethod
def iam_delete_group(self, group_name: str) -> dict:
"""Deletes the specified IAM group"""
@abstractmethod
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
"""Deletes the specified inline policy that is embedded in the specified IAM group"""
@abstractmethod
def iam_delete_policy(self, policy_arn: str) -> dict:
"""Deletes the specified managed policy"""
@abstractmethod
def iam_delete_user(self, user_name: str) -> dict:
"""Deletes the specified IAM user"""
@abstractmethod
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
"""Deletes the specified inline policy that is embedded in the specified IAM user"""
@abstractmethod
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
"""Removes the specified managed policy from the specified IAM group"""
@abstractmethod
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
"""Removes the specified managed policy from the specified user"""
@abstractmethod
def iam_get_group(self, group_name: str) -> dict:
"""Returns a list of IAM users that are in the specified IAM group"""
@abstractmethod
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
"""Retrieves the specified inline policy document that is embedded in the specified IAM group"""
@abstractmethod
def iam_get_policy(self, policy_arn: str) -> dict:
"""Retrieves information about the specified managed policy"""
@abstractmethod
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
"""Retrieves information about the specified version of the specified managed policy"""
@abstractmethod
def iam_get_user(self, user_name: str) -> dict:
"""Retrieves information about the specified IAM user"""
@abstractmethod
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
"""Retrieves the specified inline policy document that is embedded in the specified IAM user"""
@abstractmethod
def iam_list_access_keys(self, user_name: str) -> dict:
"""Returns information about the access key IDs associated with the specified IAM user"""
@abstractmethod
def iam_list_attached_group_policies(self, group_name: str) -> dict:
"""Lists all managed policies that are attached to the specified IAM group"""
@abstractmethod
def iam_list_attached_user_policies(self, user_name: str) -> dict:
"""Lists all managed policies that are attached to the specified IAM user"""
@abstractmethod
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
"""Lists all IAM users, groups, and roles that the specified managed policy is attached to"""
@abstractmethod
def iam_list_group_policies(self, group_name: str) -> dict:
"""Lists the names of the inline policies that are embedded in the specified IAM group"""
@abstractmethod
def iam_list_groups(self) -> dict:
"""Lists the IAM groups"""
@abstractmethod
def iam_list_groups_for_user(self, user_name: str) -> dict:
"""Lists the IAM groups that the specified IAM user belongs to"""
@abstractmethod
def iam_list_policies(self) -> dict:
"""Lists all the managed policies that are available in your AWS account"""
@abstractmethod
def iam_list_policy_versions(self, policy_arn: str) -> dict:
"""Lists information about the versions of the specified managed policy"""
@abstractmethod
def iam_list_user_policies(self, user_name: str) -> dict:
"""Lists the names of the inline policies embedded in the specified IAM user"""
@abstractmethod
def iam_list_users(self) -> dict:
"""Lists the IAM users"""
@abstractmethod
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
"""Adds or updates an inline policy document that is embedded in the specified IAM group"""
@abstractmethod
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
"""Adds or updates an inline policy document that is embedded in the specified IAM user"""
@abstractmethod
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
"""Removes the specified user from the specified group"""
@abstractmethod
def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
"""Updates the name and/or the path of the specified IAM group"""
@abstractmethod
def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
"""Updates the name and/or the path of the specified IAM user"""
@abstractmethod
def iam_tag_user(self, user_name: str, tags: list) -> dict:
"""Adds one or more tags to an IAM user"""
@abstractmethod
def iam_list_user_tags(self, user_name: str) -> dict:
"""List tags of IAM user"""
@abstractmethod
def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
"""Removes the specified tags from the user"""


@ -28,10 +28,10 @@ class LocalShell(Shell):
for inspector in [*self.command_inspectors, *extra_inspectors]:
command = inspector.inspect(original_command, command)
with reporter.step(f"Executing command: {command}"):
if options.interactive_inputs:
return self._exec_interactive(command, options)
return self._exec_non_interactive(command, options)
logger.info(f"Executing command: {command}")
if options.interactive_inputs:
return self._exec_interactive(command, options)
return self._exec_non_interactive(command, options)
def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult:
start_time = datetime.utcnow()
@ -60,7 +60,9 @@ class LocalShell(Shell):
if options.check and result.return_code != 0:
raise RuntimeError(
f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n"
f"Command: {command}\nreturn code: {result.return_code}\n"
f"Output: {result.stdout}\n"
f"Stderr: {result.stderr}\n"
)
return result
@ -91,7 +93,9 @@ class LocalShell(Shell):
stderr="",
return_code=exc.returncode,
)
raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc
raise RuntimeError(
f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" f"output: {exc.output}"
) from exc
except OSError as exc:
raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc
finally:
@ -125,19 +129,22 @@ class LocalShell(Shell):
end_time: datetime,
result: Optional[CommandResult],
) -> None:
if not result:
logger.warning(f"Command: {command}\n" f"Error: result is None")
return
status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning)
log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}")
elapsed_time = end_time - start_time
command_attachment = (
f"COMMAND: {command}\n"
f"RETCODE: {result.return_code}\n\n"
f"STDOUT:\n{result.stdout}\n"
f"STDERR:\n{result.stderr}\n"
f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
# TODO: increase logging level if return code is non-zero; should be warning at least
logger.info(
f"Command: {command}\n"
f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n"
f"return code: {result.return_code if result else ''} "
f"\nOutput: {result.stdout if result else ''}"
)
reporter.attach(command_attachment, "Command execution.txt")
if result:
elapsed_time = end_time - start_time
command_attachment = (
f"COMMAND: {command}\n"
f"RETCODE: {result.return_code}\n\n"
f"STDOUT:\n{result.stdout}\n"
f"STDERR:\n{result.stderr}\n"
f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
)
with reporter.step(f"COMMAND: {command}"):
reporter.attach(command_attachment, "Command execution.txt")


@ -11,20 +11,25 @@ import base58
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.storage.dataclasses.acl import (
EACL_LIFETIME,
FROSTFS_CONTRACT_CACHE_TIMEOUT,
EACLPubKey,
EACLRole,
EACLRule,
)
from frostfs_testlib.utils import wallet_utils
logger = logging.getLogger("NeoLogger")
@reporter.step("Get extended ACL")
def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
try:
result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid)
result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid)
except RuntimeError as exc:
logger.info("Extended ACL table is not set for this container")
logger.info(f"Got exception while getting eacl: {exc}")
@ -36,15 +41,16 @@ def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optio
@reporter.step("Set extended ACL")
def set_eacl(
wallet: WalletInfo,
wallet_path: str,
cid: str,
eacl_table_path: str,
shell: Shell,
endpoint: str,
session_token: Optional[str] = None,
) -> None:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.container.set_eacl(
wallet=wallet_path,
rpc_endpoint=endpoint,
cid=cid,
table=eacl_table_path,
@ -60,7 +66,7 @@ def _encode_cid_for_eacl(cid: str) -> str:
def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json")
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list)
with open(table_file_path, "r") as file:
@ -71,7 +77,7 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
def form_bearertoken_file(
wallet: WalletInfo,
wif: str,
cid: str,
eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
shell: Shell,
@ -86,7 +92,7 @@ def form_bearertoken_file(
enc_cid = _encode_cid_for_eacl(cid) if cid else None
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
eacl = get_eacl(wallet, cid, shell, endpoint)
eacl = get_eacl(wif, cid, shell, endpoint)
json_eacl = dict()
if eacl:
eacl = eacl.replace("eACL: ", "").split("Signature")[0]
@ -127,7 +133,7 @@ def form_bearertoken_file(
if sign:
sign_bearer(
shell=shell,
wallet=wallet,
wallet_path=wif,
eacl_rules_file_from=file_path,
eacl_rules_file_to=file_path,
json=True,
@ -158,9 +164,11 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
return rules
def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None:
frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json)
def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None:
frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG)
frostfscli.util.sign_bearer_token(
wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
)
@reporter.step("Wait for eACL cache expired")
@ -170,7 +178,9 @@ def wait_for_cache_expired():
@reporter.step("Return bearer token in base64 to caller")
def bearer_token_base64_from_file(bearer_path: str) -> str:
def bearer_token_base64_from_file(
bearer_path: str,
) -> str:
with open(bearer_path, "rb") as file:
signed = file.read()
return base64.b64encode(signed).decode("utf-8")
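# Illustrative only: the base64 string produced above is what travels in an
# HTTP header; the exact header name is an assumption, not taken from this diff.
token_b64 = bearer_token_base64_from_file("/path/to/bearer_token")
headers = {"Authorization": f"Bearer {token_b64}"}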


@ -5,11 +5,12 @@ from dataclasses import dataclass
from time import sleep
from typing import Optional, Union
import requests
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.s3.interfaces import BucketContainerResolver
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@ -24,7 +25,7 @@ logger = logging.getLogger("NeoLogger")
@dataclass
class StorageContainerInfo:
id: str
wallet: WalletInfo
wallet_file: WalletInfo
class StorageContainer:
@ -41,8 +42,11 @@ class StorageContainer:
def get_id(self) -> str:
return self.storage_container_info.id
def get_wallet(self) -> str:
return self.storage_container_info.wallet
def get_wallet_path(self) -> str:
return self.storage_container_info.wallet_file.path
def get_wallet_config_path(self) -> str:
return self.storage_container_info.wallet_file.config_path
@reporter.step("Generate new object and put in container")
def generate_object(
@ -57,34 +61,37 @@ class StorageContainer:
file_hash = get_file_hash(file_path)
container_id = self.get_id()
wallet = self.get_wallet()
wallet_path = self.get_wallet_path()
wallet_config = self.get_wallet_config_path()
with reporter.step(f"Put object with size {size} to container {container_id}"):
if endpoint:
object_id = put_object(
wallet=wallet,
wallet=wallet_path,
path=file_path,
cid=container_id,
expire_at=expire_at,
shell=self.shell,
endpoint=endpoint,
bearer=bearer_token,
wallet_config=wallet_config,
)
else:
object_id = put_object_to_random_node(
wallet=wallet,
wallet=wallet_path,
path=file_path,
cid=container_id,
expire_at=expire_at,
shell=self.shell,
cluster=self.cluster,
bearer=bearer_token,
wallet_config=wallet_config,
)
storage_object = StorageObjectInfo(
container_id,
object_id,
size=size,
wallet=wallet,
wallet_file_path=wallet_path,
file_path=file_path,
file_hash=file_hash,
)
@ -95,18 +102,18 @@ class StorageContainer:
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"
@reporter.step("Create Container")
def create_container(
wallet: WalletInfo,
wallet: str,
shell: Shell,
endpoint: str,
rule: str = DEFAULT_PLACEMENT_RULE,
basic_acl: str = "",
attributes: Optional[dict] = None,
session_token: str = "",
session_wallet: str = "",
name: Optional[str] = None,
options: Optional[dict] = None,
await_mode: bool = True,
@ -117,7 +124,7 @@ def create_container(
A wrapper for `frostfs-cli container create` call.
Args:
wallet (WalletInfo): a wallet on whose behalf a container is created
wallet (str): a wallet on whose behalf a container is created
rule (optional, str): placement rule for container
basic_acl (optional, str): an ACL for container, will be
appended to `--basic-acl` key
@ -139,9 +146,10 @@ def create_container(
(str): CID of the created container
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.create(
rpc_endpoint=endpoint,
wallet=session_wallet if session_wallet else wallet,
policy=rule,
basic_acl=basic_acl,
attributes=attributes,
@ -162,7 +170,9 @@ def create_container(
return cid
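# Illustrative only: creating a container with one of the placement rules
# defined above; the wallet path, shell and endpoint are placeholders.
cid = create_container(
    wallet="/path/to/wallet.json",
    shell=shell,
    endpoint=rpc_endpoint,
    rule=SINGLE_PLACEMENT_RULE,
    basic_acl="public-read-write",
)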
def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1):
def wait_for_container_creation(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1
):
for _ in range(attempts):
containers = list_containers(wallet, shell, endpoint)
if cid in containers:
@ -172,7 +182,9 @@ def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endp
raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting")
def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1):
def wait_for_container_deletion(
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1
):
for _ in range(attempts):
try:
get_container(wallet, cid, shell=shell, endpoint=endpoint)
@ -186,26 +198,29 @@ def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endp
@reporter.step("List Containers")
def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]:
def list_containers(
wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT
) -> list[str]:
"""
A wrapper for `frostfs-cli container list` call. It returns all the
available containers for the given wallet.
Args:
wallet (WalletInfo): a wallet on whose behalf we list the containers
wallet (str): a wallet on whose behalf we list the containers
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
Returns:
(list): list of containers
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout)
logger.info(f"Containers: \n{result}")
return result.stdout.split()
@reporter.step("List Objects in container")
def list_objects(
wallet: WalletInfo,
wallet: str,
shell: Shell,
container_id: str,
endpoint: str,
@ -215,7 +230,7 @@ def list_objects(
A wrapper for `frostfs-cli container list-objects` call. It returns all the
available objects in the container.
Args:
wallet (str): a wallet on whose behalf we list the container's objects
wallet (str): a wallet on whose behalf we list the containers objects
shell: executor for cli command
container_id: cid of container
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@ -223,15 +238,15 @@ def list_objects(
Returns:
(list): list of objects in the container
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.list_objects(rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout)
logger.info(f"Container objects: \n{result}")
return result.stdout.split()
@reporter.step("Get Container")
def get_container(
wallet: WalletInfo,
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
@ -242,7 +257,7 @@ def get_container(
A wrapper for `frostfs-cli container get` call. It extracts container's
attributes and rearranges them into a more compact view.
Args:
wallet (WalletInfo): path to a wallet on whose behalf we get the container
wallet (str): path to a wallet on whose behalf we get the container
cid (str): ID of the container to get
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@ -252,8 +267,8 @@ def get_container(
(dict, str): dict of container attributes
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout)
if not json_mode:
return result.stdout
@ -270,7 +285,7 @@ def get_container(
@reporter.step("Delete Container")
# TODO: make the error message about a non-found container more user-friendly
def delete_container(
wallet: WalletInfo,
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
@ -282,7 +297,7 @@ def delete_container(
A wrapper for `frostfs-cli container delete` call.
Args:
await_mode: Block execution until container is removed.
wallet (WalletInfo): path to a wallet on whose behalf we delete the container
wallet (str): path to a wallet on whose behalf we delete the container
cid (str): ID of the container to delete
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
@ -291,8 +306,9 @@ def delete_container(
This function doesn't return anything.
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
cli.container.delete(
wallet=wallet,
cid=cid,
rpc_endpoint=endpoint,
force=force,
@ -329,22 +345,26 @@ def _parse_cid(output: str) -> str:
@reporter.step("Search container by name")
def search_container_by_name(name: str, node: ClusterNode):
resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product)
resolver: BucketContainerResolver = resolver_cls()
return resolver.resolve(node, name)
node_shell = node.host.get_shell()
output = node_shell.exec(f"curl -I HEAD http://127.0.0.1:8084/{name}")
pattern = r"X-Container-Id: (\S+)"
cid = re.findall(pattern, output.stdout)
if cid:
return cid[0]
return None
@reporter.step("Search for nodes with a container")
def search_nodes_with_container(
wallet: WalletInfo,
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
cluster: Cluster,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[ClusterNode]:
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
result = cli.container.search_node(rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout)
pattern = r"[0-9]+(?:\.[0-9]+){3}"
nodes_ip = list(set(re.findall(pattern, result.stdout)))


@ -9,21 +9,18 @@ from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.cli.neogo import NeoGo
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import wait_for_success
from frostfs_testlib.utils import json_utils
from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
from frostfs_testlib.utils.file_utils import TestFile
logger = logging.getLogger("NeoLogger")
@reporter.step("Get object from random node")
def get_object_from_random_node(
wallet: WalletInfo,
wallet: str,
cid: str,
oid: str,
shell: Shell,
@ -31,6 +28,7 @@ def get_object_from_random_node(
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -46,6 +44,7 @@ def get_object_from_random_node(
cluster: cluster object
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
write_object (optional, str): path to downloaded file, appends to `--file` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
@ -63,6 +62,7 @@ def get_object_from_random_node(
bearer,
write_object,
xhdr,
wallet_config,
no_progress,
session,
timeout,
@ -71,7 +71,7 @@ def get_object_from_random_node(
@reporter.step("Get object from {endpoint}")
def get_object(
wallet: WalletInfo,
wallet: str,
cid: str,
oid: str,
shell: Shell,
@ -79,21 +79,23 @@ def get_object(
bearer: Optional[str] = None,
write_object: Optional[str] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
no_progress: bool = True,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> TestFile:
) -> str:
"""
GET from FrostFS.
Args:
wallet (WalletInfo): wallet on whose behalf GET is done
wallet (str): wallet on whose behalf GET is done
cid (str): ID of Container where we get the Object from
oid (str): Object ID
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
write_object: path to downloaded file, appends to `--file` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config(optional, str): path to the wallet config
no_progress(optional, bool): do not show progress bar
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
@ -104,14 +106,15 @@ def get_object(
if not write_object:
write_object = str(uuid.uuid4())
test_file = TestFile(os.path.join(ASSETS_DIR, write_object))
file_path = os.path.join(ASSETS_DIR, write_object)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
cli.object.get(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
file=test_file,
file=file_path,
bearer=bearer,
no_progress=no_progress,
xhdr=xhdr,
@ -119,18 +122,19 @@ def get_object(
timeout=timeout,
)
return test_file
return file_path
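# Illustrative only: downloading an object with the backported signature above,
# where `wallet` is a path string; cid/oid/shell/endpoint are placeholders.
local_path = get_object(
    wallet="/path/to/wallet.json",
    cid=cid,
    oid=oid,
    shell=shell,
    endpoint=rpc_endpoint,
)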
@reporter.step("Get Range Hash from {endpoint}")
def get_range_hash(
wallet: WalletInfo,
wallet: str,
cid: str,
oid: str,
range_cut: str,
shell: Shell,
endpoint: str,
bearer: Optional[str] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -147,15 +151,17 @@ def get_range_hash(
range_cut: Range to take hash from in the form offset1:length1,...,
value to pass to the `--range` parameter
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Values
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
timeout: Timeout for the operation.
Returns:
Hash of the object range
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.hash(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
range=range_cut,
@ -171,7 +177,7 @@ def get_range_hash(
@reporter.step("Put object to random node")
def put_object_to_random_node(
wallet: WalletInfo,
wallet: str,
path: str,
cid: str,
shell: Shell,
@ -180,6 +186,7 @@ def put_object_to_random_node(
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
@ -198,6 +205,7 @@ def put_object_to_random_node(
copies_number: Number of copies of the object to store within the RPC call
attributes: User attributes in form of Key1=Value1,Key2=Value2
cluster: cluster under test
wallet_config: path to the wallet config
no_progress: do not show progress bar
expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value
@ -218,6 +226,7 @@ def put_object_to_random_node(
copies_number,
attributes,
xhdr,
wallet_config,
expire_at,
no_progress,
session,
@ -227,7 +236,7 @@ def put_object_to_random_node(
@reporter.step("Put object at {endpoint} in container {cid}")
def put_object(
wallet: WalletInfo,
wallet: str,
path: str,
cid: str,
shell: Shell,
@ -236,6 +245,7 @@ def put_object(
copies_number: Optional[int] = None,
attributes: Optional[dict] = None,
xhdr: Optional[dict] = None,
wallet_config: Optional[str] = None,
expire_at: Optional[int] = None,
no_progress: bool = True,
session: Optional[str] = None,
@ -253,6 +263,7 @@ def put_object(
copies_number: Number of copies of the object to store within the RPC call
attributes: User attributes in form of Key1=Value1,Key2=Value2
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
no_progress: do not show progress bar
expire_at: Last epoch in the life of the object
xhdr: Request X-Headers in form of Key=Value
@ -262,9 +273,10 @@ def put_object(
(str): ID of uploaded Object
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.put(
rpc_endpoint=endpoint,
wallet=wallet,
file=path,
cid=cid,
attributes=attributes,
@ -285,12 +297,13 @@ def put_object(
@reporter.step("Delete object {cid}/{oid} from {endpoint}")
def delete_object(
wallet: WalletInfo,
wallet: str,
cid: str,
oid: str,
shell: Shell,
endpoint: str,
bearer: str = "",
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -305,6 +318,7 @@ def delete_object(
shell: executor for cli command
bearer: path to Bearer Token file, appends to `--bearer` key
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
@ -312,9 +326,10 @@ def delete_object(
(str): Tombstone ID
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.delete(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,
@ -330,12 +345,13 @@ def delete_object(
@reporter.step("Get Range")
def get_range(
wallet: WalletInfo,
wallet: str,
cid: str,
oid: str,
range_cut: str,
shell: Shell,
endpoint: str,
wallet_config: Optional[str] = None,
bearer: str = "",
xhdr: Optional[dict] = None,
session: Optional[str] = None,
@ -352,35 +368,37 @@ def get_range(
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
bearer: path to Bearer Token file, appends to `--bearer` key
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
timeout: Timeout for the operation.
Returns:
(str, bytes) - path to the file with range content and content of this file as bytes
"""
test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4())))
range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
cli.object.range(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
range=range_cut,
file=test_file,
file=range_file_path,
bearer=bearer,
xhdr=xhdr,
session=session,
timeout=timeout,
)
with open(test_file, "rb") as file:
with open(range_file_path, "rb") as file:
content = file.read()
return test_file, content
return range_file_path, content
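A sketch of consuming the tuple return; identifiers are placeholders, and the offset:length range format is assumed from the get_range_hash docstring above:
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.steps.cli.object import get_range  # same module as get_object, assumed

shell = LocalShell()
range_path, range_bytes = get_range(
    wallet="wallets/user.json",  # placeholder
    cid="EXAMPLE_CID",
    oid="EXAMPLE_OID",
    range_cut="0:16",            # first 16 bytes
    shell=shell,
    endpoint="s01.frostfs.devenv:8080",  # placeholder
)
assert len(range_bytes) == 16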
@reporter.step("Lock Object")
def lock_object(
wallet: WalletInfo,
wallet: str,
cid: str,
oid: str,
shell: Shell,
@ -390,6 +408,7 @@ def lock_object(
address: Optional[str] = None,
bearer: Optional[str] = None,
session: Optional[str] = None,
wallet_config: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
@ -416,12 +435,13 @@ def lock_object(
Lock object ID
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.lock(
rpc_endpoint=endpoint,
lifetime=lifetime,
expire_at=expire_at,
address=address,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,
@ -439,13 +459,14 @@ def lock_object(
@reporter.step("Search object")
def search_object(
wallet: WalletInfo,
wallet: str,
cid: str,
shell: Shell,
endpoint: str,
bearer: str = "",
filters: Optional[dict] = None,
expected_objects_list: Optional[list] = None,
wallet_config: Optional[str] = None,
xhdr: Optional[dict] = None,
session: Optional[str] = None,
phy: bool = False,
@ -463,6 +484,7 @@ def search_object(
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
filters: key=value pairs to filter Objects
expected_objects_list: a list of ObjectIDs to compare found Objects with
wallet_config: path to the wallet config
xhdr: Request X-Headers in form of Key=Value
session: path to a JSON-encoded container session token
phy: Search physically stored objects.
@ -473,9 +495,10 @@ def search_object(
list of found ObjectIDs
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.search(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
bearer=bearer,
xhdr=xhdr,
@ -490,18 +513,23 @@ def search_object(
if expected_objects_list:
if sorted(found_objects) == sorted(expected_objects_list):
logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'")
logger.info(
f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'"
)
else:
logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'")
logger.warning(
f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'"
)
return found_objects
@reporter.step("Get netmap netinfo")
def get_netmap_netinfo(
wallet: WalletInfo,
wallet: str,
shell: Shell,
endpoint: str,
wallet_config: Optional[str] = None,
address: Optional[str] = None,
ttl: Optional[int] = None,
xhdr: Optional[dict] = None,
@ -511,7 +539,7 @@ def get_netmap_netinfo(
Get netmap netinfo output from node
Args:
wallet (WalletInfo): wallet on whose behalf request is done
wallet (str): wallet on whose behalf request is done
shell: executor for cli command
endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
address: Address of wallet account
@ -524,8 +552,9 @@ def get_netmap_netinfo(
(dict): dict of parsed command output
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
output = cli.netmap.netinfo(
wallet=wallet,
rpc_endpoint=endpoint,
address=address,
ttl=ttl,
@ -549,7 +578,7 @@ def get_netmap_netinfo(
@reporter.step("Head object")
def head_object(
wallet: WalletInfo,
wallet: str,
cid: str,
oid: str,
shell: Shell,
@ -559,6 +588,7 @@ def head_object(
json_output: bool = True,
is_raw: bool = False,
is_direct: bool = False,
wallet_config: Optional[str] = None,
session: Optional[str] = None,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
@ -566,7 +596,7 @@ def head_object(
HEAD an Object.
Args:
wallet (WalletInfo): wallet on whose behalf HEAD is done
wallet (str): wallet on whose behalf HEAD is done
cid (str): ID of Container where we get the Object from
oid (str): ObjectID to HEAD
shell: executor for cli command
@ -578,6 +608,7 @@ def head_object(
turns into `--raw` key
is_direct(optional, bool): send request directly to the node or not; this flag
turns into `--ttl 1` key
wallet_config(optional, str): path to the wallet config
xhdr (optional, dict): Request X-Headers in form of Key=Value
session (optional, dict): path to a JSON-encoded container session token
timeout: Timeout for the operation.
@ -588,9 +619,10 @@ def head_object(
(str): HEAD response as a plain text
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
result = cli.object.head(
rpc_endpoint=endpoint,
wallet=wallet,
cid=cid,
oid=oid,
bearer=bearer,
@ -616,11 +648,6 @@ def head_object(
fst_line_idx = result.stdout.find("\n")
decoded = json.loads(result.stdout[fst_line_idx:])
# If response is an EC object header, it has `chunks` key
if "chunks" in decoded.keys():
logger.info("decoding ec chunks")
return decoded["chunks"]
# If response is Complex Object header, it has `splitId` key
if "splitId" in decoded.keys():
logger.info("decoding split header")
@ -646,7 +673,7 @@ def head_object(
@reporter.step("Run neo-go dump-keys")
def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict:
def neo_go_dump_keys(shell: Shell, wallet: str) -> dict:
"""
Run neo-go dump keys command
@ -702,7 +729,6 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
}
@wait_for_success()
@reporter.step("Search object nodes")
def get_object_nodes(
cluster: Cluster,
@ -722,27 +748,23 @@ def get_object_nodes(
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)
response = cli.object.nodes(
result_object_nodes = cli.object.nodes(
rpc_endpoint=endpoint,
cid=cid,
oid=oid,
bearer=bearer,
ttl=1 if is_direct else None,
json=True,
xhdr=xhdr,
timeout=timeout,
verify_presence_all=verify_presence_all,
)
response_json = json.loads(response.stdout)
# Currently, the command will show expected and confirmed nodes.
# And we (currently) count only nodes which are both expected and confirmed
object_nodes_id = {
required_node
for data_object in response_json["data_objects"]
for required_node in data_object["required_nodes"]
if required_node in data_object["confirmed_nodes"]
}
parsing_output = parse_cmd_table(result_object_nodes.stdout, "|")
list_object_nodes = [
node
for node in parsing_output
if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true"
]
netmap_nodes_list = parse_netmap_output(
cli.netmap.snapshot(
@ -751,11 +773,17 @@ def get_object_nodes(
).stdout
)
netmap_nodes = [
netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id
netmap_node
for object_node in list_object_nodes
for netmap_node in netmap_nodes_list
if object_node["node_id"] == netmap_node.node_id
]
object_nodes = [
cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip
result = [
cluster_node
for netmap_node in netmap_nodes
for cluster_node in cluster.cluster_nodes
if netmap_node.node == cluster_node.host_ip
]
return object_nodes
return result
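The new table-based filtering can be shown standalone; the column names and row shape come from the code above, while the sample rows are invented:
sample_rows = [  # shaped like parse_cmd_table output: one dict per row, keyed by header
    {"node_id": "a1", "should_contain_object": "true", "actually_contains_object": "true"},
    {"node_id": "b2", "should_contain_object": "true", "actually_contains_object": "false"},
]
confirmed = [
    row for row in sample_rows
    if row["should_contain_object"] == "true" and row["actually_contains_object"] == "true"
]
assert [row["node_id"] for row in confirmed] == ["a1"]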
@ -1,35 +0,0 @@
import logging
from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.plugins import load_plugin
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
logger = logging.getLogger("NeoLogger")
@reporter.step("Get Tree List")
def get_tree_list(
wallet: WalletInfo,
cid: str,
shell: Shell,
endpoint: str,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> None:
"""
A wrapper for `frostfs-cli tree list` call.
Args:
wallet (WalletInfo): path to a wallet on whose behalf we request the tree list
cid (str): ID of the container whose trees are listed
shell: executor for cli command
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
timeout: Timeout for the operation.
This function doesn't return anything.
"""
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
cli.tree.list(cid=cid, rpc_endpoint=endpoint, timeout=timeout)
@ -14,11 +14,11 @@ from typing import Optional, Tuple
from frostfs_testlib import reporter
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object
from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
logger = logging.getLogger("NeoLogger")
@ -44,7 +44,7 @@ def get_storage_object_chunks(
with reporter.step(f"Get complex object chunks (f{storage_object.oid})"):
split_object_id = get_link_object(
storage_object.wallet,
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell,
@ -53,7 +53,7 @@ def get_storage_object_chunks(
timeout=timeout,
)
head = head_object(
storage_object.wallet,
storage_object.wallet_file_path,
storage_object.cid,
split_object_id,
shell,
@ -96,7 +96,7 @@ def get_complex_object_split_ranges(
chunks_ids = get_storage_object_chunks(storage_object, shell, cluster)
for chunk_id in chunks_ids:
head = head_object(
storage_object.wallet,
storage_object.wallet_file_path,
storage_object.cid,
chunk_id,
shell,
@ -114,12 +114,13 @@ def get_complex_object_split_ranges(
@reporter.step("Get Link Object")
def get_link_object(
wallet: WalletInfo,
wallet: str,
cid: str,
oid: str,
shell: Shell,
nodes: list[StorageNode],
bearer: str = "",
wallet_config: str = DEFAULT_WALLET_CONFIG,
is_direct: bool = True,
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
):
@ -153,6 +154,7 @@ def get_link_object(
is_raw=True,
is_direct=is_direct,
bearer=bearer,
wallet_config=wallet_config,
timeout=timeout,
)
if resp["link"]:
@ -165,7 +167,7 @@ def get_link_object(
@reporter.step("Get Last Object")
def get_last_object(
wallet: WalletInfo,
wallet: str,
cid: str,
oid: str,
shell: Shell,
@ -4,7 +4,13 @@ from typing import Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
from frostfs_testlib.resources.cli import (
CLI_DEFAULT_TIMEOUT,
FROSTFS_ADM_CONFIG_PATH,
FROSTFS_ADM_EXEC,
FROSTFS_CLI_EXEC,
NEOGO_EXECUTABLE,
)
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.payment_neogo import get_contract_hash
@ -11,19 +11,19 @@ from urllib.parse import quote_plus
import requests
from frostfs_testlib import reporter
from frostfs_testlib.cli import GenericCli
from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE
from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
from frostfs_testlib.s3.aws_cli_client import command_options
from frostfs_testlib.shell import Shell
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.steps.cli.object import get_object
from frostfs_testlib.steps.storage_policy import get_nodes_without_object
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.testing.test_control import retry
from frostfs_testlib.utils.file_utils import TestFile, get_file_hash
from frostfs_testlib.utils.file_utils import get_file_hash
logger = logging.getLogger("NeoLogger")
ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
local_shell = LocalShell()
@ -31,7 +31,8 @@ local_shell = LocalShell()
def get_via_http_gate(
cid: str,
oid: str,
node: ClusterNode,
endpoint: str,
http_hostname: str,
request_path: Optional[str] = None,
timeout: Optional[int] = 300,
):
@ -39,17 +40,18 @@ def get_via_http_gate(
This function gets the given object from the HTTP gate
cid: container id to get object from
oid: object ID
node: node to make request
endpoint: http gate endpoint
http_hostname: http host name on the node
request_path: (optional) http request, if omitted - use default [{endpoint}/get/{cid}/{oid}]
"""
# if `request_path` parameter omitted, use default
if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
request = f"{endpoint}/get/{cid}/{oid}"
else:
request = f"{node.http_gate.get_endpoint()}{request_path}"
request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False)
if not resp.ok:
raise Exception(
@ -63,21 +65,22 @@ def get_via_http_gate(
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}"))
with open(test_file, "wb") as file:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}")
with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file)
return test_file
return file_path
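The essential change is addressing the gate by endpoint while routing with an explicit Host header; a standalone sketch with placeholder endpoint, hostname, and IDs:
import requests

resp = requests.get(
    "http://127.0.0.1:8081/get/EXAMPLE_CID/EXAMPLE_OID",  # placeholder endpoint and IDs
    headers={"Host": "http.frostfs.devenv"},              # placeholder http_hostname
    stream=True,
    timeout=300,
    verify=False,
)
resp.raise_for_status()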
@reporter.step("Get via Zip HTTP Gate")
def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300):
def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300):
"""
This function gets the given object from the HTTP gate
cid: container id to get object from
prefix: common prefix
node: node to make request
endpoint: http gate endpoint
http_hostname: http host name on the node
"""
request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}"
request = f"{endpoint}/zip/{cid}/{prefix}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
if not resp.ok:
@ -92,11 +95,11 @@ def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Opt
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip"))
with open(test_file, "wb") as file:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_archive.zip")
with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file)
with zipfile.ZipFile(test_file, "r") as zip_ref:
with zipfile.ZipFile(file_path, "r") as zip_ref:
zip_ref.extractall(ASSETS_DIR)
return os.path.join(os.getcwd(), ASSETS_DIR, prefix)
@ -106,7 +109,8 @@ def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Opt
def get_via_http_gate_by_attribute(
cid: str,
attribute: dict,
node: ClusterNode,
endpoint: str,
http_hostname: str,
request_path: Optional[str] = None,
timeout: Optional[int] = 300,
):
@ -115,17 +119,18 @@ def get_via_http_gate_by_attribute(
cid: CID to get object from
attribute: attribute {name: attribute} value pair
endpoint: http gate endpoint
http_hostname: http host name on the node
request_path: (optional) http request path, if omitted - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
"""
attr_name = list(attribute.keys())[0]
attr_value = quote_plus(str(attribute.get(attr_name)))
# if `request_path` parameter omitted, use default
if request_path is None:
request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
else:
request = f"{node.http_gate.get_endpoint()}{request_path}"
request = f"{endpoint}{request_path}"
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname})
if not resp.ok:
raise Exception(
@ -139,14 +144,17 @@ def get_via_http_gate_by_attribute(
logger.info(f"Request: {request}")
_attach_allure_step(request, resp.status_code)
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}"))
with open(test_file, "wb") as file:
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{str(uuid.uuid4())}")
with open(file_path, "wb") as file:
shutil.copyfileobj(resp.raw, file)
return test_file
return file_path
# TODO: pass http_hostname as a header
@reporter.step("Upload via HTTP Gate")
def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str:
def upload_via_http_gate(
cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300
) -> str:
"""
This function uploads the given object through the HTTP gate
cid: CID of the container to upload the object to
@ -189,6 +197,7 @@ def is_object_large(filepath: str) -> bool:
return False
# TODO: pass http_hostname as a header
@reporter.step("Upload via HTTP Gate using Curl")
def upload_via_http_gate_curl(
cid: str,
@ -238,20 +247,21 @@ def upload_via_http_gate_curl(
@retry(max_attempts=3, sleep_interval=1)
@reporter.step("Get via HTTP Gate using Curl")
def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile:
def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str:
"""
This function gets the given object from the HTTP gate using the curl utility.
cid: CID to get object from
oid: object OID
node: node for request
endpoint: http gate endpoint
http_hostname: http host name of the node
"""
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}"))
request = f"{endpoint}/get/{cid}/{oid}"
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
curl = GenericCli("curl", node.host)
curl(f"-k ", f"{request} > {test_file}", shell=local_shell)
cmd = f'curl -k -H "Host: {http_hostname}" {request} > {file_path}'
local_shell.exec(cmd)
return test_file
return file_path
def _attach_allure_step(request: str, status_code: int, req_type="GET"):
@ -264,11 +274,12 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
def try_to_get_object_and_expect_error(
cid: str,
oid: str,
node: ClusterNode,
error_pattern: str,
endpoint: str,
http_hostname: str,
) -> None:
try:
get_via_http_gate(cid=cid, oid=oid, node=node)
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err:
match = error_pattern.casefold() in str(err).casefold()
@ -281,10 +292,13 @@ def get_object_by_attr_and_verify_hashes(
file_name: str,
cid: str,
attrs: dict,
node: ClusterNode,
endpoint: str,
http_hostname: str,
) -> None:
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node)
got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node)
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
got_file_path_http_attr = get_via_http_gate_by_attribute(
cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname
)
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
@ -295,7 +309,8 @@ def verify_object_hash(
cid: str,
shell: Shell,
nodes: list[StorageNode],
request_node: ClusterNode,
endpoint: str,
http_hostname: str,
object_getter=None,
) -> None:
@ -321,7 +336,7 @@ def verify_object_hash(
shell=shell,
endpoint=random_node.get_rpc_endpoint(),
)
got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node)
got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
@ -350,9 +365,10 @@ def attr_into_str_header_curl(attrs: dict) -> list:
def try_to_get_object_via_passed_request_and_expect_error(
cid: str,
oid: str,
node: ClusterNode,
error_pattern: str,
endpoint: str,
http_request_path: str,
http_hostname: str,
attrs: Optional[dict] = None,
) -> None:
try:
@ -360,15 +376,17 @@ def try_to_get_object_via_passed_request_and_expect_error(
get_via_http_gate(
cid=cid,
oid=oid,
node=node,
endpoint=endpoint,
request_path=http_request_path,
http_hostname=http_hostname,
)
else:
get_via_http_gate_by_attribute(
cid=cid,
attribute=attrs,
node=node,
endpoint=endpoint,
request_path=http_request_path,
http_hostname=http_hostname,
)
raise AssertionError(f"Expected error on getting object with cid: {cid}")
except Exception as err:
@ -1,45 +0,0 @@
import re
from frostfs_testlib import reporter
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.storage.cluster import ClusterNode
@reporter.step("Check metrics result")
@wait_for_success(interval=10)
def check_metrics_counter(
cluster_nodes: list[ClusterNode],
operator: str = "==",
counter_exp: int = 0,
parse_from_command: bool = False,
**metrics_greps: str,
):
counter_act = 0
for cluster_node in cluster_nodes:
counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps)
assert eval(
f"{counter_act} {operator} {counter_exp}"
), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}"
@reporter.step("Get metrics value from node: {node}")
def get_metrics_value(node: ClusterNode, parse_from_command: bool = False, **metrics_greps: str):
try:
command_result = node.metrics.storage.get_metrics_search_by_greps(**metrics_greps)
if parse_from_command:
metrics_counter = calc_metrics_count_from_stdout(command_result.stdout, **metrics_greps)
else:
metrics_counter = calc_metrics_count_from_stdout(command_result.stdout)
except RuntimeError as e:
metrics_counter = 0
return metrics_counter
@reporter.step("Parse metrics count and calc sum of result")
def calc_metrics_count_from_stdout(metric_result_stdout: str, command: str = None):
if command:
result = re.findall(rf"{command}\s*([\d.e+-]+)", metric_result_stdout)
else:
result = re.findall(r"}\s*([\d.e+-]+)", metric_result_stdout)
return sum(map(lambda x: int(float(x)), result))
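The parsing regex of the removed helper can be checked against an invented Prometheus-style sample:
import re

sample = 'frostfs_node_object_counter{type="phy"} 1.5e+03\nother_metric{shard="x"} 2'
values = re.findall(r"}\s*([\d.e+-]+)", sample)
# 1.5e+03 -> 1500, plus 2 -> 1502
assert sum(int(float(v)) for v in values) == 1502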
@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.utils import datetime_utils
logger = logging.getLogger("NeoLogger")
@ -51,24 +52,9 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
Returns:
health status as HealthStatus object.
"""
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = cli.control.healthcheck(control_endpoint)
return HealthStatus.from_stdout(result.stdout)
command = "control healthcheck"
output = _run_control_command_with_retries(node, command)
return HealthStatus.from_stdout(output)
@reporter.step("Set status for {node}")
@ -80,21 +66,8 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) ->
status: online or offline.
retries (optional, int): number of retry attempts if it didn't work from the first time
"""
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
cli.control.set_status(control_endpoint, status)
command = f"control set-status --status {status}"
_run_control_command_with_retries(node, command, retries)
@reporter.step("Get netmap snapshot")
@ -118,7 +91,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
@reporter.step("Get shard list for {node}")
def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]:
def node_shard_list(node: StorageNode) -> list[str]:
"""
The function returns list of shards for specified storage node.
Args:
@ -126,72 +99,31 @@ def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]
Returns:
list of shards.
"""
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = cli.shards.list(endpoint=control_endpoint, json_mode=json)
return re.findall(r"Shard (.*):", result.stdout)
command = "control shards list"
output = _run_control_command_with_retries(node, command)
return re.findall(r"Shard (.*):", output)
@reporter.step("Shard set for {node}")
def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None:
def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
"""
The function sets mode for specified shard.
Args:
node: node on which shard mode should be set.
"""
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard)
command = f"control shards set-mode --id {shard} --mode {mode}"
return _run_control_command_with_retries(node, command)
@reporter.step("Drop object from {node}")
def drop_object(node: StorageNode, cid: str, oid: str) -> None:
def drop_object(node: StorageNode, cid: str, oid: str) -> str:
"""
The function drops object from specified node.
Args:
node: node from which object should be dropped.
node_id str: node from which object should be dropped.
"""
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
objects = f"{cid}/{oid}"
cli.control.drop_objects(control_endpoint, objects)
command = f"control drop-objects -o {cid}/{oid}"
return _run_control_command_with_retries(node, command)
@reporter.step("Delete data from host for node {node}")
@ -263,7 +195,7 @@ def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[
@reporter.step("Wait for node {node} is ready")
def wait_for_node_to_be_ready(node: StorageNode) -> None:
timeout, attempts = 60, 15
timeout, attempts = 30, 6
for _ in range(attempts):
try:
health_check = storage_node_healthcheck(node)
@ -306,3 +238,38 @@ def remove_nodes_from_map_morph(
config_file=FROSTFS_ADM_CONFIG_PATH,
)
frostfsadm.morph.remove_nodes(node_netmap_keys)
def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
for attempt in range(1 + retries): # original attempt + specified retries
try:
return _run_control_command(node, command)
except AssertionError as err:
if attempt < retries:
logger.warning(f"Command {command} failed with error {err} and will be retried")
continue
raise AssertionError(f"Command {command} failed with error {err}") from err
def _run_control_command(node: StorageNode, command: str) -> None:
host = node.host
service_config = host.get_service_config(node.name)
wallet_path = service_config.attributes["wallet_path"]
wallet_password = service_config.attributes["wallet_password"]
control_endpoint = service_config.attributes["control_endpoint"]
shell = host.get_shell()
wallet_config_path = f"/tmp/{node.name}-config.yaml"
wallet_config = f'password: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
cli_config = host.get_cli_config("frostfs-cli")
# TODO: implement cli.control
# cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
result = shell.exec(
f"{cli_config.exec_path} {command} --endpoint {control_endpoint} "
f"--wallet {wallet_path} --config {wallet_config_path}"
)
return result.stdout
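For reference, a sketch of the temporary wallet config and command line the helper produces, with placeholder values in place of the node's real service config:
node_name = "frostfs-storage_01"                    # placeholder
wallet_config_path = f"/tmp/{node_name}-config.yaml"
wallet_config = 'password: ""'                      # empty devenv password assumed
command = "control healthcheck"
full_command = (
    f"frostfs-cli {command} --endpoint s01.frostfs.devenv:8081 "  # placeholder endpoint
    f"--wallet wallet01.json --config {wallet_config_path}"       # placeholder wallet
)
print(full_command)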
@ -1,16 +1,25 @@
import json
import logging
import os
import re
import uuid
from datetime import datetime, timedelta
from typing import Optional
from dateutil.parser import parse
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAuthmate
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import Shell
from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell
from frostfs_testlib.shell.interfaces import SshCredentials
from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils.cli_utils import _run_with_passwd
logger = logging.getLogger("NeoLogger")
@ -29,7 +38,9 @@ def check_objects_in_bucket(
assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}"
for bucket_object in unexpected_objects:
assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}"
assert (
bucket_object not in bucket_objects
), f"Expected object {bucket_object} not in objects list {bucket_objects}"
@reporter.step("Try to get object and got error")
@ -47,6 +58,7 @@ def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: Versi
if status == VersioningStatus.UNDEFINED:
return
s3_client.get_bucket_versioning_status(bucket)
s3_client.put_bucket_versioning(bucket, status=status)
bucket_status = s3_client.get_bucket_versioning_status(bucket)
assert bucket_status == status.value, f"Expected {status.value} status. Got {bucket_status}"
@ -56,7 +68,9 @@ def object_key_from_file_path(full_path: str) -> str:
return os.path.basename(full_path)
def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None:
def assert_tags(
actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
) -> None:
expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
if expected_tags == []:
@ -119,28 +133,69 @@ def assert_object_lock_mode(
).days == retain_period, f"Expected retention period is {retain_period} days"
def _format_grants_as_strings(grants: list[dict]) -> list:
grantee_format = "{g_type}::{uri}:{permission}"
return set(
[
grantee_format.format(
g_type=grant.get("Grantee", {}).get("Type", ""),
uri=grant.get("Grantee", {}).get("URI", ""),
permission=grant.get("Permission", ""),
)
for grant in grants
]
def assert_s3_acl(acl_grants: list, permitted_users: str):
if permitted_users == "AllUsers":
grantees = {"AllUsers": 0, "CanonicalUser": 0}
for acl_grant in acl_grants:
if acl_grant.get("Grantee", {}).get("Type") == "Group":
uri = acl_grant.get("Grantee", {}).get("URI")
permission = acl_grant.get("Permission")
assert (uri, permission) == (
"http://acs.amazonaws.com/groups/global/AllUsers",
"FULL_CONTROL",
), "All Groups should have FULL_CONTROL"
grantees["AllUsers"] += 1
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Canonical User should have FULL_CONTROL"
grantees["CanonicalUser"] += 1
assert grantees["AllUsers"] >= 1, "All Users should have FULL_CONTROL"
assert grantees["CanonicalUser"] >= 1, "Canonical User should have FULL_CONTROL"
if permitted_users == "CanonicalUser":
for acl_grant in acl_grants:
if acl_grant.get("Grantee", {}).get("Type") == "CanonicalUser":
permission = acl_grant.get("Permission")
assert permission == "FULL_CONTROL", "Only CanonicalUser should have FULL_CONTROL"
else:
logger.error("FULL_CONTROL is given to All Users")
@reporter.step("Init S3 Credentials")
def init_s3_credentials(
wallet: WalletInfo,
shell: Shell,
cluster: Cluster,
policy: Optional[dict] = None,
s3gates: Optional[list[S3Gate]] = None,
container_placement_policy: Optional[str] = None,
):
gate_public_keys = []
bucket = str(uuid.uuid4())
if not s3gates:
s3gates = [cluster.s3_gates[0]]
for s3gate in s3gates:
gate_public_keys.append(s3gate.get_wallet_public_key())
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
issue_secret_output = frostfs_authmate_exec.secret.issue(
wallet=wallet.path,
peer=cluster.default_rpc_endpoint,
gate_public_key=gate_public_keys,
wallet_password=wallet.password,
container_policy=policy,
container_friendly_name=bucket,
container_placement_policy=container_placement_policy,
).stdout
aws_access_key_id = str(
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")
)
@reporter.step("Verify ACL permissions")
def verify_acl_permissions(actual_acl_grants: list[dict], expected_acl_grants: list[dict], strict: bool = True):
actual_grants = _format_grants_as_strings(actual_acl_grants)
expected_grants = _format_grants_as_strings(expected_acl_grants)
assert expected_grants <= actual_grants, "Permissions mismatch"
if strict:
assert expected_grants == actual_grants, "Extra permissions found, must not be there"
aws_secret_access_key = str(
re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group(
"aws_secret_access_key"
)
)
cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
return cid, aws_access_key_id, aws_secret_access_key
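The regexes can be exercised against a mock of the authmate output; the sample text is an assumption about its shape, inferred from the patterns themselves:
import re

sample = 'access_key_id: "ExampleKeyId"\nsecret_access_key: "ExampleSecret"\ncontainer_id: "ExampleCid"'
key_id = re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", sample).group("aws_access_key_id")
secret = re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", sample).group("aws_secret_access_key")
cid = re.search(r"container_id.*:\s.(?P<container_id>\w*)", sample).group("container_id")
assert (cid, key_id, secret) == ("ExampleCid", "ExampleKeyId", "ExampleSecret")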
@reporter.step("Delete bucket with all objects")
@ -172,7 +227,7 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
def search_nodes_with_bucket(
cluster: Cluster,
bucket_name: str,
wallet: WalletInfo,
wallet: str,
shell: Shell,
endpoint: str,
) -> list[ClusterNode]:
@ -4,12 +4,13 @@ import logging
import os
import uuid
from dataclasses import dataclass
from enum import Enum
from typing import Any, Optional
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
@ -230,7 +231,8 @@ def get_object_signed_token(
def create_session_token(
shell: Shell,
owner: str,
wallet: WalletInfo,
wallet_path: str,
wallet_password: str,
rpc_endpoint: str,
) -> str:
"""
@ -245,18 +247,19 @@ def create_session_token(
The path to the generated session token file.
"""
session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC)
frostfscli.session.create(
rpc_endpoint=rpc_endpoint,
address=owner,
wallet=wallet_path,
wallet_password=wallet_password,
out=session_token,
wallet=wallet.path,
)
return session_token
@reporter.step("Sign Session Token")
def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str:
def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str:
"""
This function signs the session token by the given wallet.
@ -269,6 +272,6 @@ def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo
The path to the signed token.
"""
signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
frostfscli.util.sign_session_token(session_token_file, signed_token_file)
frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG)
frostfscli.util.sign_session_token(wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file)
return signed_token_file
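A sketch of the two-step token flow with the reverted signatures; the module path, wallet paths, owner address, and endpoint are placeholders:
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.steps.session_token import create_session_token, sign_session_token  # module path assumed
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

shell = LocalShell()
token_path = create_session_token(
    shell=shell,
    owner="NExampleOwnerAddress",            # placeholder NEO address
    wallet_path="wallets/user.json",         # placeholder
    wallet_password="",
    rpc_endpoint="s01.frostfs.devenv:8080",  # placeholder
)
owner_wallet = WalletInfo("wallets/user.json", "")  # positional (path, password); constructor shape assumed
signed_path = sign_session_token(shell, token_path, owner_wallet)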
@ -30,14 +30,14 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust
with reporter.step("Delete objects"):
for storage_object in storage_objects:
storage_object.tombstone = delete_object(
storage_object.wallet,
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=shell,
endpoint=cluster.default_rpc_endpoint,
)
verify_head_tombstone(
wallet=storage_object.wallet,
wallet_path=storage_object.wallet_file_path,
cid=storage_object.cid,
oid_ts=storage_object.tombstone,
oid=storage_object.oid,
@ -52,7 +52,7 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust
for storage_object in storage_objects:
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
get_object(
storage_object.wallet,
storage_object.wallet_file_path,
storage_object.cid,
storage_object.oid,
shell=shell,
@ -12,15 +12,13 @@ from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object
from frostfs_testlib.steps.complex_object_actions import get_last_object
from frostfs_testlib.storage.cluster import StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.utils import string_utils
logger = logging.getLogger("NeoLogger")
# TODO: Unused, remove or make use of
@reporter.step("Get Object Copies")
def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
"""
The function performs requests to all nodes of the container and
finds out if they store a copy of the object. The procedure is
@ -45,7 +43,7 @@ def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, s
@reporter.step("Get Simple Object Copies")
def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
"""
To figure out the number of a simple object copies, only direct
HEAD requests should be made to the every node of the container.
@ -74,7 +72,7 @@ def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shel
@reporter.step("Get Complex Object Copies")
def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
"""
To figure out the number of a complex object copies, we firstly
need to retrieve its Last object. We consider that the number of
@ -111,7 +109,8 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN
nodes_list = []
for node in nodes:
wallet = WalletInfo.from_node(node)
wallet = node.get_wallet_path()
wallet_config = node.get_wallet_config_path()
try:
res = head_object(
wallet,
@ -120,6 +119,7 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN
shell=shell,
endpoint=node.get_rpc_endpoint(),
is_direct=True,
wallet_config=wallet_config,
)
if res is not None:
logger.info(f"Found object {oid} on node {node}")
@ -131,7 +131,9 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN
@reporter.step("Get Nodes Without Object")
def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]:
def get_nodes_without_object(
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
) -> list[StorageNode]:
"""
The function returns list of nodes which do not store
the given object.
@ -1,23 +1,31 @@
import json
import logging
from neo3.wallet import wallet
from frostfs_testlib import reporter
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.object import head_object
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
logger = logging.getLogger("NeoLogger")
@reporter.step("Verify Head Tombstone")
def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str):
header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str):
header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
logger.info(f"Header Session OIDs is {s_oid}")
logger.info(f"OID is {oid}")
assert header["containerID"] == cid, "Tombstone Header CID is wrong"
assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong"
with open(wallet_path, "r") as file:
wlt_data = json.loads(file.read())
wlt = wallet.Wallet.from_json(wlt_data, password="")
addr = wlt.accounts[0].address
assert header["ownerID"] == addr, "Tombstone Owner ID is wrong"
assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE"
assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong"
@ -9,12 +9,12 @@ from frostfs_testlib.hosting import Host, Hosting
from frostfs_testlib.hosting.config import ServiceConfig
from frostfs_testlib.storage import get_service_registry
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration
from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.service_registry import ServiceRegistry
from frostfs_testlib.storage.dataclasses.metrics import Metrics
class ClusterNode:
@ -25,13 +25,11 @@ class ClusterNode:
class_registry: ServiceRegistry
id: int
host: Host
metrics: Metrics
def __init__(self, host: Host, id: int) -> None:
self.host = host
self.id = id
self.class_registry = get_service_registry()
self.metrics = Metrics(host=self.host, metrics_endpoint=self.storage_node.get_metrics_endpoint())
@property
def host_ip(self):
@ -74,7 +72,6 @@ class ClusterNode:
def s3_gate(self) -> S3Gate:
return self.service(S3Gate)
# TODO: Deprecated. Use config with ServiceConfigurationYml interface
def get_config(self, config_file_path: str) -> dict:
shell = self.host.get_shell()
@ -84,7 +81,6 @@ class ClusterNode:
config = yaml.safe_load(config_text)
return config
# TODO: Deprecated. Use config with ServiceConfigurationYml interface
def save_config(self, new_config: dict, config_file_path: str) -> None:
shell = self.host.get_shell()
@ -92,7 +88,7 @@ class ClusterNode:
shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}")
def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml:
return self.service(service_type).config
return ServiceConfiguration(self.service(service_type))
def service(self, service_type: type[ServiceClass]) -> ServiceClass:
"""
@ -109,7 +105,7 @@ class ClusterNode:
service_entry = self.class_registry.get_entry(service_type)
service_name = service_entry["hosting_service_name"]
pattern = f"{service_name}_{self.id:02}"
pattern = f"{service_name}{self.id:02}"
config = self.host.get_service_config(pattern)
return service_type(
@ -124,7 +120,7 @@ class ClusterNode:
svcs_names_on_node = [svc.name for svc in self.host.config.services]
for entry in self.class_registry._class_mapping.values():
hosting_svc_name = entry["hosting_service_name"]
pattern = f"{hosting_svc_name}_{self.id:02}"
pattern = f"{hosting_svc_name}{self.id:02}"
if pattern in svcs_names_on_node:
config = self.host.get_service_config(pattern)
svcs.append(
@ -144,16 +140,30 @@ class ClusterNode:
return self.host.config.interfaces[interface.value]
def get_data_interfaces(self) -> list[str]:
return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface]
return [
ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface
]
def get_data_interface(self, search_interface: str) -> list[str]:
return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface]
return [
self.host.config.interfaces[interface]
for interface in self.host.config.interfaces.keys()
if search_interface == interface
]
def get_internal_interfaces(self) -> list[str]:
return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface]
return [
ip_address
for name_interface, ip_address in self.host.config.interfaces.items()
if "internal" in name_interface
]
def get_internal_interface(self, search_internal: str) -> list[str]:
return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface]
return [
self.host.config.interfaces[interface]
for interface in self.host.config.interfaces.keys()
if search_internal == interface
]
class Cluster:
@ -164,6 +174,8 @@ class Cluster:
default_rpc_endpoint: str
default_s3_gate_endpoint: str
default_http_gate_endpoint: str
default_http_hostname: str
default_s3_hostname: str
def __init__(self, hosting: Hosting) -> None:
self._hosting = hosting
@ -172,6 +184,8 @@ class Cluster:
self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint()
self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint()
self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint()
self.default_http_hostname = self.services(StorageNode)[0].get_http_hostname()
self.default_s3_hostname = self.services(StorageNode)[0].get_s3_hostname()
@property
def hosts(self) -> list[Host]:
@ -253,13 +267,13 @@ class Cluster:
service_name = service["hosting_service_name"]
cls: type[NodeBase] = service["cls"]
pattern = f"{service_name}_\d*$"
pattern = f"{service_name}\d*$"
configs = self.hosting.find_service_configs(pattern)
found_nodes = []
for config in configs:
# config.name is something like s3-gate01. Cut last digits to know service type
service_type = re.findall("(.*)_\d+", config.name)[0]
service_type = re.findall(".*\D", config.name)[0]
# exclude unsupported services
if service_type != service_name:
continue
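The new patterns drop the underscore between service name and index; a standalone check of both regexes above, with illustrative service names:
import re

names = ["s01", "s3-gate01", "http-gate02", "morph-chain01"]
# Everything up to the trailing digits is treated as the service type.
assert [re.findall(r".*\D", n)[0] for n in names] == ["s", "s3-gate", "http-gate", "morph-chain"]
# Per-node lookup for storage node 01; "s3-gate01" does not match because of its non-digit tail.
assert re.match(r"s\d*$", "s01") and not re.match(r"s\d*$", "s3-gate01")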
@ -5,74 +5,51 @@ from typing import Any
import yaml
from frostfs_testlib import reporter
from frostfs_testlib.shell.interfaces import CommandOptions, Shell
from frostfs_testlib.shell.interfaces import CommandOptions
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
def extend_dict(extend_me: dict, extend_by: dict):
if isinstance(extend_by, dict):
for k, v in extend_by.items():
if k in extend_me:
extend_dict(extend_me.get(k), v)
else:
extend_me[k] = v
else:
extend_me += extend_by
from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
class ServiceConfiguration(ServiceConfigurationYml):
def __init__(self, service_name: str, shell: Shell, config_dir: str, main_config_path: str) -> None:
self.service_name = service_name
self.shell = shell
self.main_config_path = main_config_path
self.confd_path = os.path.join(config_dir, "conf.d")
def __init__(self, service: "ServiceClass") -> None:
self.service = service
self.shell = self.service.host.get_shell()
self.confd_path = os.path.join(self.service.config_dir, "conf.d")
self.custom_file = os.path.join(self.confd_path, "99_changes.yml")
def _path_exists(self, path: str) -> bool:
return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code
def _get_config_files(self):
config_files = [self.main_config_path]
def _get_data_from_file(self, path: str) -> dict:
content = self.shell.exec(f"cat {path}").stdout
data = yaml.safe_load(content)
return data
if self._path_exists(self.confd_path):
files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split()
# Sorting files in backwards order from latest to first one
config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0])))
def get(self, key: str) -> str:
with reporter.step(f"Get {key} configuration value for {self.service}"):
config_files = [self.service.main_config_path]
return config_files
if self._path_exists(self.confd_path):
files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split()
# Sorting files in backwards order from latest to first one
config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0])))
def _get_configuration(self, config_files: list[str]) -> dict:
if not config_files:
return [{}]
result = None
for file in files:
data = self._get_data_from_file(file)
result = self._find_option(key, data)
if result is not None:
break
splitter = "+++++"
files_str = " ".join(config_files)
all_content = self.shell.exec(
f"echo Getting config files; for file in {files_str}; do (echo {splitter}; sudo cat ${{file}}); done"
).stdout
files_content = all_content.split("+++++")[1:]
files_data = [yaml.safe_load(file_content) for file_content in files_content]
mergedData = {}
for data in files_data:
extend_dict(mergedData, data)
return mergedData
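Not part of the diff: how the single-round-trip read above is reassembled, assuming the splitter marker never appears inside a config file. The sample YAML content is invented for illustration.

import yaml

splitter = "+++++"
# Stand-in for the stdout of: for file in ...; do (echo +++++; sudo cat $file); done
all_content = "Getting config files\n+++++\nlogger:\n  level: debug\n+++++\nstorage:\n  shard_num: 2\n"
files_content = all_content.split(splitter)[1:]
files_data = [yaml.safe_load(chunk) for chunk in files_content]
assert files_data == [{"logger": {"level": "debug"}}, {"storage": {"shard_num": 2}}]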
def get(self, key: str) -> str | Any:
with reporter.step(f"Get {key} configuration value for {self.service_name}"):
config_files = self._get_config_files()
configuration = self._get_configuration(config_files)
result = self._find_option(key, configuration)
return result
def set(self, values: dict[str, Any]):
with reporter.step(f"Change configuration for {self.service_name}"):
with reporter.step(f"Change configuration for {self.service}"):
if not self._path_exists(self.confd_path):
self.shell.exec(f"mkdir {self.confd_path}")
if self._path_exists(self.custom_file):
data = self._get_configuration([self.custom_file])
data = self._get_data_from_file(self.custom_file)
else:
data = {}
@ -84,5 +61,5 @@ class ServiceConfiguration(ServiceConfigurationYml):
self.shell.exec(f"chmod 777 {self.custom_file}")
def revert(self):
with reporter.step(f"Revert changed options for {self.service_name}"):
with reporter.step(f"Revert changed options for {self.service}"):
self.shell.exec(f"rm -rf {self.custom_file}")


@ -16,3 +16,13 @@ class ConfigAttributes:
ENDPOINT_PROMETHEUS = "endpoint_prometheus"
CONTROL_ENDPOINT = "control_endpoint"
UN_LOCODE = "un_locode"
HTTP_HOSTNAME = "http_hostname"
S3_HOSTNAME = "s3_hostname"
class _FrostfsServicesNames:
STORAGE = "s"
S3_GATE = "s3-gate"
HTTP_GATE = "http-gate"
MORPH_CHAIN = "morph-chain"
INNER_RING = "ir"


@ -1,5 +1,6 @@
import copy
from datetime import datetime
from typing import Optional
import frostfs_testlib.resources.optionals as optionals
from frostfs_testlib import reporter
@ -9,6 +10,7 @@ from frostfs_testlib.load.load_report import LoadReport
from frostfs_testlib.load.load_verifiers import LoadVerifier
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.parallel import parallel
from frostfs_testlib.testing.test_control import run_optionally
@ -21,6 +23,7 @@ class BackgroundLoadController:
cluster_nodes: list[ClusterNode]
nodes_under_load: list[ClusterNode]
load_counter: int
loaders_wallet: WalletInfo
load_summaries: dict
endpoints: list[str]
runner: ScenarioRunner
@ -31,6 +34,7 @@ class BackgroundLoadController:
self,
k6_dir: str,
load_params: LoadParams,
loaders_wallet: WalletInfo,
cluster_nodes: list[ClusterNode],
nodes_under_load: list[ClusterNode],
runner: ScenarioRunner,
@ -41,6 +45,7 @@ class BackgroundLoadController:
self.cluster_nodes = cluster_nodes
self.nodes_under_load = nodes_under_load
self.load_counter = 1
self.loaders_wallet = loaders_wallet
self.runner = runner
self.started = False
self.load_reporters = []
@ -59,7 +64,10 @@ class BackgroundLoadController:
)
),
EndpointSelectionStrategy.FIRST: list(
set(node_under_load.service(StorageNode).get_rpc_endpoint() for node_under_load in self.nodes_under_load)
set(
node_under_load.service(StorageNode).get_rpc_endpoint()
for node_under_load in self.nodes_under_load
)
),
},
# for some reason xk6 appends http protocol on its own
@ -187,19 +195,15 @@ class BackgroundLoadController:
read_from=self.load_params.read_from,
registry_file=self.load_params.registry_file,
verify_time=self.load_params.verify_time,
custom_registry=self.load_params.custom_registry,
load_type=self.load_params.load_type,
load_id=self.load_params.load_id,
vu_init_time=0,
working_dir=self.load_params.working_dir,
endpoint_selection_strategy=self.load_params.endpoint_selection_strategy,
k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy,
setup_timeout=self.load_params.setup_timeout,
setup_timeout="1s",
)
if self.verification_params.custom_registry:
self.verification_params.registry_file = self.load_params.custom_registry
if self.verification_params.verify_time is None:
raise RuntimeError("verify_time should not be none")


@ -11,14 +11,12 @@ from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.hosting.interfaces import HostStatus
from frostfs_testlib.plugins import load_all
from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME
from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
from frostfs_testlib.steps.network import IpHelper
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
from frostfs_testlib.storage.controllers.disk_controller import DiskController
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing import parallel
from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success
from frostfs_testlib.utils.datetime_utils import parse_time
@ -328,8 +326,6 @@ class ClusterStateController:
@reporter.step("Restore blocked nodes")
def restore_all_traffic(self):
if not self.dropped_traffic:
return
parallel(self._restore_traffic_to_node, self.dropped_traffic)
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
@ -414,46 +410,45 @@ class ClusterStateController:
)
frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
@reporter.step("Set node status to {status} in CSC")
def set_node_status(self, cluster_node: ClusterNode, wallet: WalletInfo, status: NodeStatus, await_tick: bool = True) -> None:
@reporter.step("Set mode node to {status}")
def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None:
rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint()
control_endpoint = cluster_node.service(StorageNode).get_control_endpoint()
frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(self.shell, wallet, cluster_node)
node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint).stdout)
frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, cluster_node=cluster_node)
node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint, wallet=wallet).stdout)
if node_netinfo.maintenance_mode_allowed == "false":
with reporter.step("Enable maintenance mode"):
frostfs_adm.morph.set_config("MaintenanceModeAllowed=true")
with reporter.step("If status maintenance, then check that the option is enabled"):
if node_netinfo.maintenance_mode_allowed == "false":
frostfs_adm.morph.set_config(set_key_value="MaintenanceModeAllowed=true")
with reporter.step(f"Set node status to {status} using FrostfsCli"):
frostfs_cli_remote.control.set_status(control_endpoint, status.value)
with reporter.step(f"Change the status to {status}"):
frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status=status)
if not await_tick:
return
with reporter.step("Tick 2 epoch with 2 block await."):
for _ in range(2):
frostfs_adm.morph.force_new_epoch()
time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
with reporter.step("Tick 1 epoch, and await 2 block"):
frostfs_adm.morph.force_new_epoch()
time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
self.await_node_status(status, wallet, cluster_node)
self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node)
@wait_for_success(80, 8, title="Wait for node status become {status}")
def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode, checker_node: ClusterNode = None):
frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path)
if not checker_node:
checker_node = cluster_node
netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout)
@wait_for_success(80, 8, title="Wait for storage status become {status}")
def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode):
frostfs_cli = FrostfsCli(
shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
)
netmap = NetmapParser.snapshot_all_nodes(
frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint(), wallet=wallet).stdout
)
netmap = [node for node in netmap if cluster_node.host_ip == node.node]
if status == NodeStatus.OFFLINE:
if status == "offline":
assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline"
else:
assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'"
assert netmap[0].node_status == status.upper(), f"Node state - {netmap[0].node_status} != {status} expected"
def _get_cli(
self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode
) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
def _get_cli(self, local_shell: Shell, cluster_node: ClusterNode) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
# TODO Move to service config
host = cluster_node.host
service_config = host.get_service_config(cluster_node.storage_node.name)
@ -465,8 +460,12 @@ class ClusterStateController:
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path)
frostfs_adm = FrostfsAdm(
shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
)
frostfs_cli = FrostfsCli(
shell=local_shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
)
frostfs_cli_remote = FrostfsCli(
shell=shell,
frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
@ -510,7 +509,9 @@ class ClusterStateController:
options = CommandOptions(check=False)
return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code
@retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online")
@retry(
max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online"
)
def _wait_for_host_online(self, node: ClusterNode):
try:
ping_result = self._ping_host(node)
@ -521,7 +522,9 @@ class ClusterStateController:
logger.warning(f"Host ping fails with error {err}")
return HostStatus.OFFLINE
@retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline")
@retry(
max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline"
)
def _wait_for_host_offline(self, node: ClusterNode):
try:
ping_result = self._ping_host(node)
@ -531,11 +534,3 @@ class ClusterStateController:
except Exception as err:
logger.warning(f"Host ping fails with error {err}")
return HostStatus.ONLINE
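Not part of the diff: the polling behavior the @retry decorator above provides, sketched under the assumption that it re-invokes the wrapped function until the expected result is seen or the attempt limit is reached.

import time


def poll_until(fn, expected_result, max_attempts=60, sleep_interval=10):
    # Simplified stand-in for @retry(max_attempts=..., sleep_interval=..., expected_result=...)
    for _ in range(max_attempts):
        result = fn()
        if result == expected_result:
            return result
        time.sleep(sleep_interval)
    return result  # last observed status, e.g. HostStatus.OFFLINE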
@reporter.step("Get contract by domain - {domain_name}")
def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str):
frostfs_adm = FrostfsAdm(
shell=cluster_node.host.get_shell(),
frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
)
return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_endpoint(), domain_name).stdout


@ -79,7 +79,9 @@ class ShardsWatcher:
assert self._is_shard_present(shard_id)
shards_with_new_errors = self.get_shards_with_new_errors()
assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}"
assert (
shard_id in shards_with_new_errors
), f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}"
@wait_for_success(300, 5)
def await_for_shards_have_no_new_errors(self):
@ -108,9 +110,9 @@ class ShardsWatcher:
self.storage_node.host.get_cli_config("frostfs-cli").exec_path,
)
return shards_cli.set_mode(
endpoint=self.storage_node.get_control_endpoint(),
wallet=self.storage_node.get_remote_wallet_path(),
wallet_password=self.storage_node.get_wallet_password(),
self.storage_node.get_control_endpoint(),
self.storage_node.get_remote_wallet_path(),
self.storage_node.get_wallet_password(),
mode=mode,
id=[shard_id],
clear_errors=clear_errors,


@ -1,8 +1,8 @@
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.readable import HumanReadableEnum
from frostfs_testlib.utils import wallet_utils
@ -65,7 +65,11 @@ class EACLFilters:
def __str__(self):
return ",".join(
[f"{filter.header_type.value}:" f"{filter.key}{filter.match_type.value}{filter.value}" for filter in self.filters]
[
f"{filter.header_type.value}:"
f"{filter.key}{filter.match_type.value}{filter.value}"
for filter in self.filters
]
if self.filters
else []
)
@ -80,7 +84,7 @@ class EACLPubKey:
class EACLRule:
operation: Optional[EACLOperation] = None
access: Optional[EACLAccess] = None
role: Optional[Union[EACLRole, WalletInfo]] = None
role: Optional[Union[EACLRole, str]] = None
filters: Optional[EACLFilters] = None
def to_dict(self) -> Dict[str, Any]:
@ -92,9 +96,9 @@ class EACLRule:
}
def __str__(self):
role = ""
if isinstance(self.role, EACLRole):
role = self.role.value
if isinstance(self.role, WalletInfo):
role = f"pubkey:{wallet_utils.get_wallet_public_key(self.role.path, self.role.password)}"
role = (
self.role.value
if isinstance(self.role, EACLRole)
else f'pubkey:{wallet_utils.get_wallet_public_key(self.role, "")}'
)
return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}'


@ -1,115 +0,0 @@
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional
from frostfs_testlib.testing.readable import HumanReadableEnum
from frostfs_testlib.utils import string_utils
logger = logging.getLogger("NeoLogger")
EACL_LIFETIME = 100500
FROSTFS_CONTRACT_CACHE_TIMEOUT = 30
class ObjectOperations(HumanReadableEnum):
PUT = "object.put"
GET = "object.get"
HEAD = "object.head"
GET_RANGE = "object.range"
GET_RANGE_HASH = "object.hash"
SEARCH = "object.search"
DELETE = "object.delete"
WILDCARD_ALL = "object.*"
@staticmethod
def get_all():
return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL]
class Verb(HumanReadableEnum):
ALLOW = "allow"
DENY = "deny"
class Role(HumanReadableEnum):
OWNER = "owner"
IR = "ir"
CONTAINER = "container"
OTHERS = "others"
class ConditionType(HumanReadableEnum):
RESOURCE = "ResourceCondition"
REQUEST = "RequestCondition"
# See https://git.frostfs.info/TrueCloudLab/policy-engine/src/branch/master/schema/native/consts.go#L40-L53
class ConditionKey(HumanReadableEnum):
ROLE = '"\\$Actor:role"'
PUBLIC_KEY = '"\\$Actor:publicKey"'
class MatchType(HumanReadableEnum):
EQUAL = "="
NOT_EQUAL = "!="
@dataclass
class Condition:
condition_key: ConditionKey | str
condition_value: str
condition_type: ConditionType = ConditionType.REQUEST
match_type: MatchType = MatchType.EQUAL
def as_string(self):
key = self.condition_key.value if isinstance(self.condition_key, ConditionKey) else self.condition_key
value = self.condition_value.value if isinstance(self.condition_value, Enum) else self.condition_value
return f"{self.condition_type.value}:{key}{self.match_type.value}{value}"
@staticmethod
def by_role(*args, **kwargs) -> "Condition":
return Condition(ConditionKey.ROLE, *args, **kwargs)
@staticmethod
def by_key(*args, **kwargs) -> "Condition":
return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs)
class Rule:
def __init__(
self,
access: Verb,
operations: list[ObjectOperations] | ObjectOperations,
conditions: list[Condition] | Condition = None,
chain_id: Optional[str] = None,
) -> None:
self.access = access
self.operations = operations
if not conditions:
self.conditions = []
elif isinstance(conditions, Condition):
self.conditions = [conditions]
else:
self.conditions = conditions
if not isinstance(self.conditions, list):
raise RuntimeError("Conditions must be a list")
if not operations:
self.operations = []
elif isinstance(operations, ObjectOperations):
self.operations = [operations]
else:
self.operations = operations
if not isinstance(self.operations, list):
raise RuntimeError("Operations must be a list")
self.chain_id = chain_id if chain_id else string_utils.unique_name("chain-id-")
def as_string(self):
conditions = " ".join([cond.as_string() for cond in self.conditions])
operations = " ".join([op.value for op in self.operations])
return f"{self.access.value} {operations} {conditions} *"


@ -5,7 +5,6 @@ from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.storage.dataclasses.node_base import NodeBase
from frostfs_testlib.storage.dataclasses.shard import Shard
class InnerRing(NodeBase):
"""
Class represents inner ring node in a cluster
@ -18,7 +17,11 @@ class InnerRing(NodeBase):
def service_healthcheck(self) -> bool:
health_metric = "frostfs_ir_ir_health"
output = self.host.get_shell().exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d").stdout
output = (
self.host.get_shell()
.exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output
def get_netmap_cleaner_threshold(self) -> str:
@ -47,7 +50,11 @@ class S3Gate(NodeBase):
def service_healthcheck(self) -> bool:
health_metric = "frostfs_s3_gw_state_health"
output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout
output = (
self.host.get_shell()
.exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output
@property
@ -65,7 +72,11 @@ class HTTPGate(NodeBase):
def service_healthcheck(self) -> bool:
health_metric = "frostfs_http_gw_state_health"
output = self.host.get_shell().exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d").stdout
output = (
self.host.get_shell()
.exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output
@property
@ -124,26 +135,32 @@ class StorageNode(NodeBase):
def service_healthcheck(self) -> bool:
health_metric = "frostfs_node_state_health"
output = self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout
output = (
self.host.get_shell()
.exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d")
.stdout
)
return health_metric in output
# TODO: Deprecated. Use new approach with config
def get_shard_config_path(self) -> str:
return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH)
# TODO: Deprecated. Use new approach with config
def get_shards_config(self) -> tuple[str, dict]:
return self.get_config(self.get_shard_config_path())
def get_shards(self) -> list[Shard]:
shards = self.config.get("storage:shard")
config = self.get_shards_config()[1]
config["storage"]["shard"].pop("default")
return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()]
if not shards:
raise RuntimeError(f"Cannot get shards information for {self.name} on {self.host.config.address}")
def get_shards_from_env(self) -> list[Shard]:
config = self.get_shards_config()[1]
configObj = ConfigObj(StringIO(config))
if "default" in shards:
shards.pop("default")
return [Shard.from_object(shard) for shard in shards.values()]
pattern = f"{SHARD_PREFIX}\d*"
num_shards = len(set(re.findall(pattern, self.get_shards_config())))
return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)]
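Not part of the diff: how the env-based shard count above is derived, using made-up FROSTFS_STORAGE_SHARD_* variables; the set() collapses repeated mentions of the same shard prefix.

import re

SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
config_text = (
    "FROSTFS_STORAGE_SHARD_0_MODE=read-write\n"
    "FROSTFS_STORAGE_SHARD_0_WRITECACHE_ENABLED=true\n"
    "FROSTFS_STORAGE_SHARD_1_MODE=read-write\n"
)
num_shards = len(set(re.findall(rf"{SHARD_PREFIX}\d*", config_text)))
assert num_shards == 2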
def get_control_endpoint(self) -> str:
return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT)
@ -154,6 +171,15 @@ class StorageNode(NodeBase):
def get_data_directory(self) -> str:
return self.host.get_data_directory(self.name)
def get_storage_config(self) -> str:
return self.host.get_storage_config(self.name)
def get_http_hostname(self) -> str:
return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME)
def get_s3_hostname(self) -> str:
return self._get_attribute(ConfigAttributes.S3_HOSTNAME)
def delete_blobovnicza(self):
self.host.delete_blobovnicza(self.name)


@ -1,36 +0,0 @@
from frostfs_testlib.hosting import Host
from frostfs_testlib.shell.interfaces import CommandResult
class Metrics:
def __init__(self, host: Host, metrics_endpoint: str) -> None:
self.storage = StorageMetrics(host, metrics_endpoint)
class StorageMetrics:
"""
Class represents storage metrics in a cluster
"""
def __init__(self, host: Host, metrics_endpoint: str) -> None:
self.host = host
self.metrics_endpoint = metrics_endpoint
def get_metrics_search_by_greps(self, **greps) -> CommandResult:
"""
Get metrics, searching by cid, metric_type, shard_id, etc.
Args:
greps: dict of grep-command-name and value,
for example get_metrics_search_by_greps(command='container_objects_total', cid='123456')
Returns:
result of the metrics command
"""
shell = self.host.get_shell()
additional_greps = " |grep ".join([grep_command for grep_command in greps.values()])
result = shell.exec(f"curl -s {self.metrics_endpoint} | grep {additional_greps}")
return result
def get_all_metrics(self) -> CommandResult:
shell = self.host.get_shell()
result = shell.exec(f"curl -s {self.metrics_endpoint}")
return result
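Not part of the diff: the grep pipeline the search helper above assembles from its keyword arguments, shown against an example metrics endpoint.

greps = {"command": "container_objects_total", "cid": "123456"}
additional_greps = " |grep ".join(greps.values())
cmd = f"curl -s localhost:6672 | grep {additional_greps}"
assert cmd == "curl -s localhost:6672 | grep container_objects_total |grep 123456"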


@ -10,7 +10,6 @@ from frostfs_testlib import reporter
from frostfs_testlib.hosting.config import ServiceConfig
from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration, ServiceConfigurationYml
from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.testing.readable import HumanReadableABC
from frostfs_testlib.utils import wallet_utils
@ -148,11 +147,7 @@ class NodeBase(HumanReadableABC):
def main_config_path(self) -> str:
return self._get_attribute(ConfigAttributes.CONFIG_PATH)
@property
def config(self) -> ServiceConfigurationYml:
return ServiceConfiguration(self.name, self.host.get_shell(), self.config_dir, self.main_config_path)
# TODO: Deprecated. Use config with ServiceConfigurationYml interface
# TODO: Deprecated
def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]:
if config_file_path is None:
config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
@ -165,7 +160,7 @@ class NodeBase(HumanReadableABC):
config = yaml.safe_load(config_text)
return config_file_path, config
# TODO: Deprecated. Use config with ServiceConfigurationYml interface
# TODO: Deprecated
def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None:
if config_file_path is None:
config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)


@ -1,13 +0,0 @@
from dataclasses import dataclass
@dataclass
class PlacementPolicy:
name: str
value: str
def __str__(self) -> str:
return self.name
def __repr__(self) -> str:
return self.__str__()


@ -1,6 +1,16 @@
import json
import pathlib
import re
from dataclasses import dataclass
from io import StringIO
import allure
import pytest
import yaml
from configobj import ConfigObj
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
BLOBSTOR_PREFIX = "_BLOBSTOR_"
@ -56,7 +66,9 @@ class Shard:
var_prefix = f"{SHARD_PREFIX}{shard_id}"
blobstor_count = Shard._get_blobstor_count_from_section(config_object, shard_id)
blobstors = [Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)]
blobstors = [
Blobstor.from_config_object(config_object, shard_id, blobstor_id) for blobstor_id in range(blobstor_count)
]
write_cache_enabled = config_object.as_bool(f"{var_prefix}_WRITECACHE_ENABLED")
@ -69,13 +81,7 @@ class Shard:
@staticmethod
def from_object(shard):
metabase = shard["metabase"]["path"] if "path" in shard["metabase"] else shard["metabase"]
writecache_enabled = True
if "enabled" in shard["writecache"]:
writecache_enabled = shard["writecache"]["enabled"]
writecache = shard["writecache"]["path"] if "path" in shard["writecache"] else shard["writecache"]
if not writecache_enabled:
writecache = ""
# Currently, due to an issue, we need to check if pilorama exists in keys
# TODO: make pilorama mandatory after fix
@ -88,5 +94,6 @@ class Shard:
blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]],
metabase=metabase,
writecache=writecache,
pilorama=pilorama,
pilorama=pilorama
)


@ -1,7 +1,6 @@
from dataclasses import dataclass
from typing import Optional
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from frostfs_testlib.testing.readable import HumanReadableEnum
@ -20,7 +19,7 @@ class LockObjectInfo(ObjectRef):
@dataclass
class StorageObjectInfo(ObjectRef):
size: Optional[int] = None
wallet: Optional[WalletInfo] = None
wallet_file_path: Optional[str] = None
file_path: Optional[str] = None
file_hash: Optional[str] = None
attributes: Optional[list[dict[str, str]]] = None
@ -28,7 +27,7 @@ class StorageObjectInfo(ObjectRef):
locks: Optional[list[LockObjectInfo]] = None
class NodeStatus(HumanReadableEnum):
class ModeNode(HumanReadableEnum):
MAINTENANCE: str = "maintenance"
ONLINE: str = "online"
OFFLINE: str = "offline"
@ -37,7 +36,7 @@ class NodeStatus(HumanReadableEnum):
@dataclass
class NodeNetmapInfo:
node_id: str = None
node_status: NodeStatus = None
node_status: ModeNode = None
node_data_ips: list[str] = None
cluster_name: str = None
continent: str = None
@ -70,8 +69,6 @@ class NodeNetInfo:
epoch_duration: str = None
inner_ring_candidate_fee: str = None
maximum_object_size: str = None
maximum_count_of_data_shards: str = None
maximum_count_of_parity_shards: str = None
withdrawal_fee: str = None
homomorphic_hashing_disabled: str = None
maintenance_mode_allowed: str = None


@ -1,15 +1,13 @@
import json
import logging
import os
import uuid
from dataclasses import dataclass
from typing import Optional
import yaml
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS
from frostfs_testlib.shell import Shell
from frostfs_testlib.storage.cluster import NodeBase
from frostfs_testlib.storage.cluster import Cluster, NodeBase
from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet
logger = logging.getLogger("frostfs.testlib.utils")
@ -23,13 +21,9 @@ class WalletInfo:
@staticmethod
def from_node(node: NodeBase):
wallet_path = node.get_wallet_path()
wallet_password = node.get_wallet_password()
wallet_config_file = os.path.join(ASSETS_DIR, os.path.basename(node.get_wallet_config_path()))
with open(wallet_config_file, "w") as file:
file.write(yaml.dump({"wallet": wallet_path, "password": wallet_password}))
return WalletInfo(wallet_path, wallet_password, wallet_config_file)
return WalletInfo(
node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path()
)
def get_address(self) -> str:
"""
@ -53,17 +47,22 @@ class WalletInfo:
"""
with open(self.path, "r") as wallet:
wallet_json = json.load(wallet)
assert abs(account_id) + 1 <= len(wallet_json["accounts"]), f"There is no index '{account_id}' in wallet: {wallet_json}"
assert abs(account_id) + 1 <= len(
wallet_json["accounts"]
), f"There is no index '{account_id}' in wallet: {wallet_json}"
return wallet_json["accounts"][account_id]["address"]
class WalletFactory:
def __init__(self, wallets_dir: str, shell: Shell) -> None:
def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None:
self.shell = shell
self.wallets_dir = wallets_dir
self.cluster = cluster
def create_wallet(self, file_name: str, password: Optional[str] = None) -> WalletInfo:
def create_wallet(
self, file_name: Optional[str] = None, password: Optional[str] = None
) -> WalletInfo:
"""
Creates new default wallet.
@ -75,6 +74,8 @@ class WalletFactory:
WalletInfo object of new wallet.
"""
if file_name is None:
file_name = str(uuid.uuid4())
if password is None:
password = ""
@ -84,8 +85,6 @@ class WalletFactory:
init_wallet(wallet_path, password)
with open(wallet_config_path, "w") as config_file:
config_file.write(f'wallet: {wallet_path}\npassword: "{password}"')
reporter.attach(wallet_path, os.path.basename(wallet_path))
config_file.write(f'password: "{password}"')
return WalletInfo(wallet_path, password, wallet_config_path)


@ -32,7 +32,7 @@ class ClusterTestBase:
):
epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node)
if wait_block:
self.wait_for_blocks(wait_block)
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * wait_block)
def wait_for_epochs_align(self):
epoch.wait_for_epochs_align(self.shell, self.cluster)
@ -42,6 +42,3 @@ class ClusterTestBase:
def ensure_fresh_epoch(self):
return epoch.ensure_fresh_epoch(self.shell, self.cluster)
def wait_for_blocks(self, blocks_count: int = 1):
time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * blocks_count)


@ -2,8 +2,6 @@ import itertools
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Callable, Collection, Optional, Union
MAX_WORKERS = 50
def parallel(
fn: Union[Callable, list[Callable]],
@ -56,7 +54,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]:
futures: list[Future] = []
with ThreadPoolExecutor(max_workers=min(len(fn_list), MAX_WORKERS)) as executor:
with ThreadPoolExecutor(max_workers=len(fn_list)) as executor:
for fn in fn_list:
task_args = _get_args(*args)
task_kwargs = _get_kwargs(**kwargs)
@ -69,7 +67,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]:
def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]:
futures: list[Future] = []
with ThreadPoolExecutor(max_workers=min(len(parallel_items), MAX_WORKERS)) as executor:
with ThreadPoolExecutor(max_workers=len(parallel_items)) as executor:
for item in parallel_items:
task_args = _get_args(*args)
task_kwargs = _get_kwargs(**kwargs)
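Not part of the diff: the effect of the MAX_WORKERS cap removed above; without min(), one thread is created per item, which is wasteful for large collections.

from concurrent.futures import ThreadPoolExecutor

MAX_WORKERS = 50
items = list(range(200))
# Capped: at most 50 threads serve the 200 items.
with ThreadPoolExecutor(max_workers=min(len(items), MAX_WORKERS)) as executor:
    results = list(executor.map(lambda i: i * 2, items))
assert len(results) == 200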


@ -15,7 +15,7 @@ from contextlib import suppress
from datetime import datetime
from io import StringIO
from textwrap import shorten
from typing import Dict, List, Optional, TypedDict, Union
from typing import Dict, List, TypedDict, Union
import pexpect
@ -41,7 +41,7 @@ def _run_with_passwd(cmd: str) -> str:
return cmd.decode()
def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_format: str = "json") -> str:
def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str:
child = pexpect.spawn(cmd)
child.delaybeforesend = 1
@ -52,7 +52,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_
child.sendline(access_key)
child.expect("Default region name.*")
child.sendline("region")
child.sendline("")
child.expect("Default output format.*")
child.sendline(out_format)
@ -75,22 +75,14 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date
reporter.attach(command_attachment, "Command execution")
def log_command_execution(url: str, cmd: str, output: Union[str, dict], params: Optional[dict] = None) -> None:
def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None:
logger.info(f"{cmd}: {output}")
with suppress(Exception):
json_output = json.dumps(output, indent=4, sort_keys=True)
output = json_output
try:
json_params = json.dumps(params, indent=4, sort_keys=True)
except TypeError as err:
logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}")
else:
params = json_params
command_attachment = f"COMMAND: '{cmd}'\n" f"URL: {url}\n" f"PARAMS:\n{params}\n" f"OUTPUT:\n{output}\n"
reporter.attach(command_attachment, "Command execution")
command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n"
with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
reporter.attach(command_attachment, "Command execution")
def parse_netmap_output(output: str) -> list[NodeNetmapInfo]:


@ -6,46 +6,11 @@ from typing import Any, Optional
from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.utils import string_utils
logger = logging.getLogger("NeoLogger")
class TestFile(os.PathLike):
def __init__(self, path: str):
self.path = path
def __del__(self):
logger.debug(f"Removing file {self.path}")
if os.path.exists(self.path):
os.remove(self.path)
def __str__(self):
return self.path
def __repr__(self):
return self.path
def __fspath__(self):
return self.path
def ensure_directory(path):
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
def ensure_directory_opener(path, flags):
ensure_directory(path)
return os.open(path, flags)
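Not part of the diff: the custom-opener pattern used above; open() delegates file-descriptor creation to the opener, which can create missing parent directories first. The path below is a made-up example.

import os


def ensure_directory_opener(path, flags):
    # Create the parent directory before the file descriptor is opened.
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        os.makedirs(directory)
    return os.open(path, flags)


with open("TemporaryDir/assets/demo.bin", "wb", opener=ensure_directory_opener) as file:
    file.write(os.urandom(16))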
# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps
# Use object_size dt in future as argument
@reporter.step("Generate file")
def generate_file(size: int) -> TestFile:
def generate_file(size: int) -> str:
"""Generates a binary file with the specified size in bytes.
Args:
@ -54,22 +19,19 @@ def generate_file(size: int) -> TestFile:
Returns:
The path to the generated file.
"""
test_file = TestFile(os.path.join(ASSETS_DIR, string_utils.unique_name("object-")))
with open(test_file, "wb", opener=ensure_directory_opener) as file:
file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
with open(file_path, "wb") as file:
file.write(os.urandom(size))
logger.info(f"File with size {size} bytes has been generated: {test_file}")
logger.info(f"File with size {size} bytes has been generated: {file_path}")
return test_file
return file_path
# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps
# Use object_size dt in future as argument
@reporter.step("Generate file with content")
def generate_file_with_content(
size: int,
file_path: Optional[str | TestFile] = None,
file_path: Optional[str] = None,
content: Optional[str] = None,
) -> TestFile:
) -> str:
"""Creates a new file with specified content.
Args:
@ -86,22 +48,20 @@ def generate_file_with_content(
content = os.urandom(size)
mode = "wb"
test_file = None
if not file_path:
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
elif isinstance(file_path, TestFile):
test_file = file_path
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
else:
test_file = TestFile(file_path)
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
with open(test_file, mode, opener=ensure_directory_opener) as file:
with open(file_path, mode) as file:
file.write(content)
return test_file
return file_path
@reporter.step("Get File Hash")
def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset: Optional[int] = None) -> str:
def get_file_hash(file_path: str, len: Optional[int] = None, offset: Optional[int] = None) -> str:
"""Generates hash for the specified file.
Args:
@ -128,7 +88,7 @@ def get_file_hash(file_path: str | TestFile, len: Optional[int] = None, offset:
@reporter.step("Concatenation set of files to one file")
def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional[str | TestFile] = None) -> TestFile:
def concat_files(file_paths: list, resulting_file_path: Optional[str] = None) -> str:
"""Concatenates several files into a single file.
Args:
@ -138,24 +98,16 @@ def concat_files(file_paths: list[str | TestFile], resulting_file_path: Optional
Returns:
Path to the resulting file.
"""
test_file = None
if not resulting_file_path:
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
elif isinstance(resulting_file_path, TestFile):
test_file = resulting_file_path
else:
test_file = TestFile(resulting_file_path)
with open(test_file, "wb", opener=ensure_directory_opener) as f:
resulting_file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
with open(resulting_file_path, "wb") as f:
for file in file_paths:
with open(file, "rb") as part_file:
f.write(part_file.read())
return test_file
return resulting_file_path
@reporter.step("Split file to {parts} parts")
def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]:
def split_file(file_path: str, parts: int) -> list[str]:
"""Splits specified file into several specified number of parts.
Each part is saved under name `{original_file}_part_{i}`.
@ -177,7 +129,7 @@ def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]:
part_file_paths = []
for content_offset in range(0, content_size + 1, chunk_size):
part_file_name = f"{file_path}_part_{part_id}"
part_file_paths.append(TestFile(part_file_name))
part_file_paths.append(part_file_name)
with open(part_file_name, "wb") as out_file:
out_file.write(content[content_offset : content_offset + chunk_size])
part_id += 1
@ -185,8 +137,9 @@ def split_file(file_path: str | TestFile, parts: int) -> list[TestFile]:
return part_file_paths
@reporter.step("Get file content")
def get_file_content(file_path: str | TestFile, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None) -> Any:
def get_file_content(
file_path: str, content_len: Optional[int] = None, mode: str = "r", offset: Optional[int] = None
) -> Any:
"""Returns content of specified file.
Args:


@ -1,26 +1,12 @@
import random
import re
import string
from datetime import datetime
ONLY_ASCII_LETTERS = string.ascii_letters
DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits
NON_DIGITS_AND_LETTERS = string.punctuation
def unique_name(prefix: str = "", postfix: str = ""):
"""
Generate a unique short name with the given prefix.
Names remain unique across multiple runs.
Args:
prefix: prefix for unique name generation
Returns:
unique name string
"""
return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{postfix}"
def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS):
"""
Generate random string from source letters list


@ -1,90 +1,82 @@
import logging
import re
from functools import lru_cache
from frostfs_testlib import reporter
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
from frostfs_testlib.hosting import Host, Hosting
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
from frostfs_testlib.shell import Shell
from frostfs_testlib.testing.parallel import parallel
logger = logging.getLogger("NeoLogger")
@reporter.step("Get local binaries versions")
def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
versions = {}
for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]:
out = shell.exec(f"{binary} --version").stdout
versions[binary] = parse_version(out)
versions[binary] = _parse_version(out)
frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC)
versions[FROSTFS_CLI_EXEC] = parse_version(frostfs_cli.version.get().stdout)
frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout)
try:
frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC)
versions[FROSTFS_ADM_EXEC] = parse_version(frostfs_adm.version.get().stdout)
versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout)
except RuntimeError:
logger.info(f"{FROSTFS_ADM_EXEC} not installed")
out = shell.exec("aws --version").stdout
out_lines = out.split("\n")
versions["AWS"] = out_lines[0] if out_lines else "Unknown"
logger.info(f"Local binaries version: {out_lines[0]}")
return versions
@reporter.step("Collect binaries versions from host")
def parallel_binary_verions(host: Host) -> dict[str, str]:
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
versions_by_host = {}
binary_path_by_name = {
**{
svc.name[:-3]: {
"exec_path": svc.attributes.get("exec_path"),
"param": svc.attributes.get("custom_version_parameter", "--version"),
for host in hosting.hosts:
binary_path_by_name = {} # Maps binary name to executable path
for service_config in host.config.services:
exec_path = service_config.attributes.get("exec_path")
requires_check = service_config.attributes.get("requires_version_check", "true")
if exec_path:
binary_path_by_name[service_config.name] = {
"exec_path": exec_path,
"check": requires_check.lower() == "true",
}
for cli_config in host.config.clis:
requires_check = cli_config.attributes.get("requires_version_check", "true")
binary_path_by_name[cli_config.name] = {
"exec_path": cli_config.exec_path,
"check": requires_check.lower() == "true",
}
for svc in host.config.services
if svc.attributes.get("exec_path") and svc.attributes.get("requires_version_check", "true") == "true"
},
**{
cli.name: {"exec_path": cli.exec_path, "param": cli.attributes.get("custom_version_parameter", "--version")}
for cli in host.config.clis
if cli.attributes.get("requires_version_check", "true") == "true"
},
}
shell = host.get_shell()
versions_at_host = {}
for binary_name, binary in binary_path_by_name.items():
binary_path = binary["exec_path"]
try:
result = shell.exec(f"{binary_path} {binary['param']}")
version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown"
versions_at_host[binary_name] = version
except Exception as exc:
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
versions_at_host[binary_name] = "Unknown"
versions_by_host[host.config.address] = versions_at_host
return versions_by_host
shell = host.get_shell()
versions_at_host = {}
for binary_name, binary in binary_path_by_name.items():
try:
binary_path = binary["exec_path"]
result = shell.exec(f"{binary_path} --version")
versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]}
except Exception as exc:
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]}
versions_by_host[host.config.address] = versions_at_host
# Consolidate versions across all hosts
versions = {}
for host, binary_versions in versions_by_host.items():
for name, binary in binary_versions.items():
captured_version = versions.get(name, {}).get("version")
version = binary["version"]
if captured_version:
assert captured_version == version, f"Binary {name} has inconsistent version on host {host}"
else:
versions[name] = {"version": version, "check": binary["check"]}
return versions
@lru_cache
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]:
versions_by_host: dict[str, dict[str, str]] = {}
with reporter.step("Get remote binaries versions"):
future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts)
for future in future_binary_verions:
versions_by_host.update(future.result())
return versions_by_host
def parse_version(version_output: str) -> str:
version = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", version_output, re.IGNORECASE)
return version.group(1).strip("\"'\n\t ") if version else version_output
def _parse_version(version_output: str) -> str:
version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)
return version.group(1).strip() if version else version_output
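Not part of the diff: how the two version-parsing regexes above behave on a typical version banner (invented for illustration); both recover the bare version string.

import re

out = "Version: v0.38.4"
with_lookbehind = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", out, re.IGNORECASE)
plain = re.search(r"version[:\s]*v?(.+)", out, re.IGNORECASE)
assert with_lookbehind.group(1).strip("\"'\n\t ") == plain.group(1).strip() == "0.38.4"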


@ -9,16 +9,6 @@ from neo3.wallet import wallet as neo3_wallet
logger = logging.getLogger("frostfs.testlib.utils")
def __fix_wallet_schema(wallet: dict) -> None:
# Temporary function to fix wallets that do not conform to the schema
# TODO: get rid of it once issue is solved
if "name" not in wallet:
wallet["name"] = None
for account in wallet["accounts"]:
if "extra" not in account:
account["extra"] = None
def init_wallet(wallet_path: str, wallet_password: str):
"""
Create new wallet and new account.
@ -43,15 +33,29 @@ def get_last_address_from_wallet(wallet_path: str, wallet_password: str):
Returns:
The address for the wallet.
"""
wallet = load_wallet(wallet_path, wallet_password)
with open(wallet_path) as wallet_file:
wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password)
address = wallet.accounts[-1].address
logger.info(f"got address: {address}")
return address
def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str:
wallet = load_wallet(wallet_path, wallet_password)
public_key_hex = str(wallet.accounts[0].public_key)
def __fix_wallet_schema(wallet: dict) -> None:
# Temporary function to fix wallets that do not conform to the schema
# TODO: get rid of it once issue is solved
if "name" not in wallet:
wallet["name"] = None
for account in wallet["accounts"]:
if "extra" not in account:
account["extra"] = None
# Get public key from wallet file
with open(wallet_path, "r") as file:
wallet_content = json.load(file)
__fix_wallet_schema(wallet_content)
wallet_from_json = neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password)
public_key_hex = str(wallet_from_json.accounts[0].public_key)
# Convert public key to specified format
if format == "hex":
@ -65,9 +69,7 @@ def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str =
raise ValueError(f"Invalid public key format: {format}")
def load_wallet(wallet_path: str, wallet_password: str) -> neo3_wallet.Wallet:
with open(wallet_path) as wallet_file:
wallet_content = json.load(wallet_file)
__fix_wallet_schema(wallet_content)
return neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password)
def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet:
with open(path, "r") as wallet_file:
wlt_data = wallet_file.read()
return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd)


@ -3,7 +3,14 @@ from typing import Any, get_args
import pytest
from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom
from frostfs_testlib.load.load_config import (
EndpointSelectionStrategy,
LoadParams,
LoadScenario,
LoadType,
Preset,
ReadFrom,
)
from frostfs_testlib.load.runners import DefaultRunner
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME
from frostfs_testlib.storage.cluster import ClusterNode
@ -92,7 +99,9 @@ class TestLoadConfig:
def test_load_controller_string_representation(self, load_params: LoadParams):
load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL
load_params.object_size = 512
background_load_controller = BackgroundLoadController("tmp", load_params, None, None, DefaultRunner(None))
background_load_controller = BackgroundLoadController(
"tmp", load_params, "wallet", None, None, DefaultRunner(None)
)
expected = "grpc 512 KiB, writers=7, readers=7, deleters=8"
assert f"{background_load_controller}" == expected
assert repr(background_load_controller) == expected
@ -132,7 +141,7 @@ class TestLoadConfig:
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
"--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
"--policy 'container_placement_policy'",
"--ignore-errors",
"--sleep '19'",
"--local",
@ -147,8 +156,6 @@ class TestLoadConfig:
"READERS": 7,
"DELETERS": 8,
"READ_AGE": 8,
"STREAMING": 9,
"K6_OUT": "output",
"PREGEN_JSON": "pregen_json",
"PREPARE_LOCALLY": True,
}
@ -164,7 +171,7 @@ class TestLoadConfig:
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
"--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
"--policy 'container_placement_policy'",
"--ignore-errors",
"--sleep '19'",
"--local",
@ -173,7 +180,6 @@ class TestLoadConfig:
expected_env_vars = {
"DURATION": 9,
"WRITE_OBJ_SIZE": 11,
"K6_OUT": "output",
"REGISTRY_FILE": "registry_file",
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
"K6_SETUP_TIMEOUT": "setup_timeout",
@ -189,7 +195,6 @@ class TestLoadConfig:
"READ_RATE": 9,
"READ_AGE": 8,
"DELETE_RATE": 11,
"STREAMING": 9,
"PREPARE_LOCALLY": True,
}
@ -205,7 +210,7 @@ class TestLoadConfig:
"--out 'pregen_json'",
"--workers '7'",
"--buckets '13'",
"--location 's3_location' --location 's3_location_2'",
"--location 's3_location'",
"--ignore-errors",
"--sleep '19'",
"--acl 'acl'",
@ -214,14 +219,12 @@ class TestLoadConfig:
"DURATION": 9,
"WRITE_OBJ_SIZE": 11,
"REGISTRY_FILE": "registry_file",
"K6_OUT": "output",
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
"K6_SETUP_TIMEOUT": "setup_timeout",
"WRITERS": 7,
"READERS": 7,
"DELETERS": 8,
"READ_AGE": 8,
"STREAMING": 9,
"NO_VERIFY_SSL": True,
"PREGEN_JSON": "pregen_json",
}
@ -239,7 +242,7 @@ class TestLoadConfig:
"--out 'pregen_json'",
"--workers '7'",
"--buckets '13'",
"--location 's3_location' --location 's3_location_2'",
"--location 's3_location'",
"--ignore-errors",
"--sleep '19'",
"--acl 'acl'",
@ -248,7 +251,6 @@ class TestLoadConfig:
"DURATION": 183900,
"WRITE_OBJ_SIZE": 11,
"REGISTRY_FILE": "registry_file",
"K6_OUT": "output",
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
"K6_SETUP_TIMEOUT": "setup_timeout",
"NO_VERIFY_SSL": True,
@ -263,7 +265,6 @@ class TestLoadConfig:
"WRITE_RATE": 10,
"READ_RATE": 9,
"READ_AGE": 8,
"STREAMING": 9,
"DELETE_RATE": 11,
}
@ -279,7 +280,7 @@ class TestLoadConfig:
"--out 'pregen_json'",
"--workers '7'",
"--buckets '13'",
"--location 's3_location' --location 's3_location_2'",
"--location 's3_location'",
"--ignore-errors",
"--sleep '19'",
"--acl 'acl'",
@ -288,7 +289,6 @@ class TestLoadConfig:
"DURATION": 9,
"WRITE_OBJ_SIZE": 11,
"REGISTRY_FILE": "registry_file",
"K6_OUT": "output",
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
"K6_SETUP_TIMEOUT": "setup_timeout",
"NO_VERIFY_SSL": True,
@ -303,7 +303,6 @@ class TestLoadConfig:
"WRITE_RATE": 10,
"READ_RATE": 9,
"READ_AGE": 8,
"STREAMING": 9,
"DELETE_RATE": 11,
}
@ -320,7 +319,7 @@ class TestLoadConfig:
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
"--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
"--policy 'container_placement_policy'",
"--ignore-errors",
"--sleep '19'",
"--acl 'acl'",
@ -328,7 +327,6 @@ class TestLoadConfig:
expected_env_vars = {
"DURATION": 9,
"WRITE_OBJ_SIZE": 11,
"K6_OUT": "output",
"NO_VERIFY_SSL": True,
"REGISTRY_FILE": "registry_file",
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
@ -337,7 +335,6 @@ class TestLoadConfig:
"READERS": 7,
"DELETERS": 8,
"READ_AGE": 8,
"STREAMING": 9,
"PREGEN_JSON": "pregen_json",
}
@ -353,17 +350,15 @@ class TestLoadConfig:
"--out 'pregen_json'",
"--workers '7'",
"--containers '16'",
"--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
"--policy 'container_placement_policy'",
"--ignore-errors",
"--sleep '19'",
"--acl 'acl'",
]
expected_env_vars = {
"CONFIG_DIR": "config_dir",
"CONFIG_FILE": "config_file",
"DURATION": 9,
"WRITE_OBJ_SIZE": 11,
"K6_OUT": "output",
"REGISTRY_FILE": "registry_file",
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
"K6_SETUP_TIMEOUT": "setup_timeout",
@ -371,50 +366,12 @@ class TestLoadConfig:
"READERS": 7,
"DELETERS": 8,
"READ_AGE": 8,
"STREAMING": 9,
"MAX_TOTAL_SIZE_GB": 17,
"PREGEN_JSON": "pregen_json",
}
self._check_preset_params(load_params, expected_preset_args)
self._check_env_vars(load_params, expected_env_vars)
@pytest.mark.parametrize(
"input, value, params",
[
(["A C ", " B"], ["A C", "B"], [f"--policy 'A C' --policy 'B'"]),
(" A ", ["A"], ["--policy 'A'"]),
(" A , B ", ["A , B"], ["--policy 'A , B'"]),
([" A", "B "], ["A", "B"], ["--policy 'A' --policy 'B'"]),
(None, None, []),
],
)
def test_grpc_list_parsing_formatter(self, input, value, params):
load_params = LoadParams(LoadType.gRPC)
load_params.preset = Preset()
load_params.preset.container_placement_policy = input
assert load_params.preset.container_placement_policy == value
self._check_preset_params(load_params, params)
@pytest.mark.parametrize(
"input, value, params",
[
(["A C ", " B"], ["A C", "B"], [f"--location 'A C' --location 'B'"]),
(" A ", ["A"], ["--location 'A'"]),
(" A , B ", ["A , B"], ["--location 'A , B'"]),
([" A", "B "], ["A", "B"], ["--location 'A' --location 'B'"]),
(None, None, []),
],
)
def test_s3_list_parsing_formatter(self, input, value, params):
load_params = LoadParams(LoadType.S3)
load_params.preset = Preset()
load_params.preset.s3_location = input
assert load_params.preset.s3_location == value
self._check_preset_params(load_params, params)
@pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True)
def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
expected_env_vars = {
@ -455,14 +412,12 @@ class TestLoadConfig:
"DURATION": 0,
"WRITE_OBJ_SIZE": 0,
"REGISTRY_FILE": "",
"K6_OUT": "",
"K6_MIN_ITERATION_DURATION": "",
"K6_SETUP_TIMEOUT": "",
"WRITERS": 0,
"READERS": 0,
"DELETERS": 0,
"READ_AGE": 0,
"STREAMING": 0,
"PREGEN_JSON": "",
"PREPARE_LOCALLY": False,
}
@ -486,7 +441,6 @@ class TestLoadConfig:
"DURATION": 0,
"WRITE_OBJ_SIZE": 0,
"REGISTRY_FILE": "",
"K6_OUT": "",
"K6_MIN_ITERATION_DURATION": "",
"K6_SETUP_TIMEOUT": "",
"MAX_WRITERS": 0,
@ -501,7 +455,6 @@ class TestLoadConfig:
"READ_RATE": 0,
"DELETE_RATE": 0,
"READ_AGE": 0,
"STREAMING": 0,
"PREPARE_LOCALLY": False,
}
@ -524,14 +477,12 @@ class TestLoadConfig:
"DURATION": 0,
"WRITE_OBJ_SIZE": 0,
"REGISTRY_FILE": "",
"K6_OUT": "",
"K6_MIN_ITERATION_DURATION": "",
"K6_SETUP_TIMEOUT": "",
"WRITERS": 0,
"READERS": 0,
"DELETERS": 0,
"READ_AGE": 0,
"STREAMING": 0,
"NO_VERIFY_SSL": False,
"PREGEN_JSON": "",
}
@ -555,7 +506,6 @@ class TestLoadConfig:
"DURATION": 0,
"WRITE_OBJ_SIZE": 0,
"REGISTRY_FILE": "",
"K6_OUT": "",
"K6_MIN_ITERATION_DURATION": "",
"K6_SETUP_TIMEOUT": "",
"NO_VERIFY_SSL": False,
@ -571,7 +521,6 @@ class TestLoadConfig:
"READ_RATE": 0,
"DELETE_RATE": 0,
"READ_AGE": 0,
"STREAMING": 0,
}
self._check_preset_params(load_params, expected_preset_args)
@ -594,14 +543,12 @@ class TestLoadConfig:
"WRITE_OBJ_SIZE": 0,
"NO_VERIFY_SSL": False,
"REGISTRY_FILE": "",
"K6_OUT": "",
"K6_MIN_ITERATION_DURATION": "",
"K6_SETUP_TIMEOUT": "",
"WRITERS": 0,
"READERS": 0,
"DELETERS": 0,
"READ_AGE": 0,
"STREAMING": 0,
"PREGEN_JSON": "",
}
@ -621,20 +568,16 @@ class TestLoadConfig:
"--acl ''",
]
expected_env_vars = {
"CONFIG_DIR": "",
"CONFIG_FILE": "",
"DURATION": 0,
"WRITE_OBJ_SIZE": 0,
"REGISTRY_FILE": "",
"K6_OUT": "",
"K6_MIN_ITERATION_DURATION": "",
"K6_SETUP_TIMEOUT": "",
"MAX_TOTAL_SIZE_GB": 0,
"WRITERS": 0,
"READERS": 0,
"DELETERS": 0,
"READ_AGE": 0,
"STREAMING": 0,
"PREGEN_JSON": "",
}
@ -699,7 +642,7 @@ class TestLoadConfig:
assert sorted(preset_parameters) == sorted(expected_preset_args)
def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]):
env_vars = load_params.get_k6_vars()
env_vars = load_params.get_env_vars()
assert env_vars == expected_env_vars
def _check_all_values_none(self, dataclass, skip_fields=None):
@ -720,7 +663,9 @@ class TestLoadConfig:
value = getattr(dataclass, field.name)
assert value is not None, f"{field.name} is not None"
def _get_filled_load_params(self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False) -> LoadParams:
def _get_filled_load_params(
self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False
) -> LoadParams:
load_type_map = {
LoadScenario.S3: LoadType.S3,
LoadScenario.S3_CAR: LoadType.S3,
@ -737,12 +682,13 @@ class TestLoadConfig:
meta_fields = self._get_meta_fields(load_params)
for field in meta_fields:
if getattr(field.instance, field.field.name) is None and load_params.scenario in field.field.metadata["applicable_scenarios"]:
if (
getattr(field.instance, field.field.name) is None
and load_params.scenario in field.field.metadata["applicable_scenarios"]
):
value_to_set_map = {
int: 0 if set_emtpy else len(field.field.name),
float: 0 if set_emtpy else len(field.field.name),
str: "" if set_emtpy else field.field.name,
list[str]: "" if set_emtpy else [field.field.name, f"{field.field.name}_2"],
bool: False if set_emtpy else True,
}
value_to_set = value_to_set_map[field.field_type]
@ -755,7 +701,11 @@ class TestLoadConfig:
def _get_meta_fields(self, instance):
data_fields = fields(instance)
fields_with_data = [MetaTestField(field, self._get_actual_field_type(field), instance) for field in data_fields if field.metadata]
fields_with_data = [
MetaTestField(field, self._get_actual_field_type(field), instance)
for field in data_fields
if field.metadata
]
for field in data_fields:
actual_field_type = self._get_actual_field_type(field)