Compare commits

master ... feature_er

20 commits (author and date columns were empty in the source view):

| SHA1 |
| --- |
| 9dbed0781e |
| c40b637768 |
| 1880f96277 |
| c1e5dd1007 |
| f4d71b664d |
| da1a4d0099 |
| 3e36defb90 |
| 6810765d46 |
| 2cffff3ffe |
| d9f4e88f94 |
| deb2f12bec |
| f236c1b083 |
| cc13a43bec |
| a74d1bff4f |
| 547f6106ec |
| c2aa41e5dc |
| 8e446ccb96 |
| 9c9fb7878a |
| 3a799afdcf |
| b610e04a7b |
76 changed files with 1322 additions and 5201 deletions
@@ -1,109 +0,0 @@
-hosts:
-- address: localhost
-  hostname: localhost
-  attributes:
-    sudo_shell: false
-  plugin_name: docker
-  healthcheck_plugin_name: basic
-  attributes:
-    skip_readiness_check: True
-    force_transactions: True
-  services:
-  - name: frostfs-storage_01
-    attributes:
-      container_name: s01
-      config_path: /etc/frostfs/storage/config.yml
-      wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
-      local_wallet_config_path: ./TemporaryDir/empty-password.yml
-      local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
-      wallet_password: ""
-      volume_name: storage_storage_s01
-      endpoint_data0: s01.frostfs.devenv:8080
-      control_endpoint: s01.frostfs.devenv:8081
-      un_locode: "RU MOW"
-  - name: frostfs-storage_02
-    attributes:
-      container_name: s02
-      config_path: /etc/frostfs/storage/config.yml
-      wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
-      local_wallet_config_path: ./TemporaryDir/empty-password.yml
-      local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
-      wallet_password: ""
-      volume_name: storage_storage_s02
-      endpoint_data0: s02.frostfs.devenv:8080
-      control_endpoint: s02.frostfs.devenv:8081
-      un_locode: "RU LED"
-  - name: frostfs-storage_03
-    attributes:
-      container_name: s03
-      config_path: /etc/frostfs/storage/config.yml
-      wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
-      local_wallet_config_path: ./TemporaryDir/empty-password.yml
-      local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
-      wallet_password: ""
-      volume_name: storage_storage_s03
-      endpoint_data0: s03.frostfs.devenv:8080
-      control_endpoint: s03.frostfs.devenv:8081
-      un_locode: "SE STO"
-  - name: frostfs-storage_04
-    attributes:
-      container_name: s04
-      config_path: /etc/frostfs/storage/config.yml
-      wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
-      local_wallet_config_path: ./TemporaryDir/empty-password.yml
-      local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
-      wallet_password: ""
-      volume_name: storage_storage_s04
-      endpoint_data0: s04.frostfs.devenv:8080
-      control_endpoint: s04.frostfs.devenv:8081
-      un_locode: "FI HEL"
-  - name: frostfs-s3_01
-    attributes:
-      container_name: s3_gate
-      config_path: ../frostfs-dev-env/services/s3_gate/.s3.env
-      wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
-      local_wallet_config_path: ./TemporaryDir/password-s3.yml
-      local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
-      wallet_password: "s3"
-      endpoint_data0: https://s3.frostfs.devenv:8080
-  - name: frostfs-http_01
-    attributes:
-      container_name: http_gate
-      config_path: ../frostfs-dev-env/services/http_gate/.http.env
-      wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
-      local_wallet_config_path: ./TemporaryDir/password-other.yml
-      local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
-      wallet_password: "one"
-      endpoint_data0: http://http.frostfs.devenv
-  - name: frostfs-ir_01
-    attributes:
-      container_name: ir01
-      config_path: ../frostfs-dev-env/services/ir/.ir.env
-      wallet_path: ../frostfs-dev-env/services/ir/az.json
-      local_wallet_config_path: ./TemporaryDir/password-other.yml
-      local_wallet_path: ../frostfs-dev-env/services/ir/az.json
-      wallet_password: "one"
-  - name: neo-go_01
-    attributes:
-      container_name: morph_chain
-      config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml
-      wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
-      local_wallet_config_path: ./TemporaryDir/password-other.yml
-      local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
-      wallet_password: "one"
-      endpoint_internal0: http://morph-chain.frostfs.devenv:30333
-  - name: main-chain_01
-    attributes:
-      container_name: main_chain
-      config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml
-      wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
-      local_wallet_config_path: ./TemporaryDir/password-other.yml
-      local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
-      wallet_password: "one"
-      endpoint_internal0: http://main-chain.frostfs.devenv:30333
-  - name: coredns_01
-    attributes:
-      container_name: coredns
-  clis:
-  - name: frostfs-cli
-    exec_path: frostfs-cli
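The deleted topology above is the input for the `hosting` fixture removed later in this diff (see the fixtures file near the end): the YAML is loaded with `yaml.full_load` and handed to `Hosting.configure`. A minimal sketch of that consumption path, assuming frostfs-testlib is installed and the YAML above is saved as hosting.yaml (the file name is illustrative):

    import yaml

    from frostfs_testlib.hosting.hosting import Hosting

    # Mirrors the deleted hosting() fixture: parse the topology and register
    # the Docker host together with its storage/gate/chain services.
    with open("hosting.yaml", "r") as file:
        hosting_config = yaml.full_load(file)

    hosting = Hosting()
    hosting.configure(hosting_config)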
@@ -1,3 +0,0 @@
-.* @TrueCloudLab/qa-committers
-.forgejo/.* @potyarkin
-Makefile @potyarkin
@@ -27,8 +27,8 @@ dependencies = [
     "testrail-api>=1.12.0",
     "pytest==7.1.2",
     "tenacity==8.0.1",
-    "boto3==1.35.30",
-    "boto3-stubs[essential]==1.35.30",
+    "boto3==1.16.33",
+    "boto3-stubs[essential]==1.16.33",
 ]
 requires-python = ">=3.10"
 
@@ -62,7 +62,7 @@ authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3Credentia
 wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"
 
 [project.entry-points."frostfs.testlib.bucket_cid_resolver"]
-frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver"
+frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver"
 
 [tool.isort]
 profile = "black"
@@ -89,7 +89,4 @@ push = false
 filterwarnings = [
     "ignore:Blowfish has been deprecated:cryptography.utils.CryptographyDeprecationWarning",
 ]
-testpaths = ["tests"]
-
-[project.entry-points.pytest11]
-testlib = "frostfs_testlib"
+testpaths = ["tests"]
@@ -8,8 +8,8 @@ docstring_parser==0.15
 testrail-api==1.12.0
 tenacity==8.0.1
 pytest==7.1.2
-boto3==1.35.30
-boto3-stubs[essential]==1.35.30
+boto3==1.16.33
+boto3-stubs[essential]==1.16.33
 
 # Dev dependencies
 black==22.8.0
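Both dependency hunks move boto3 and boto3-stubs together, in pyproject.toml and in the requirements file. An illustrative consistency check (not part of the diff) for keeping the two pins in lockstep:

    import re

    # Illustrative: boto3 and its stubs should always carry the same pin.
    pins = ["boto3==1.16.33", "boto3-stubs[essential]==1.16.33"]
    versions = {re.search(r"==(\S+)$", pin).group(1) for pin in pins}
    assert len(versions) == 1, f"boto3 pins diverge: {versions}"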
@@ -1,4 +1 @@
 __version__ = "2.0.1"
-
-from .fixtures import configure_testlib, hosting, temp_directory
-from .hooks import pytest_collection_modifyitems
@@ -1,5 +1,5 @@
 from frostfs_testlib.analytics import test_case
 from frostfs_testlib.analytics.test_case import TestCasePriority
 from frostfs_testlib.analytics.test_collector import TestCase, TestCaseCollector
-from frostfs_testlib.analytics.test_exporter import TСExporter
+from frostfs_testlib.analytics.test_exporter import TestExporter
 from frostfs_testlib.analytics.testrail_exporter import TestrailExporter
@@ -3,8 +3,7 @@ from abc import ABC, abstractmethod
 from frostfs_testlib.analytics.test_collector import TestCase
 
 
-# TODO: REMOVE ME
-class TСExporter(ABC):
+class TestExporter(ABC):
     test_cases_cache = []
     test_suites_cache = []
 
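Note that the old class name is not the ASCII `TCExporter`: its second character is the Cyrillic capital letter Es, which renders identically to a Latin "C" but is a different identifier. A quick way to see the difference (illustrative, not part of the diff):

    import unicodedata

    # The old name contains U+0421 CYRILLIC CAPITAL LETTER ES, not Latin "C".
    print(unicodedata.name("TСExporter"[1]))  # CYRILLIC CAPITAL LETTER ES
    print("TСExporter" == "TCExporter")       # False: distinct identifiers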
@@ -47,7 +46,9 @@ class TСExporter(ABC):
         """
 
     @abstractmethod
-    def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
+    def update_test_case(
+        self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
+    ) -> None:
         """
         Update test case in TMS
         """
@@ -59,7 +60,9 @@ class TСExporter(ABC):
 
         for test_case in test_cases:
             test_suite = self.get_or_create_test_suite(test_case.suite_name)
-            test_section = self.get_or_create_suite_section(test_suite, test_case.suite_section_name)
+            test_section = self.get_or_create_suite_section(
+                test_suite, test_case.suite_section_name
+            )
             test_case_in_tms = self.search_test_case_id(test_case.id)
             steps = [{"content": value, "expected": " "} for key, value in test_case.steps.items()]
 
@@ -1,10 +1,10 @@
 from testrail_api import TestRailAPI
 
 from frostfs_testlib.analytics.test_collector import TestCase
-from frostfs_testlib.analytics.test_exporter import TСExporter
+from frostfs_testlib.analytics.test_exporter import TestExporter
 
 
-class TestrailExporter(TСExporter):
+class TestrailExporter(TestExporter):
     def __init__(
         self,
         tr_url: str,
@@ -62,13 +62,19 @@ class TestrailExporter(TСExporter):
         It's help do not call TMS each time then we search test case
         """
         for test_suite in self.test_suites_cache:
-            self.test_cases_cache.extend(self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"]))
+            self.test_cases_cache.extend(
+                self.api.cases.get_cases(self.tr_project_id, suite_id=test_suite["id"])
+            )
 
     def search_test_case_id(self, test_case_id: str) -> object:
         """
         Find test cases in TestRail (cache) by ID
         """
-        test_cases = [test_case for test_case in self.test_cases_cache if test_case["custom_autotest_name"] == test_case_id]
+        test_cases = [
+            test_case
+            for test_case in self.test_cases_cache
+            if test_case["custom_autotest_name"] == test_case_id
+        ]
 
         if len(test_cases) > 1:
             raise RuntimeError(f"Too many results found in test rail for id {test_case_id}")
@@ -81,7 +87,9 @@ class TestrailExporter(TСExporter):
         """
         Get suite name with exact name from Testrail or create if not exist
         """
-        test_rail_suites = [suite for suite in self.test_suites_cache if suite["name"] == test_suite_name]
+        test_rail_suites = [
+            suite for suite in self.test_suites_cache if suite["name"] == test_suite_name
+        ]
 
         if not test_rail_suites:
             test_rail_suite = self.api.suites.add_suite(
@@ -94,13 +102,17 @@ class TestrailExporter(TСExporter):
         elif len(test_rail_suites) == 1:
             return test_rail_suites.pop()
         else:
-            raise RuntimeError(f"Too many results found in test rail for suite name {test_suite_name}")
+            raise RuntimeError(
+                f"Too many results found in test rail for suite name {test_suite_name}"
+            )
 
     def get_or_create_suite_section(self, test_rail_suite, section_name) -> object:
         """
         Get suite section with exact name from Testrail or create new one if not exist
         """
-        test_rail_sections = [section for section in test_rail_suite["sections"] if section["name"] == section_name]
+        test_rail_sections = [
+            section for section in test_rail_suite["sections"] if section["name"] == section_name
+        ]
 
         if not test_rail_sections:
             test_rail_section = self.api.sections.add_section(
@@ -116,7 +128,9 @@ class TestrailExporter(TСExporter):
         elif len(test_rail_sections) == 1:
             return test_rail_sections.pop()
         else:
-            raise RuntimeError(f"Too many results found in test rail for section name {section_name}")
+            raise RuntimeError(
+                f"Too many results found in test rail for section name {section_name}"
+            )
 
     def prepare_request_body(self, test_case: TestCase, test_suite, test_suite_section) -> dict:
         """
@@ -150,7 +164,9 @@ class TestrailExporter(TСExporter):
 
         self.api.cases.add_case(**request_body)
 
-    def update_test_case(self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section) -> None:
+    def update_test_case(
+        self, test_case: TestCase, test_case_in_tms, test_suite, test_suite_section
+    ) -> None:
         """
         Update test case in Testrail
         """
@@ -69,7 +69,9 @@ class FrostfsAdmMorph(CliCommand):
             **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
-    def set_config(self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None) -> CommandResult:
+    def set_config(
+        self, set_key_value: str, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
+    ) -> CommandResult:
         """Add/update global config value in the FrostFS network.
 
         Args:
@@ -108,7 +110,7 @@ class FrostfsAdmMorph(CliCommand):
             **{param: param_value for param, param_value in locals().items() if param not in ["self"]},
         )
 
-    def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> CommandResult:
+    def dump_hashes(self, rpc_endpoint: str) -> CommandResult:
         """Dump deployed contract hashes.
 
         Args:
@@ -123,7 +125,7 @@ class FrostfsAdmMorph(CliCommand):
         )
 
     def force_new_epoch(
-        self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None, delta: Optional[int] = None
+        self, rpc_endpoint: Optional[str] = None, alphabet_wallets: Optional[str] = None
     ) -> CommandResult:
         """Create new FrostFS epoch event in the side chain.
 
@@ -342,147 +344,9 @@ class FrostfsAdmMorph(CliCommand):
 
         return self._execute(
             f"morph remove-nodes {' '.join(node_netmap_keys)}",
-            **{param: param_value for param, param_value in locals().items() if param not in ["self", "node_netmap_keys"]},
-        )
-
-    def add_rule(
-        self,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        rule: Optional[list[str]] = None,
-        path: Optional[str] = None,
-        chain_id_hex: Optional[bool] = None,
-        chain_name: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            path: Path to encoded chain in JSON or binary format
-            rule: Rule statement
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "morph ape add-rule-chain",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def get_rule(
-        self,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        chain_id_hex: Optional[bool] = None,
-        chain_name: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            chain-id string Chain id
-            chain-id-hex Flag to parse chain ID as hex
-            target-name string Resource name in APE resource name format
-            target-type string Resource type(container/namespace)
-            timeout duration Timeout for an operation (default 15s)
-            wallet string Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "morph ape get-rule-chain",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def list_rules(
-        self,
-        target_type: str,
-        target_name: Optional[str] = None,
-        rpc_endpoint: Optional[str] = None,
-        chain_name: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "morph ape list-rule-chains",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def remove_rule(
-        self,
-        chain_id: str,
-        target_name: str,
-        target_type: str,
-        all: Optional[bool] = None,
-        chain_name: Optional[str] = None,
-        chain_id_hex: Optional[bool] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Drop objects from the node's local storage
-
-        Args:
-            all: Remove all chains
-            chain-id: Assign ID to the parsed chain
-            chain-id-hex: Flag to parse chain ID as hex
-            target-name: Resource name in APE resource name format
-            target-type: Resource type(container/namespace)
-            timeout: Timeout for an operation (default 15s)
-            wallet: Path to the wallet or binary key
-
-        Returns:
-            Command`s result.
-        """
-        return self._execute(
-            "morph ape rm-rule-chain",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def get_nns_records(
-        self,
-        name: str,
-        type: Optional[str] = None,
-        rpc_endpoint: Optional[str] = None,
-        alphabet_wallets: Optional[str] = None,
-    ) -> CommandResult:
-        """Returns domain record of the specified type
-
-        Args:
-            name: Domain name
-            type: Domain name service record type(A|CNAME|SOA|TXT)
-            rpc_endpoint: N3 RPC node endpoint
-            alphabet_wallets: path to alphabet wallets dir
-
-        Returns:
-            Command's result
-        """
-        return self._execute(
-            "morph nns get-records",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
+            **{
+                param: param_value
+                for param, param_value in locals().items()
+                if param not in ["self", "node_netmap_keys"]
+            },
         )
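Every wrapper in these CLI classes forwards its keyword arguments through `locals()`, excluding `self` (and any positionals already baked into the command string, such as `node_netmap_keys` above). A minimal sketch of the idiom with a stubbed `_execute` (the stub is hypothetical; the real `_execute` lives in `CliCommand` and shells out to the binary):

    from typing import Optional

    class CliStub:
        def _execute(self, command: str, **params) -> str:
            # Stub: render each non-None kwarg as a --flag instead of running
            # the real frostfs-adm binary.
            flags = " ".join(f"--{name.replace('_', '-')} {value}" for name, value in params.items() if value is not None)
            return f"frostfs-adm {command} {flags}".strip()

        def dump_hashes(self, rpc_endpoint: str, domain: Optional[str] = None) -> str:
            # The idiom from the diff: forward every local except `self`.
            return self._execute(
                "morph dump-hashes",
                **{param: value for param, value in locals().items() if param not in ["self"]},
            )

    print(CliStub().dump_hashes("http://morph-chain.frostfs.devenv:30333"))
    # frostfs-adm morph dump-hashes --rpc-endpoint http://morph-chain.frostfs.devenv:30333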
@@ -1,70 +0,0 @@
-from typing import Optional
-
-from frostfs_testlib.cli.cli_command import CliCommand
-from frostfs_testlib.shell import CommandResult
-
-
-class FrostfsCliApeManager(CliCommand):
-    """Operations with APE manager."""
-
-    def add(
-        self,
-        rpc_endpoint: str,
-        chain_id: Optional[str] = None,
-        chain_id_hex: Optional[str] = None,
-        path: Optional[str] = None,
-        rule: Optional[str] | Optional[list[str]] = None,
-        target_name: Optional[str] = None,
-        target_type: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Add rule chain for a target."""
-
-        return self._execute(
-            "ape-manager add",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def list(
-        self,
-        rpc_endpoint: str,
-        target_name: Optional[str] = None,
-        target_type: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Generate APE override by target and APE chains. Util command.
-
-        Generated APE override can be dumped to a file in JSON format that is passed to
-        "create" command.
-        """
-
-        return self._execute(
-            "ape-manager list",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def remove(
-        self,
-        rpc_endpoint: str,
-        chain_id: Optional[str] = None,
-        chain_id_hex: Optional[str] = None,
-        target_name: Optional[str] = None,
-        target_type: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """Generate APE override by target and APE chains. Util command.
-
-        Generated APE override can be dumped to a file in JSON format that is passed to
-        "create" command.
-        """
-
-        return self._execute(
-            "ape-manager remove",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
@@ -1,54 +0,0 @@
-from typing import Optional
-
-from frostfs_testlib.cli.cli_command import CliCommand
-from frostfs_testlib.shell import CommandResult
-
-
-class FrostfsCliBearer(CliCommand):
-    def create(
-        self,
-        rpc_endpoint: str,
-        out: str,
-        issued_at: Optional[str] = None,
-        expire_at: Optional[str] = None,
-        not_valid_before: Optional[str] = None,
-        ape: Optional[str] = None,
-        eacl: Optional[str] = None,
-        owner: Optional[str] = None,
-        json: Optional[bool] = False,
-        impersonate: Optional[bool] = False,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-    ) -> CommandResult:
-        """Create bearer token.
-
-        All epoch flags can be specified relative to the current epoch with the +n syntax.
-        In this case --rpc-endpoint flag should be specified and the epoch in bearer token
-        is set to current epoch + n.
-        """
-        return self._execute(
-            "bearer create",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def generate_ape_override(
-        self,
-        chain_id: Optional[str] = None,
-        chain_id_hex: Optional[str] = None,
-        cid: Optional[str] = None,
-        output: Optional[str] = None,
-        path: Optional[str] = None,
-        rule: Optional[str] = None,
-        wallet: Optional[str] = None,
-        address: Optional[str] = None,
-    ) -> CommandResult:
-        """Generate APE override by target and APE chains. Util command.
-
-        Generated APE override can be dumped to a file in JSON format that is passed to
-        "create" command.
-        """
-
-        return self._execute(
-            "bearer generate-ape-override",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
@@ -2,8 +2,6 @@ from typing import Optional
 
 from frostfs_testlib.cli.frostfs_cli.accounting import FrostfsCliAccounting
 from frostfs_testlib.cli.frostfs_cli.acl import FrostfsCliACL
-from frostfs_testlib.cli.frostfs_cli.ape_manager import FrostfsCliApeManager
-from frostfs_testlib.cli.frostfs_cli.bearer import FrostfsCliBearer
 from frostfs_testlib.cli.frostfs_cli.container import FrostfsCliContainer
 from frostfs_testlib.cli.frostfs_cli.control import FrostfsCliControl
 from frostfs_testlib.cli.frostfs_cli.netmap import FrostfsCliNetmap
@@ -43,5 +41,3 @@ class FrostfsCli:
         self.version = FrostfsCliVersion(shell, frostfs_cli_exec_path, config=config_file)
         self.tree = FrostfsCliTree(shell, frostfs_cli_exec_path, config=config_file)
         self.control = FrostfsCliControl(shell, frostfs_cli_exec_path, config=config_file)
-        self.bearer = FrostfsCliBearer(shell, frostfs_cli_exec_path, config=config_file)
-        self.ape_manager = FrostfsCliApeManager(shell, frostfs_cli_exec_path, config=config_file)
@@ -16,8 +16,6 @@ class FrostfsCliContainer(CliCommand):
         basic_acl: Optional[str] = None,
         await_mode: bool = False,
         disable_timestamp: bool = False,
-        force: bool = False,
-        trace: bool = False,
         name: Optional[str] = None,
         nonce: Optional[str] = None,
         policy: Optional[str] = None,
@@ -39,8 +37,6 @@ class FrostfsCliContainer(CliCommand):
             basic_acl: Hex encoded basic ACL value or keywords like 'public-read-write',
                 'private', 'eacl-public-read' (default "private").
             disable_timestamp: Disable timestamp container attribute.
-            force: Skip placement validity check.
-            trace: Generate trace ID and print it.
             name: Container name attribute.
             nonce: UUIDv4 nonce value for container.
             policy: QL-encoded or JSON-encoded placement policy or path to file with it.
@@ -73,7 +69,6 @@ class FrostfsCliContainer(CliCommand):
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         force: bool = False,
-        trace: bool = False,
     ) -> CommandResult:
         """
         Delete an existing container.
@@ -83,7 +78,6 @@ class FrostfsCliContainer(CliCommand):
             address: Address of wallet account.
             await_mode: Block execution until container is removed.
             cid: Container ID.
-            trace: Generate trace ID and print it.
             force: Do not check whether container contains locks and remove immediately.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             session: Path to a JSON-encoded container session token.
@@ -106,11 +100,9 @@ class FrostfsCliContainer(CliCommand):
         cid: str,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         await_mode: bool = False,
         to: Optional[str] = None,
         json_mode: bool = False,
-        trace: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
@@ -123,14 +115,12 @@ class FrostfsCliContainer(CliCommand):
             await_mode: Block execution until container is removed.
             cid: Container ID.
             json_mode: Print or dump container in JSON format.
-            trace: Generate trace ID and print it.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             to: Path to dump encoded container.
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
             timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.
 
         Returns:
             Command's result.
@@ -146,7 +136,6 @@ class FrostfsCliContainer(CliCommand):
         cid: str,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         await_mode: bool = False,
         to: Optional[str] = None,
         session: Optional[str] = None,
@@ -163,14 +152,11 @@ class FrostfsCliContainer(CliCommand):
             cid: Container ID.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             to: Path to dump encoded container.
-            json_mode: Print or dump container in JSON format.
-            trace: Generate trace ID and print it.
             session: Path to a JSON-encoded container session token.
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
             timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.
 
         Returns:
             Command's result.
@@ -184,10 +170,8 @@ class FrostfsCliContainer(CliCommand):
     def list(
         self,
         rpc_endpoint: str,
-        name: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         owner: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -199,15 +183,12 @@ class FrostfsCliContainer(CliCommand):
 
         Args:
             address: Address of wallet account.
-            name: List containers by the attribute name.
             owner: Owner of containers (omit to use owner from private key).
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
-            trace: Generate trace ID and print it.
             timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.
 
         Returns:
             Command's result.
@@ -221,11 +202,8 @@ class FrostfsCliContainer(CliCommand):
         self,
         rpc_endpoint: str,
         cid: str,
-        bearer: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
-        generate_key: Optional[bool] = None,
-        trace: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
@@ -236,14 +214,11 @@ class FrostfsCliContainer(CliCommand):
         Args:
             address: Address of wallet account.
             cid: Container ID.
-            bearer: File with signed JSON or binary encoded bearer token.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
-            trace: Generate trace ID and print it.
             timeout: Timeout for the operation (default 15s).
-            generate_key: Generate a new private key.
 
         Returns:
             Command's result.
@@ -253,7 +228,6 @@ class FrostfsCliContainer(CliCommand):
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
 
-    # TODO Deprecated method with 0.42
     def set_eacl(
         self,
         rpc_endpoint: str,
@@ -299,7 +273,6 @@ class FrostfsCliContainer(CliCommand):
         address: Optional[str] = None,
         ttl: Optional[int] = None,
         from_file: Optional[str] = None,
-        trace: bool = False,
         short: Optional[bool] = True,
         xhdr: Optional[dict] = None,
         generate_key: Optional[bool] = None,
@@ -317,9 +290,8 @@ class FrostfsCliContainer(CliCommand):
             from_file: string File path with encoded container
             timeout: duration Timeout for the operation (default 15 s)
             short: shorten the output of node information.
-            trace: Generate trace ID and print it.
             xhdr: Dict with request X-Headers.
-            generate_key: Generate a new private key.
+            generate_key: Generate a new private key
 
         Returns:
 
@@ -13,7 +13,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -26,7 +25,6 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
-            generate_key: Generate new private key.
             oid: Object ID.
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
             session: Filepath to a JSON- or binary-encoded token of the object DELETE session.
@@ -51,7 +49,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         header: Optional[str] = None,
         no_progress: bool = False,
@@ -69,7 +66,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
-            generate_key: Generate new private key.
             header: File to write header to. Default: stdout.
             no_progress: Do not show progress bar.
             oid: Object ID.
@@ -97,7 +93,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         range: Optional[str] = None,
         salt: Optional[str] = None,
         ttl: Optional[int] = None,
@@ -113,7 +108,6 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
-            generate_key: Generate new private key.
             oid: Object ID.
             range: Range to take hash from in the form offset1:length1,...
             rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
@@ -141,7 +135,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         json_mode: bool = False,
         main_only: bool = False,
@@ -160,7 +153,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
-            generate_key: Generate new private key.
             json_mode: Marshal output in JSON.
             main_only: Return only main fields.
             oid: Object ID.
@@ -191,7 +183,6 @@ class FrostfsCliObject(CliCommand):
         expire_at: Optional[int] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         session: Optional[str] = None,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
@@ -204,7 +195,6 @@ class FrostfsCliObject(CliCommand):
             address: Address of wallet account.
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
-            generate_key: Generate new private key.
             oid: Object ID.
             lifetime: Lock lifetime.
             expire_at: Lock expiration epoch.
@@ -232,7 +222,6 @@ class FrostfsCliObject(CliCommand):
         address: Optional[str] = None,
         attributes: Optional[dict] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         copies_number: Optional[int] = None,
         disable_filename: bool = False,
         disable_timestamp: bool = False,
@@ -257,7 +246,6 @@ class FrostfsCliObject(CliCommand):
             disable_timestamp: Do not set well-known timestamp attribute.
             expire_at: Last epoch in the life of the object.
             file: File with object payload.
-            generate_key: Generate new private key.
             no_progress: Do not show progress bar.
             notify: Object notification in the form of *epoch*:*topic*; '-'
                 topic means using default.
@@ -276,54 +264,6 @@ class FrostfsCliObject(CliCommand):
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
 
-    def patch(
-        self,
-        rpc_endpoint: str,
-        cid: str,
-        oid: str,
-        range: list[str] = None,
-        payload: list[str] = None,
-        new_attrs: Optional[str] = None,
-        replace_attrs: bool = False,
-        address: Optional[str] = None,
-        bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
-        session: Optional[str] = None,
-        timeout: Optional[str] = None,
-        trace: bool = False,
-        ttl: Optional[int] = None,
-        wallet: Optional[str] = None,
-        xhdr: Optional[dict] = None,
-    ) -> CommandResult:
-        """
-        PATCH an object.
-
-        Args:
-            rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>')
-            cid: Container ID
-            oid: Object ID
-            range: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2]
-            payload: An array of file paths to be applied in each range
-            new_attrs: Attributes to be changed in the format Key1=Value1,Key2=Value2
-            replace_attrs: Replace all attributes completely with new ones specified in new_attrs
-            address: Address of wallet account
-            bearer: File with signed JSON or binary encoded bearer token
-            generate_key: Generate new private key
-            session: Filepath to a JSON- or binary-encoded token of the object RANGE session
-            timeout: Timeout for the operation
-            trace: Generate trace ID and print it
-            ttl: TTL value in request meta header (default 2)
-            wallet: WIF (NEP-2) string or path to the wallet or binary key
-            xhdr: Dict with request X-Headers
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "object patch",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
     def range(
         self,
         rpc_endpoint: str,
@@ -333,7 +273,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         file: Optional[str] = None,
         json_mode: bool = False,
         raw: bool = False,
@@ -350,7 +289,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             file: File to write object payload to. Default: stdout.
-            generate_key: Generate new private key.
             json_mode: Marshal output in JSON.
             oid: Object ID.
             range: Range to take data from in the form offset:length.
@@ -377,7 +315,6 @@ class FrostfsCliObject(CliCommand):
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
         filters: Optional[list] = None,
         oid: Optional[str] = None,
         phy: bool = False,
@@ -395,7 +332,6 @@ class FrostfsCliObject(CliCommand):
             bearer: File with signed JSON or binary encoded bearer token.
             cid: Container ID.
             filters: Repeated filter expressions or files with protobuf JSON.
-            generate_key: Generate new private key.
             oid: Object ID.
             phy: Search physically stored objects.
             root: Search for user objects.
@@ -418,15 +354,14 @@ class FrostfsCliObject(CliCommand):
         self,
         rpc_endpoint: str,
         cid: str,
-        oid: Optional[str] = None,
         wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
-        generate_key: Optional[bool] = None,
+        oid: Optional[str] = None,
         trace: bool = False,
         root: bool = False,
         verify_presence_all: bool = False,
         json: bool = False,
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         timeout: Optional[str] = None,
@@ -40,7 +40,7 @@ class FrostfsCliShards(CliCommand):
         self,
         endpoint: str,
         mode: str,
-        id: Optional[list[str]] = None,
+        id: Optional[list[str]],
         wallet: Optional[str] = None,
         wallet_password: Optional[str] = None,
         address: Optional[str] = None,
@@ -143,119 +143,3 @@ class FrostfsCliShards(CliCommand):
             **{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
         )
-
-    def evacuation_start(
-        self,
-        endpoint: str,
-        id: Optional[str] = None,
-        scope: Optional[str] = None,
-        all: bool = False,
-        no_errors: bool = True,
-        await_mode: bool = False,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-        no_progress: bool = False,
-    ) -> CommandResult:
-        """
-        Objects evacuation from shard to other shards.
-
-        Args:
-            address: Address of wallet account
-            all: Process all shards
-            await: Block execution until evacuation is completed
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            id: List of shard IDs in base58 encoding
-            no_errors: Skip invalid/unreadable objects (default true)
-            no_progress: Print progress if await provided
-            scope: Evacuation scope; possible values: trees, objects, all (default "all")
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation start",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def evacuation_reset(
-        self,
-        endpoint: str,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Reset evacuate objects from shard to other shards status.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation reset",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def evacuation_stop(
-        self,
-        endpoint: str,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Stop running evacuate process from shard to other shards.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation stop",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def evacuation_status(
-        self,
-        endpoint: str,
-        address: Optional[str] = None,
-        timeout: Optional[str] = None,
-    ) -> CommandResult:
-        """
-        Get evacuate objects from shard to other shards status.
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards evacuation status",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
-
-    def detach(self, endpoint: str, address: Optional[str] = None, id: Optional[str] = None, timeout: Optional[str] = None):
-        """
-        Detach and close the shards
-
-        Args:
-            address: Address of wallet account
-            endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
-            id: List of shard IDs in base58 encoding
-            timeout: Timeout for an operation (default 15s)
-
-        Returns:
-            Command's result.
-        """
-        return self._execute(
-            "control shards detach",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
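A hypothetical call sequence for the evacuation wrappers removed above; the control endpoint comes from the hosting topology at the top of this diff, and the constructor arguments follow the usual `CliCommand(shell, exec_path)` convention:

    from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards
    from frostfs_testlib.shell import LocalShell

    # Hypothetical usage sketch of the removed evacuation commands.
    shards = FrostfsCliShards(LocalShell(), "frostfs-cli")
    shards.evacuation_start("s01.frostfs.devenv:8081", scope="objects", await_mode=True)
    print(shards.evacuation_status("s01.frostfs.devenv:8081").stdout)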
@@ -54,11 +54,3 @@ class FrostfsCliUtil(CliCommand):
             "util sign session-token",
             **{param: value for param, value in locals().items() if param not in ["self"]},
         )
-
-    def convert_eacl(self, from_file: str, to_file: str, json: Optional[bool] = False, ape: Optional[bool] = False):
-        """Convert representation of extended ACL table."""
-
-        return self._execute(
-            "util convert eacl",
-            **{param: value for param, value in locals().items() if param not in ["self"]},
-        )
@@ -15,8 +15,6 @@ class NetmapParser:
         "epoch_duration": r"Epoch duration: (?P<epoch_duration>\d+)",
         "inner_ring_candidate_fee": r"Inner Ring candidate fee: (?P<inner_ring_candidate_fee>\d+)",
         "maximum_object_size": r"Maximum object size: (?P<maximum_object_size>\d+)",
-        "maximum_count_of_data_shards": r"Maximum count of data shards: (?P<maximum_count_of_data_shards>\d+)",
-        "maximum_count_of_parity_shards": r"Maximum count of parity shards: (?P<maximum_count_of_parity_shards>\d+)",
         "withdrawal_fee": r"Withdrawal fee: (?P<withdrawal_fee>\d+)",
         "homomorphic_hashing_disabled": r"Homomorphic hashing disabled: (?P<homomorphic_hashing_disabled>true|false)",
         "maintenance_mode_allowed": r"Maintenance mode allowed: (?P<maintenance_mode_allowed>true|false)",
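The `NetmapParser` patterns above pull named groups out of network-settings output of the CLI. Applying one of them in isolation (the sample line is fabricated to match the regex, not real node output):

    import re

    # Illustrative: extract a named group with one of the patterns above.
    pattern = r"Maximum object size: (?P<maximum_object_size>\d+)"
    match = re.search(pattern, "Maximum object size: 67108864")
    print(match.group("maximum_object_size"))  # 67108864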
@@ -1,5 +0,0 @@
-from frostfs_testlib.clients.http.http_client import HttpClient
-from frostfs_testlib.clients.s3.aws_cli_client import AwsCliClient
-from frostfs_testlib.clients.s3.boto3_client import Boto3ClientWrapper
-from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper
-from frostfs_testlib.clients.s3.s3_http_client import S3HttpClient
@@ -1,144 +0,0 @@
-import io
-import json
-import logging
-import logging.config
-from typing import Mapping, Sequence
-
-import httpx
-
-from frostfs_testlib import reporter
-
-timeout = httpx.Timeout(60, read=150)
-LOGGING_CONFIG = {
-    "disable_existing_loggers": False,
-    "version": 1,
-    "handlers": {"default": {"class": "logging.StreamHandler", "formatter": "http", "stream": "ext://sys.stderr"}},
-    "formatters": {
-        "http": {
-            "format": "%(levelname)s [%(asctime)s] %(name)s - %(message)s",
-            "datefmt": "%Y-%m-%d %H:%M:%S",
-        }
-    },
-    "loggers": {
-        "httpx": {
-            "handlers": ["default"],
-            "level": "DEBUG",
-        },
-        "httpcore": {
-            "handlers": ["default"],
-            "level": "ERROR",
-        },
-    },
-}
-
-logging.config.dictConfig(LOGGING_CONFIG)
-logger = logging.getLogger("NeoLogger")
-
-
-class HttpClient:
-    @reporter.step("Send {method} request to {url}")
-    def send(self, method: str, url: str, expected_status_code: int = None, **kwargs: dict) -> httpx.Response:
-        transport = httpx.HTTPTransport(verify=False, retries=5)
-        client = httpx.Client(timeout=timeout, transport=transport)
-        response = client.request(method, url, **kwargs)
-
-        self._attach_response(response, **kwargs)
-        logger.info(f"Response: {response.status_code} => {response.text}")
-
-        if expected_status_code:
-            assert (
-                response.status_code == expected_status_code
-            ), f"Got {response.status_code} response code while {expected_status_code} expected"
-
-        return response
-
-    @classmethod
-    def _parse_body(cls, readable: httpx.Request | httpx.Response) -> str | None:
-        try:
-            content = readable.read()
-        except Exception as e:
-            logger.warning(f"Unable to read file: {str(e)}")
-            return None
-
-        if not content:
-            return None
-
-        request_body = None
-
-        try:
-            request_body = json.loads(content)
-        except (json.JSONDecodeError, UnicodeDecodeError) as e:
-            logger.warning(f"Unable to convert body to json: {str(e)}")
-
-        if request_body is not None:
-            return json.dumps(request_body, default=str, indent=4)
-
-        try:
-            request_body = content.decode()
-        except UnicodeDecodeError as e:
-            logger.warning(f"Unable to decode binary data to text using UTF-8 encoding: {str(e)}")
-
-        request_body = content if request_body is None else request_body
-        request_body = "<large text data>" if len(request_body) > 1000 else request_body
-
-        return request_body
-
-    @classmethod
-    def _parse_files(cls, files: Mapping | Sequence | None) -> dict:
-        filepaths = {}
-
-        if not files:
-            return filepaths
-
-        if isinstance(files, Sequence):
-            items = files
-        elif isinstance(files, Mapping):
-            items = files.items()
-        else:
-            raise TypeError(f"'files' must be either Sequence or Mapping, got: {type(files).__name__}")
-
-        for name, file in items:
-            if isinstance(file, io.IOBase):
-                filepaths[name] = file.name
-            elif isinstance(file, Sequence):
-                filepaths[name] = file[1].name
-
-        return filepaths
-
-    @classmethod
-    def _attach_response(cls, response: httpx.Response, **kwargs):
-        request = response.request
-        request_headers = json.dumps(dict(request.headers), default=str, indent=4)
-        request_body = cls._parse_body(request)
-
-        files = kwargs.get("files")
-        request_files = cls._parse_files(files)
-
-        response_headers = json.dumps(dict(response.headers), default=str, indent=4)
-        response_body = cls._parse_body(response)
-
-        report = (
-            f"Method: {request.method}\n\n"
-            + f"URL: {request.url}\n\n"
-            + f"Request Headers: {request_headers}\n\n"
-            + (f"Request Body: {request_body}\n\n" if request_body else "")
-            + (f"Request Files: {request_files}\n\n" if request_files else "")
-            + f"Response Status Code: {response.status_code}\n\n"
-            + f"Response Headers: {response_headers}\n\n"
-            + (f"Response Body: {response_body}\n\n" if response_body else "")
-        )
-        curl_request = cls._create_curl_request(request.url, request.method, request.headers, request_body, request_files)
-
-        reporter.attach(report, "Requests Info")
-        reporter.attach(curl_request, "CURL")
-
-    @classmethod
-    def _create_curl_request(cls, url: str, method: str, headers: httpx.Headers, data: str, files: dict) -> str:
-        headers = " ".join(f'-H "{name.title()}: {value}"' for name, value in headers.items())
-        data = f" -d '{data}'" if data else ""
-
-        for name, path in files.items():
-            data += f' -F "{name}=@{path}"'
-
-        # Option -k means no verify SSL
-        return f"curl {url} -X {method} {headers}{data} -k"
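The deleted `HttpClient.send` wires three things into every request: a retrying transport, disabled TLS verification (hence the trailing `-k` in the generated curl line), and a 60s/150s-read timeout. The same pattern with bare httpx, as a minimal sketch (the endpoint is taken from the hosting config above and may not be reachable outside the devenv):

    import httpx

    # Minimal sketch of the same httpx setup used by the deleted client.
    transport = httpx.HTTPTransport(verify=False, retries=5)
    with httpx.Client(timeout=httpx.Timeout(60, read=150), transport=transport) as client:
        response = client.request("GET", "http://http.frostfs.devenv")
        assert response.status_code == 200, f"Got {response.status_code} response code while 200 expected"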
@@ -1 +0,0 @@
-from frostfs_testlib.clients.s3.interfaces import BucketContainerResolver, S3ClientWrapper, VersioningStatus

(One file diff suppressed because it is too large.)
@@ -1,149 +0,0 @@
-import hashlib
-import logging
-import xml.etree.ElementTree as ET
-
-import httpx
-from botocore.auth import SigV4Auth
-from botocore.awsrequest import AWSRequest
-from botocore.credentials import Credentials
-
-from frostfs_testlib import reporter
-from frostfs_testlib.clients import HttpClient
-from frostfs_testlib.utils.file_utils import TestFile
-
-logger = logging.getLogger("NeoLogger")
-
-DEFAULT_TIMEOUT = 60.0
-
-
-class S3HttpClient:
-    def __init__(
-        self, s3gate_endpoint: str, access_key_id: str, secret_access_key: str, profile: str = "default", region: str = "us-east-1"
-    ) -> None:
-        self.http_client = HttpClient()
-        self.credentials = Credentials(access_key_id, secret_access_key)
-        self.profile = profile
-        self.region = region
-
-        self.iam_endpoint: str = None
-        self.s3gate_endpoint: str = None
-        self.service: str = None
-        self.signature: SigV4Auth = None
-
-        self.set_endpoint(s3gate_endpoint)
-
-    def _to_s3_header(self, header: str) -> dict:
-        replacement_map = {
-            "Acl": "ACL",
-            "_": "-",
-        }
-
-        result = header
-        if not header.startswith("x_amz"):
-            result = header.title()
-
-        for find, replace in replacement_map.items():
-            result = result.replace(find, replace)
-
-        return result
-
-    def _convert_to_s3_headers(self, scope: dict, exclude: list[str] = None):
-        exclude = ["self", "cls"] if not exclude else exclude + ["self", "cls"]
-        return {self._to_s3_header(header): value for header, value in scope.items() if header not in exclude and value is not None}
-
-    def _create_aws_request(
-        self, method: str, url: str, headers: dict, content: str | bytes | TestFile = None, params: dict = None
-    ) -> AWSRequest:
-        data = b""
-
-        if content is not None:
-            if isinstance(content, TestFile):
-                with open(content, "rb") as io_content:
-                    data = io_content.read()
-            elif isinstance(content, str):
-                data = bytes(content, encoding="utf-8")
-            elif isinstance(content, bytes):
-                data = content
-            else:
-                raise TypeError(f"Content expected as a string, bytes or TestFile object, got: {content}")
-
-        headers["X-Amz-Content-SHA256"] = hashlib.sha256(data).hexdigest()
-        aws_request = AWSRequest(method, url, headers, data, params)
-        self.signature.add_auth(aws_request)
-
-        return aws_request
-
-    def _exec_request(
-        self,
-        method: str,
-        url: str,
-        headers: dict,
-        content: str | bytes | TestFile = None,
-        params: dict = None,
-        timeout: float = DEFAULT_TIMEOUT,
-    ) -> dict:
-        aws_request = self._create_aws_request(method, url, headers, content, params)
-        response = self.http_client.send(
-            aws_request.method,
-            aws_request.url,
-            headers=dict(aws_request.headers),
-            data=aws_request.data,
-            params=aws_request.params,
-            timeout=timeout,
-        )
-
-        try:
-            response.raise_for_status()
-        except httpx.HTTPStatusError:
-            raise httpx.HTTPStatusError(response.text, request=response.request, response=response)
-
-        root = ET.fromstring(response.read())
-        data = {
-            "LastModified": root.find(".//LastModified").text,
-            "ETag": root.find(".//ETag").text,
-        }
-
-        if response.headers.get("x-amz-version-id"):
-            data["VersionId"] = response.headers.get("x-amz-version-id")
-
-        return data
-
-    @reporter.step("Set endpoint S3 to {s3gate_endpoint}")
-    def set_endpoint(self, s3gate_endpoint: str):
-        if self.s3gate_endpoint == s3gate_endpoint:
-            return
-
-        self.s3gate_endpoint = s3gate_endpoint
-        self.service = "s3"
-        self.signature = SigV4Auth(self.credentials, self.service, self.region)
-
-    @reporter.step("Set endpoint IAM to {iam_endpoint}")
-    def set_iam_endpoint(self, iam_endpoint: str):
-        if self.iam_endpoint == iam_endpoint:
-            return
-
-        self.iam_endpoint = iam_endpoint
-        self.service = "iam"
-        self.signature = SigV4Auth(self.credentials, self.service, self.region)
-
-    @reporter.step("Patch object S3")
-    def patch_object(
-        self,
-        bucket: str,
-        key: str,
-        content: str | bytes | TestFile,
-        content_range: str,
-        version_id: str = None,
-        if_match: str = None,
-        if_unmodified_since: str = None,
-        x_amz_expected_bucket_owner: str = None,
-        timeout: float = DEFAULT_TIMEOUT,
-    ) -> dict:
-        if content_range and not content_range.startswith("bytes"):
-            content_range = f"bytes {content_range}/*"
-
-        url = f"{self.s3gate_endpoint}/{bucket}/{key}"
-        headers = self._convert_to_s3_headers(locals(), exclude=["bucket", "key", "content", "version_id", "timeout"])
-        params = {"VersionId": version_id} if version_id is not None else None
-
-        return self._exec_request("PATCH", url, headers, content, params, timeout=timeout)
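A hypothetical call to the deleted `S3HttpClient.patch_object` above: the `content_range` shorthand "0-1023" is expanded to "bytes 0-1023/*" by the method itself, and the remaining keyword arguments are turned into S3 request headers by `_convert_to_s3_headers`. Endpoint and credentials are placeholders, and a running S3 gate is required:

    # Hypothetical usage of the class shown above.
    s3 = S3HttpClient("https://s3.frostfs.devenv:8080", "<access-key-id>", "<secret-access-key>")
    result = s3.patch_object("my-bucket", "my-key", b"\x00" * 1024, content_range="0-1023")
    print(result["ETag"], result.get("VersionId"))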
@@ -1,4 +1,5 @@
 import re
+from datetime import datetime
 from typing import Optional
 
 from frostfs_testlib import reporter

@@ -9,7 +10,6 @@ from frostfs_testlib.shell import LocalShell
 from frostfs_testlib.steps.cli.container import list_containers
 from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
-from frostfs_testlib.utils import string_utils
 
 
 class AuthmateS3CredentialsProvider(S3CredentialsProvider):

@@ -22,7 +22,7 @@ class AuthmateS3CredentialsProvider(S3CredentialsProvider):
 
         gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
         # unique short bucket name
-        bucket = string_utils.unique_name("bucket-")
+        bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}"
 
         frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
         issue_secret_output = frostfs_authmate.secret.issue(
@@ -1,45 +0,0 @@
import logging
import os
from importlib.metadata import entry_points

import pytest
import yaml

from frostfs_testlib import reporter
from frostfs_testlib.hosting.hosting import Hosting
from frostfs_testlib.resources.common import ASSETS_DIR, HOSTING_CONFIG_FILE
from frostfs_testlib.storage import get_service_registry


@pytest.fixture(scope="session")
def configure_testlib():
    reporter.get_reporter().register_handler(reporter.AllureHandler())
    reporter.get_reporter().register_handler(reporter.StepsLogger())
    logging.getLogger("paramiko").setLevel(logging.INFO)

    # Register services for cluster
    registry = get_service_registry()
    services = entry_points(group="frostfs.testlib.services")
    for svc in services:
        registry.register_service(svc.name, svc.load())


@pytest.fixture(scope="session")
def temp_directory(configure_testlib):
    with reporter.step("Prepare tmp directory"):
        full_path = ASSETS_DIR
        if not os.path.exists(full_path):
            os.mkdir(full_path)

    return full_path


@pytest.fixture(scope="session")
def hosting(configure_testlib) -> Hosting:
    with open(HOSTING_CONFIG_FILE, "r") as file:
        hosting_config = yaml.full_load(file)

    hosting_instance = Hosting()
    hosting_instance.configure(hosting_config)

    return hosting_instance
@@ -1,13 +0,0 @@
import pytest


@pytest.hookimpl
def pytest_collection_modifyitems(items: list[pytest.Item]):
    # All tests whose nodeid contains "frostfs" are granted the frostfs marker, excluding:
    # (nodeid = full path of the test)
    # 1. plugins
    # 2. testlib itself
    for item in items:
        location = item.location[0]
        if "frostfs" in location and "plugin" not in location and "testlib" not in location:
            item.add_marker("frostfs")
@@ -60,7 +60,6 @@ class HostConfig:
    """

    plugin_name: str
    hostname: str
    healthcheck_plugin_name: str
    address: str
    s3_creds_plugin_name: str = field(default="authmate")
@@ -164,9 +164,6 @@ class DockerHost(Host):

        return volume_path

    def send_signal_to_service(self, service_name: str, signal: str) -> None:
        raise NotImplementedError("Not implemented for docker")

    def delete_metabase(self, service_name: str) -> None:
        raise NotImplementedError("Not implemented for docker")

@@ -188,12 +185,6 @@ class DockerHost(Host):
    def is_file_exist(self, file_path: str) -> None:
        raise NotImplementedError("Not implemented for docker")

    def wipefs_storage_node_data(self, service_name: str) -> None:
        raise NotImplementedError("Not implemented for docker")

    def finish_wipefs(self, service_name: str) -> None:
        raise NotImplementedError("Not implemented for docker")

    def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
        volume_path = self.get_data_directory(service_name)

@@ -249,8 +240,7 @@ class DockerHost(Host):
        until: Optional[datetime] = None,
        unit: Optional[str] = None,
        exclude_filter: Optional[str] = None,
        priority: Optional[str] = None,
        word_count: bool = None,
        priority: Optional[str] = None
    ) -> str:
        client = self._get_docker_client()
        filtered_logs = ""
@@ -117,17 +117,6 @@ class Host(ABC):
            service_name: Name of the service to stop.
        """

    @abstractmethod
    def send_signal_to_service(self, service_name: str, signal: str) -> None:
        """Send a signal to the service with the specified name using kill -<signal>.

        The service must be hosted on this host.

        Args:
            service_name: Name of the service to send the signal to.
            signal: Signal name. See `kill -l` for all names.
        """

    @abstractmethod
    def mask_service(self, service_name: str) -> None:
        """Prevent the service from being started by any activity by masking it.

@@ -189,21 +178,6 @@ class Host(ABC):
            cache_only: To delete cache only.
        """

    @abstractmethod
    def wipefs_storage_node_data(self, service_name: str) -> None:
        """Erases all data of the storage node with the specified name.

        Args:
            service_name: Name of the storage node service.
        """

    def finish_wipefs(self, service_name: str) -> None:
        """Erases all data of the storage node with the specified name.

        Args:
            service_name: Name of the storage node service.
        """

    @abstractmethod
    def delete_fstree(self, service_name: str) -> None:
        """

@@ -323,8 +297,7 @@ class Host(ABC):
        until: Optional[datetime] = None,
        unit: Optional[str] = None,
        exclude_filter: Optional[str] = None,
        priority: Optional[str] = None,
        word_count: bool = None,
        priority: Optional[str] = None
    ) -> str:
        """Get logs from host filtered by regex.

@@ -333,9 +306,8 @@ class Host(ABC):
            since: If set, limits the time from which logs should be collected. Must be in UTC.
            until: If set, limits the time until which logs should be collected. Must be in UTC.
            unit: Required unit.
            priority: Log level, 0 - emergency, 7 - debug. All messages with that code and higher.
            priority: Log level, 0 - emergency, 7 - debug. All messages with that code and higher.
                For example, if we specify the -p 2 option, journalctl will show all messages with levels 2, 1 and 0.
            word_count: Output type, expected values: lines, bytes, json.

        Returns:
            Found entries as str if any found.
@@ -1,6 +1,5 @@
from abc import ABC, abstractmethod

from frostfs_testlib.load.interfaces.loader import Loader
from frostfs_testlib.load.k6 import K6
from frostfs_testlib.load.load_config import LoadParams
from frostfs_testlib.storage.cluster import ClusterNode

@@ -49,7 +48,3 @@ class ScenarioRunner(ABC):
    @abstractmethod
    def get_results(self) -> dict:
        """Get results from K6 run"""

    @abstractmethod
    def get_loaders(self) -> list[Loader]:
        """Return loaders"""
@@ -25,16 +25,6 @@ def convert_time_to_seconds(time: int | str | None) -> int:
    return seconds


def force_list(input: str | list[str]):
    if input is None:
        return None

    if isinstance(input, list):
        return list(map(str.strip, input))

    return [input.strip()]


class LoadType(Enum):
    gRPC = "grpc"
    S3 = "s3"
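A quick illustration of how `force_list` normalizes its input; the behavior follows directly from the definition above:

    force_list(None)                   # -> None
    force_list(" REP 1 ")              # -> ["REP 1"]
    force_list(["REP 1 ", " REP 2"])   # -> ["REP 1", "REP 2"]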
@@ -152,29 +142,8 @@ class K6ProcessAllocationStrategy(Enum):
    PER_ENDPOINT = "PER_ENDPOINT"


class MetaConfig:
    def _get_field_formatter(self, field_name: str) -> Callable | None:
        data_fields = fields(self)
        formatters = [
            field.metadata["formatter"]
            for field in data_fields
            if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None
        ]
        if formatters:
            return formatters[0]

        return None

    def __setattr__(self, field_name, value):
        formatter = self._get_field_formatter(field_name)
        if formatter:
            value = formatter(value)

        super().__setattr__(field_name, value)


@dataclass
class Preset(MetaConfig):
class Preset:
    # ------ COMMON ------
    # Amount of objects which should be created
    objects_count: Optional[int] = metadata_field(all_load_scenarios, "preload_obj", None, False)
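A self-contained sketch of the formatter hook that `MetaConfig` introduces, with a simplified stand-in for `metadata_field` (the real helper takes scenario and argument-name parameters, as the fields above show; everything else here is illustrative):

    from dataclasses import dataclass, field, fields
    from typing import Callable, Optional


    def metadata_field(formatter: Optional[Callable] = None):
        # Simplified stand-in: only stores the formatter in field metadata.
        return field(default=None, metadata={"formatter": formatter})


    class MetaConfig:
        def _get_field_formatter(self, field_name: str) -> Optional[Callable]:
            for f in fields(self):
                if f.name == field_name and f.metadata.get("formatter") is not None:
                    return f.metadata["formatter"]
            return None

        def __setattr__(self, field_name, value):
            formatter = self._get_field_formatter(field_name)
            if formatter:
                value = formatter(value)
            super().__setattr__(field_name, value)


    @dataclass
    class Demo(MetaConfig):
        policy: Optional[list[str]] = metadata_field(formatter=lambda v: [v] if isinstance(v, str) else v)


    demo = Demo()
    demo.policy = "REP 2"
    print(demo.policy)  # ['REP 2'] -- the formatter ran inside __setattr__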
@@ -189,15 +158,13 @@ class Preset(MetaConfig):
    # Amount of containers which should be created
    containers_count: Optional[int] = metadata_field(grpc_preset_scenarios, "containers", None, False)
    # Container placement policy for containers for gRPC
    container_placement_policy: Optional[list[str]] = metadata_field(grpc_preset_scenarios, "policy", None, False, formatter=force_list)
    # Number of retries for creation of container
    container_creation_retry: Optional[int] = metadata_field(grpc_preset_scenarios, "retry", None, False)
    container_placement_policy: Optional[str] = metadata_field(grpc_preset_scenarios, "policy", None, False)

    # ------ S3 ------
    # Amount of buckets which should be created
    buckets_count: Optional[int] = metadata_field(s3_preset_scenarios, "buckets", None, False)
    # S3 region (AKA placement policy for S3 buckets)
    s3_location: Optional[list[str]] = metadata_field(s3_preset_scenarios, "location", None, False, formatter=force_list)
    s3_location: Optional[str] = metadata_field(s3_preset_scenarios, "location", None, False)

    # Delay between containers creation and object upload for preset
    object_upload_delay: Optional[int] = metadata_field(all_load_scenarios, "sleep", None, False)
|
|||
|
||||
|
||||
@dataclass
|
||||
class PrometheusParams(MetaConfig):
|
||||
class PrometheusParams:
|
||||
# Prometheus server URL
|
||||
server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False)
|
||||
# Prometheus trend stats
|
||||
|
@ -220,7 +187,7 @@ class PrometheusParams(MetaConfig):
|
|||
|
||||
|
||||
@dataclass
|
||||
class LoadParams(MetaConfig):
|
||||
class LoadParams:
|
||||
# ------- CONTROL PARAMS -------
|
||||
# Load type can be gRPC, HTTP, S3.
|
||||
load_type: LoadType
|
||||
|
@@ -445,11 +412,6 @@ class LoadParams(MetaConfig):
        # For preset calls, bool values are passed with just --<argument_name> if the value is True
        return f"--{meta_field.metadata['preset_argument']}" if meta_field.value else ""

        if isinstance(meta_field.value, list):
            return (
                " ".join(f"--{meta_field.metadata['preset_argument']} '{value}'" for value in meta_field.value) if meta_field.value else ""
            )

        return f"--{meta_field.metadata['preset_argument']} '{meta_field.value}'"

    @staticmethod
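For reference, the list branch shown in this hunk renders one quoted flag per element; a standalone sketch of that rendering (the function name is illustrative):

    def render_preset_argument(name: str, value) -> str:
        # Mirrors the list handling above: repeat the flag once per element.
        if isinstance(value, bool):
            return f"--{name}" if value else ""
        if isinstance(value, list):
            return " ".join(f"--{name} '{v}'" for v in value) if value else ""
        return f"--{name} '{value}'"


    print(render_preset_argument("policy", ["REP 1", "REP 2"]))
    # --policy 'REP 1' --policy 'REP 2'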
@@ -469,6 +431,25 @@ class LoadParams(MetaConfig):

        return fields_with_data or []

    def _get_field_formatter(self, field_name: str) -> Callable | None:
        data_fields = fields(self)
        formatters = [
            field.metadata["formatter"]
            for field in data_fields
            if field.name == field_name and "formatter" in field.metadata and field.metadata["formatter"] != None
        ]
        if formatters:
            return formatters[0]

        return None

    def __setattr__(self, field_name, value):
        formatter = self._get_field_formatter(field_name)
        if formatter:
            value = formatter(value)

        super().__setattr__(field_name, value)

    def __str__(self) -> str:
        load_type_str = self.scenario.value if self.scenario else self.load_type.value
        # TODO: migrate load_params defaults to testlib
@@ -30,7 +30,6 @@ from frostfs_testlib.utils.file_keeper import FileKeeper

class RunnerBase(ScenarioRunner):
    k6_instances: list[K6]
    loaders: list[Loader]

    @reporter.step("Run preset on loaders")
    def preset(self):

@@ -50,11 +49,9 @@ class RunnerBase(ScenarioRunner):
    def get_k6_instances(self):
        return self.k6_instances

    def get_loaders(self) -> list[Loader]:
        return self.loaders


class DefaultRunner(RunnerBase):
    loaders: list[Loader]
    user: User

    def __init__(

@@ -231,6 +228,7 @@ class DefaultRunner(RunnerBase):


class LocalRunner(RunnerBase):
    loaders: list[Loader]
    cluster_state_controller: ClusterStateController
    file_keeper: FileKeeper
    user: User
@@ -46,11 +46,3 @@ with open(DEFAULT_WALLET_CONFIG, "w") as file:
MAX_REQUEST_ATTEMPTS = 5
RETRY_MODE = "standard"
CREDENTIALS_CREATE_TIMEOUT = "1m"


HOSTING_CONFIG_FILE = os.getenv(
    "HOSTING_CONFIG_FILE", os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "..", "..", ".devenv.hosting.yaml"))
)

MORE_LOG = os.getenv("MORE_LOG", "1")
EXPIRATION_EPOCH_ATTRIBUTE = "__SYSTEM__EXPIRATION_EPOCH"
@@ -9,7 +9,6 @@ OBJECT_ALREADY_REMOVED = "code = 2052.*message = object already removed"
SESSION_NOT_FOUND = "code = 4096.*message = session token not found"
OUT_OF_RANGE = "code = 2053.*message = out of range"
EXPIRED_SESSION_TOKEN = "code = 4097.*message = expired session token"
ADD_CHAIN_ERROR = "code = 5120 message = apemanager access denied"
# TODO: Change to codes with message
# OBJECT_IS_LOCKED = "code = 2050.*message = object is locked"
# LOCK_NON_REGULAR_OBJECT = "code = 2051.*message = ..." will be available once 2092 is fixed

@@ -28,10 +27,5 @@ S3_BUCKET_DOES_NOT_ALLOW_ACL = "The bucket does not allow ACLs"
S3_MALFORMED_XML_REQUEST = "The XML you provided was not well-formed or did not validate against our published schema."

RULE_ACCESS_DENIED_CONTAINER = "access to container operation {operation} is denied by access policy engine: Access denied"
# Errors from node missing reasons if request was forwarded. Commenting for now
# RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request"
RULE_ACCESS_DENIED_OBJECT = "access to object operation denied: ape denied request: method {operation}: Access denied"
NO_RULE_FOUND_CONTAINER = "access to container operation {operation} is denied by access policy engine: NoRuleFound"
# Errors from node missing reasons if request was forwarded. Commenting for now
# NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request: method {operation}: NoRuleFound"
NO_RULE_FOUND_OBJECT = "access to object operation denied: ape denied request"
@@ -26,7 +26,6 @@ BACKGROUND_LOAD_CONTAINER_PLACEMENT_POLICY = os.getenv(
)
BACKGROUND_LOAD_S3_LOCATION = os.getenv("BACKGROUND_LOAD_S3_LOCATION", "node-off")
PRESET_CONTAINERS_COUNT = os.getenv("CONTAINERS_COUNT", "40")
PRESET_CONTAINER_CREATION_RETRY_COUNT = os.getenv("CONTAINER_CREATION_RETRY_COUNT", "20")
# TODO: At least one object is required due to a bug in xk6 (buckets with no objects produce millions of exceptions on read)
PRESET_OBJECTS_COUNT = os.getenv("OBJ_COUNT", "1")
K6_DIRECTORY = os.getenv("K6_DIRECTORY", "/etc/k6")
@@ -16,10 +16,11 @@ OPTIONAL_NODE_UNDER_LOAD = os.getenv("OPTIONAL_NODE_UNDER_LOAD")
OPTIONAL_FAILOVER_ENABLED = str_to_bool(os.getenv("OPTIONAL_FAILOVER_ENABLED", "true"))

# Set this to True to disable background load, i.e. a node which is supposed to be stopped will not actually be stopped.
OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true"))
OPTIONAL_BACKGROUND_LOAD_ENABLED = str_to_bool(
    os.getenv("OPTIONAL_BACKGROUND_LOAD_ENABLED", "true")
)

# Set this to False to disable autouse fixtures like node healthcheck during development.
OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true"))

# Use cache for fixtures with the @cached_fixture decorator
OPTIONAL_CACHE_FIXTURES = str_to_bool(os.getenv("OPTIONAL_CACHE_FIXTURES", "false"))
OPTIONAL_AUTOUSE_FIXTURES_ENABLED = str_to_bool(
    os.getenv("OPTIONAL_AUTOUSE_FIXTURES_ENABLED", "true")
)
@@ -4,6 +4,6 @@ ALL_USERS_GROUP_READ_GRANT = {"Grantee": {"Type": "Group", "URI": ALL_USERS_GROU
CANONICAL_USER_FULL_CONTROL_GRANT = {"Grantee": {"Type": "CanonicalUser"}, "Permission": "FULL_CONTROL"}

# https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#canned-acl
PRIVATE_GRANTS = []
PUBLIC_READ_GRANTS = [ALL_USERS_GROUP_READ_GRANT]
PUBLIC_READ_WRITE_GRANTS = [ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT]
PRIVATE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT]
PUBLIC_READ_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_READ_GRANT]
PUBLIC_READ_WRITE_GRANTS = [CANONICAL_USER_FULL_CONTROL_GRANT, ALL_USERS_GROUP_WRITE_GRANT, ALL_USERS_GROUP_READ_GRANT]
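These grant lists are what tests compare `get_bucket_acl` output against; a hedged sketch of such a check (the client and bucket names are illustrative, and the bucket is assumed to have been created with the canned ACL "public-read"):

    grants = s3_client.get_bucket_acl("test-bucket")
    assert all(grant in grants for grant in PUBLIC_READ_GRANTS)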
3 src/frostfs_testlib/s3/__init__.py Normal file

@@ -0,0 +1,3 @@
from frostfs_testlib.s3.aws_cli_client import AwsCliClient
from frostfs_testlib.s3.boto3_client import Boto3ClientWrapper
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus
@@ -1,16 +1,16 @@
import json
import logging
import os
import uuid
from datetime import datetime
from time import sleep
from typing import Literal, Optional, Union

from frostfs_testlib import reporter
from frostfs_testlib.clients.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
from frostfs_testlib.shell import CommandOptions
from frostfs_testlib.shell.local_shell import LocalShell
from frostfs_testlib.utils import string_utils

# TODO: Refactor this code to use shell instead of _cmd_run
from frostfs_testlib.utils.cli_utils import _configure_aws_cli

@@ -68,10 +68,7 @@ class AwsCliClient(S3ClientWrapper):
        location_constraint: Optional[str] = None,
    ) -> str:
        if bucket is None:
            bucket = string_utils.unique_name("bucket-")

        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'
            bucket = str(uuid.uuid4())

        if object_lock_enabled_for_bucket is None:
            object_lock = ""

@@ -94,6 +91,7 @@ class AwsCliClient(S3ClientWrapper):
        if location_constraint:
            cmd += f" --create-bucket-configuration LocationConstraint={location_constraint}"
        self.local_shell.exec(cmd)
        sleep(S3_SYNC_WAIT_TIME)

        return bucket
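The quoting guard added throughout this file protects the AWS CLI from bucket names that start with a dash or contain spaces; in isolation it behaves like this (the helper name is illustrative, the guard itself is copied from the methods below):

    def quote_bucket(bucket: str) -> str:
        # Wrap awkward names so the CLI does not parse them as options
        # or split them on whitespace.
        if bucket.startswith("-") or " " in bucket:
            return f'"{bucket}"'
        return bucket


    print(quote_bucket("-leading-dash"))  # "-leading-dash" (quoted)
    print(quote_bucket("my bucket"))      # "my bucket" (quoted)
    print(quote_bucket("normal-bucket"))  # normal-bucket (unchanged)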
@@ -106,25 +104,17 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Delete bucket S3")
    def delete_bucket(self, bucket: str) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = f"aws {self.common_flags} s3api delete-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
        self.local_shell.exec(cmd, command_options)
        sleep(S3_SYNC_WAIT_TIME)

    @reporter.step("Head bucket S3")
    def head_bucket(self, bucket: str) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = f"aws {self.common_flags} s3api head-bucket --bucket {bucket} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
        self.local_shell.exec(cmd)

    @reporter.step("Put bucket versioning status")
    def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api put-bucket-versioning --bucket {bucket} "
            f"--versioning-configuration Status={status.value} "

@@ -134,9 +124,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Get bucket versioning status")
    def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api get-bucket-versioning --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -147,9 +134,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Put bucket tagging")
    def put_bucket_tagging(self, bucket: str, tags: list) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        tags_json = {"TagSet": [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]}
        cmd = (
            f"aws {self.common_flags} s3api put-bucket-tagging --bucket {bucket} "

@@ -159,9 +143,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Get bucket tagging")
    def get_bucket_tagging(self, bucket: str) -> list:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api get-bucket-tagging --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -171,21 +152,16 @@ class AwsCliClient(S3ClientWrapper):
        return response.get("TagSet")

    @reporter.step("Get bucket acl")
    def get_bucket_acl(self, bucket: str) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

    def get_bucket_acl(self, bucket: str) -> list:
        cmd = (
            f"aws {self.common_flags} s3api get-bucket-acl --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
        )
        output = self.local_shell.exec(cmd).stdout
        return self._to_json(output)
        response = self._to_json(output)
        return response.get("Grants")

    @reporter.step("Get bucket location")
    def get_bucket_location(self, bucket: str) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api get-bucket-location --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -195,20 +171,8 @@ class AwsCliClient(S3ClientWrapper):
        return response.get("LocationConstraint")

    @reporter.step("List objects S3")
    def list_objects(
        self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
    ) -> Union[dict, list[str]]:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} "
        if page_size:
            cmd = cmd.replace("--no-paginate", "")
            cmd += f" --page-size {page_size} "
        if prefix:
            cmd += f" --prefix {prefix}"
        if self.profile:
            cmd += f" --profile {self.profile} "
    def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
        cmd = f"aws {self.common_flags} s3api list-objects --bucket {bucket} " f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"
        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)

@@ -219,9 +183,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("List objects S3 v2")
    def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api list-objects-v2 --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -236,9 +197,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("List objects versions S3")
    def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -249,9 +207,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("List objects delete markers S3")
    def list_delete_markers(self, bucket: str, full_output: bool = False) -> list:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api list-object-versions --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -275,13 +230,8 @@ class AwsCliClient(S3ClientWrapper):
    ) -> str:
        if bucket is None:
            bucket = source_bucket

        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        if key is None:
            key = string_utils.unique_name("copy-object-")

            key = os.path.join(os.getcwd(), str(uuid.uuid4()))
        copy_source = f"{source_bucket}/{source_key}"

        cmd = (

@@ -318,9 +268,6 @@ class AwsCliClient(S3ClientWrapper):
        grant_full_control: Optional[str] = None,
        grant_read: Optional[str] = None,
    ) -> str:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        if key is None:
            key = os.path.basename(filepath)

@@ -352,9 +299,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Head object S3")
    def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        version = f" --version-id {version_id}" if version_id else ""
        cmd = (
            f"aws {self.common_flags} s3api head-object --bucket {bucket} --key {key} "

@@ -373,10 +317,7 @@ class AwsCliClient(S3ClientWrapper):
        object_range: Optional[tuple[int, int]] = None,
        full_output: bool = False,
    ) -> dict | TestFile:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, string_utils.unique_name("dl-object-")))
        test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
        version = f" --version-id {version_id}" if version_id else ""
        cmd = (
            f"aws {self.common_flags} s3api get-object --bucket {bucket} --key {key} "

@@ -390,9 +331,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Get object ACL")
    def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        version = f" --version-id {version_id}" if version_id else ""
        cmd = (
            f"aws {self.common_flags} s3api get-object-acl --bucket {bucket} --key {key} "

@@ -411,9 +349,6 @@ class AwsCliClient(S3ClientWrapper):
        grant_write: Optional[str] = None,
        grant_read: Optional[str] = None,
    ) -> list:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api put-object-acl --bucket {bucket} --key {key} "
            f" --endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -436,9 +371,6 @@ class AwsCliClient(S3ClientWrapper):
        grant_write: Optional[str] = None,
        grant_read: Optional[str] = None,
    ) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api put-bucket-acl --bucket {bucket} "
            f" --endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -453,9 +385,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Delete objects S3")
    def delete_objects(self, bucket: str, keys: list[str]) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        file_path = os.path.join(os.getcwd(), ASSETS_DIR, "delete.json")
        delete_structure = json.dumps(_make_objs_dict(keys))
        with open(file_path, "w") as out_file:

@@ -468,26 +397,22 @@ class AwsCliClient(S3ClientWrapper):
        )
        output = self.local_shell.exec(cmd, command_options).stdout
        response = self._to_json(output)
        sleep(S3_SYNC_WAIT_TIME)
        return response

    @reporter.step("Delete object S3")
    def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        version = f" --version-id {version_id}" if version_id else ""
        cmd = (
            f"aws {self.common_flags} s3api delete-object --bucket {bucket} "
            f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
        )
        output = self.local_shell.exec(cmd, command_options).stdout
        sleep(S3_SYNC_WAIT_TIME)
        return self._to_json(output)

    @reporter.step("Delete object versions S3")
    def delete_object_versions(self, bucket: str, object_versions: list) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        # Build deletion list in S3 format
        delete_list = {
            "Objects": [

@@ -510,13 +435,11 @@ class AwsCliClient(S3ClientWrapper):
            f"--delete file://{file_path} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
        )
        output = self.local_shell.exec(cmd, command_options).stdout
        sleep(S3_SYNC_WAIT_TIME)
        return self._to_json(output)

    @reporter.step("Delete object versions S3 without delete markers")
    def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        # Delete objects without creating delete markers
        for object_version in object_versions:
            self.delete_object(bucket=bucket, key=object_version["Key"], version_id=object_version["VersionId"])

@@ -532,8 +455,6 @@ class AwsCliClient(S3ClientWrapper):
        part_number: int = 0,
        full_output: bool = True,
    ) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        attrs = ",".join(attributes)
        version = f" --version-id {version_id}" if version_id else ""

@@ -557,9 +478,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Get bucket policy")
    def get_bucket_policy(self, bucket: str) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api get-bucket-policy --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -570,9 +488,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Delete bucket policy")
    def delete_bucket_policy(self, bucket: str) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api delete-bucket-policy --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -583,9 +498,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Put bucket policy")
    def put_bucket_policy(self, bucket: str, policy: dict) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        # Leaving it as it was in the test repo. Double dumps to escape the resulting string
        # Example:
        # policy = {"a": 1}
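The "double dumps" note above refers to encoding the policy document twice so the quotes arrive escaped; the effect in isolation:

    import json

    policy = {"a": 1}
    print(json.dumps(policy))              # {"a": 1}
    print(json.dumps(json.dumps(policy)))  # "{\"a\": 1}" -- escaped, safe to splice into a shell command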
@@ -601,9 +513,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Get bucket cors")
    def get_bucket_cors(self, bucket: str) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api get-bucket-cors --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -614,9 +523,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Put bucket cors")
    def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api put-bucket-cors --bucket {bucket} "
            f"--cors-configuration '{json.dumps(cors_configuration)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -625,9 +531,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Delete bucket cors")
    def delete_bucket_cors(self, bucket: str) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api delete-bucket-cors --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -636,9 +539,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Delete bucket tagging")
    def delete_bucket_tagging(self, bucket: str) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api delete-bucket-tagging --bucket {bucket} "
            f"--endpoint {self.s3gate_endpoint} --profile {self.profile}"

@@ -654,9 +554,6 @@ class AwsCliClient(S3ClientWrapper):
        version_id: Optional[str] = None,
        bypass_governance_retention: Optional[bool] = None,
    ) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        version = f" --version-id {version_id}" if version_id else ""
        cmd = (
            f"aws {self.common_flags} s3api put-object-retention --bucket {bucket} --key {key} "

@@ -674,9 +571,6 @@ class AwsCliClient(S3ClientWrapper):
        legal_hold_status: Literal["ON", "OFF"],
        version_id: Optional[str] = None,
    ) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        version = f" --version-id {version_id}" if version_id else ""
        legal_hold = json.dumps({"Status": legal_hold_status})
        cmd = (

@@ -687,9 +581,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Put object tagging")
    def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
        tagging = {"TagSet": tags}
        version = f" --version-id {version_id}" if version_id else ""

@@ -701,9 +592,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Get object tagging")
    def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        version = f" --version-id {version_id}" if version_id else ""
        cmd = (
            f"aws {self.common_flags} s3api get-object-tagging --bucket {bucket} --key {key} "

@@ -715,9 +603,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Delete object tagging")
    def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        version = f" --version-id {version_id}" if version_id else ""
        cmd = (
            f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} "

@@ -733,9 +618,6 @@ class AwsCliClient(S3ClientWrapper):
        acl: Optional[str] = None,
        metadata: Optional[dict] = None,
    ) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3 sync {dir_path} s3://{bucket} " f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
        )

@@ -756,9 +638,6 @@ class AwsCliClient(S3ClientWrapper):
        acl: Optional[str] = None,
        metadata: Optional[dict] = None,
    ) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3 cp {dir_path} s3://{bucket} "
            f"--endpoint-url {self.s3gate_endpoint} --recursive --profile {self.profile}"

@@ -774,9 +653,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Create multipart upload S3")
    def create_multipart_upload(self, bucket: str, key: str) -> str:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api create-multipart-upload --bucket {bucket} "
            f"--key {key} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}"

@@ -790,9 +666,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("List multipart uploads S3")
    def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api list-multipart-uploads --bucket {bucket} "
            f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"

@@ -803,9 +676,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Abort multipart upload S3")
    def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api abort-multipart-upload --bucket {bucket} "
            f"--key {key} --upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}"

@@ -814,9 +684,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Upload part S3")
    def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api upload-part --bucket {bucket} --key {key} "
            f"--upload-id {upload_id} --part-number {part_num} --body {filepath} "

@@ -829,9 +696,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Upload copy part S3")
    def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api upload-part-copy --bucket {bucket} --key {key} "
            f"--upload-id {upload_id} --part-number {part_num} --copy-source {copy_source} "

@@ -845,9 +709,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("List parts S3")
    def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api list-parts --bucket {bucket} --key {key} "
            f"--upload-id {upload_id} --endpoint-url {self.s3gate_endpoint} --profile {self.profile}"

@@ -860,10 +721,7 @@ class AwsCliClient(S3ClientWrapper):
        return response["Parts"]

    @reporter.step("Complete multipart upload S3")
    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
        file_path = os.path.join(os.getcwd(), ASSETS_DIR, "parts.json")
        parts_dict = {"Parts": [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]}

@@ -884,9 +742,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Put object lock configuration")
    def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api put-object-lock-configuration --bucket {bucket} "
            f"--object-lock-configuration '{json.dumps(configuration)}' --endpoint-url {self.s3gate_endpoint} --profile {self.profile}"

@@ -896,9 +751,6 @@ class AwsCliClient(S3ClientWrapper):

    @reporter.step("Get object lock configuration")
    def get_object_lock_configuration(self, bucket: str):
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api get-object-lock-configuration --bucket {bucket} "
            f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"

@@ -907,45 +759,6 @@ class AwsCliClient(S3ClientWrapper):
        response = self._to_json(output)
        return response.get("ObjectLockConfiguration")

    @reporter.step("Put bucket lifecycle configuration")
    def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api put-bucket-lifecycle-configuration --bucket {bucket} "
            f"--endpoint-url {self.s3gate_endpoint} --lifecycle-configuration file://{dumped_configuration} --profile {self.profile}"
        )
        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)
        return response

    @reporter.step("Get bucket lifecycle configuration")
    def get_bucket_lifecycle_configuration(self, bucket: str) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api get-bucket-lifecycle-configuration --bucket {bucket} "
            f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
        )
        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)
        return response

    @reporter.step("Delete bucket lifecycle configuration")
    def delete_bucket_lifecycle(self, bucket: str) -> dict:
        if bucket.startswith("-") or " " in bucket:
            bucket = f'"{bucket}"'

        cmd = (
            f"aws {self.common_flags} s3api delete-bucket-lifecycle --bucket {bucket} "
            f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
        )
        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)
        return response

    @staticmethod
    def _to_json(output: str) -> dict:
        json_output = {}
@@ -1167,7 +980,7 @@ class AwsCliClient(S3ClientWrapper):
        response = self._to_json(output)

        assert response.get("Policy"), f"Expected Policy in response:\n{response}"
        assert response["Policy"].get("Arn") == policy_arn, f"PolicyArn should be equal to {policy_arn}"
        assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"

        return response

@@ -1411,127 +1224,3 @@ class AwsCliClient(S3ClientWrapper):
        response = self._to_json(output)

        return response

    @reporter.step("Adds one or more tags to an IAM user")
    def iam_tag_user(self, user_name: str, tags: list) -> dict:
        tags_json = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
        cmd = (
            f"aws {self.common_flags} iam tag-user --user-name {user_name} --tags '{json.dumps(tags_json)}' --endpoint {self.iam_endpoint}"
        )
        if self.profile:
            cmd += f" --profile {self.profile}"

        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)

        return response

    @reporter.step("List tags of IAM user")
    def iam_list_user_tags(self, user_name: str) -> dict:
        cmd = f"aws {self.common_flags} iam list-user-tags --user-name {user_name} --endpoint {self.iam_endpoint}"
        if self.profile:
            cmd += f" --profile {self.profile}"

        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)

        return response

    @reporter.step("Removes the specified tags from the user")
    def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
        tag_keys_joined = " ".join(tag_keys)
        cmd = f"aws {self.common_flags} iam untag-user --user-name {user_name} --tag-keys {tag_keys_joined} --endpoint {self.iam_endpoint}"
        if self.profile:
            cmd += f" --profile {self.profile}"

        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)

        return response

    # MFA METHODS
    @reporter.step("Creates a new virtual MFA device")
    def iam_create_virtual_mfa_device(self, virtual_mfa_device_name: str, outfile: str, bootstrap_method: str) -> tuple:
        cmd = f"aws {self.common_flags} iam create-virtual-mfa-device --virtual-mfa-device-name {virtual_mfa_device_name}\
            --outfile {outfile} --bootstrap-method {bootstrap_method} --endpoint {self.iam_endpoint}"

        if self.profile:
            cmd += f" --profile {self.profile}"

        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)
        serial_number = response.get("VirtualMFADevice", {}).get("SerialNumber")
        assert serial_number, f"Expected SerialNumber in response:\n{response}"

        return serial_number, False

    @reporter.step("Deactivates the specified MFA device and removes it from association with the user name")
    def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
        cmd = f"aws {self.common_flags} iam deactivate-mfa-device --user-name {user_name} --serial-number {serial_number} --endpoint {self.iam_endpoint}"
        if self.profile:
            cmd += f" --profile {self.profile}"

        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)

        return response

    @reporter.step("Deletes a virtual MFA device")
    def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
        cmd = f"aws {self.common_flags} iam delete-virtual-mfa-device --serial-number {serial_number} --endpoint {self.iam_endpoint}"
        if self.profile:
            cmd += f" --profile {self.profile}"

        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)

        return response

    @reporter.step("Enables the specified MFA device and associates it with the specified IAM user")
    def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
        cmd = f"aws {self.common_flags} iam enable-mfa-device --user-name {user_name} --serial-number {serial_number} --authentication-code1 {authentication_code1}\
            --authentication-code2 {authentication_code2} --endpoint {self.iam_endpoint}"
        if self.profile:
            cmd += f" --profile {self.profile}"

        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)

        return response

    @reporter.step("Lists the MFA devices for an IAM user")
    def iam_list_virtual_mfa_devices(self) -> dict:
        cmd = f"aws {self.common_flags} iam list-virtual-mfa-devices --endpoint {self.iam_endpoint}"
        if self.profile:
            cmd += f" --profile {self.profile}"

        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)
        assert response.get("VirtualMFADevices"), f"Expected VirtualMFADevices in response:\n{response}"

        return response

    @reporter.step("Get session token for user")
    def sts_get_session_token(
        self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None
    ) -> tuple:
        cmd = f"aws {self.common_flags} sts get-session-token --endpoint {self.iam_endpoint}"
        if duration_seconds:
            cmd += f" --duration-seconds {duration_seconds}"
        if serial_number:
            cmd += f" --serial-number {serial_number}"
        if token_code:
            cmd += f" --token-code {token_code}"
        if self.profile:
            cmd += f" --profile {self.profile}"

        output = self.local_shell.exec(cmd).stdout
        response = self._to_json(output)
        access_key = response.get("Credentials", {}).get("AccessKeyId")
        secret_access_key = response.get("Credentials", {}).get("SecretAccessKey")
        session_token = response.get("Credentials", {}).get("SessionToken")
        assert access_key, f"Expected AccessKeyId in response:\n{response}"
        assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
        assert session_token, f"Expected SessionToken in response:\n{response}"

        return access_key, secret_access_key, session_token
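Since `sts_get_session_token` validates the response and returns a 3-tuple, callers unpack it directly; a hedged usage sketch (the client variable and the MFA inputs are illustrative, and `serial_number` would come from `iam_create_virtual_mfa_device`):

    access_key, secret_access_key, session_token = cli.sts_get_session_token(
        duration_seconds="3600",
        serial_number=serial_number,
        token_code="123456",  # current TOTP code for the MFA device
    )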
905 src/frostfs_testlib/s3/boto3_client.py Normal file

@@ -0,0 +1,905 @@
import json
import logging
import os
import uuid
from datetime import datetime
from functools import wraps
from time import sleep
from typing import Literal, Optional, Union

import boto3
import urllib3
from botocore.config import Config
from botocore.exceptions import ClientError
from mypy_boto3_s3 import S3Client

from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, RETRY_MODE, S3_SYNC_WAIT_TIME
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict

# TODO: Refactor this code to use shell instead of _cmd_run
from frostfs_testlib.utils.cli_utils import _configure_aws_cli, log_command_execution
from frostfs_testlib.utils.file_utils import TestFile

logger = logging.getLogger("NeoLogger")

# Disable warnings on self-signed certificate which the
# boto library produces on requests to S3-gate in dev-env
urllib3.disable_warnings()


def report_error(func):
    @wraps(func)
    def deco(*a, **kw):
        try:
            return func(*a, **kw)
        except ClientError as err:
            log_command_execution("Result", str(err))
            raise

    return deco
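An illustrative use of the `report_error` decorator on a free function: any botocore `ClientError` raised inside is logged through `log_command_execution` and then re-raised, so failed boto3 calls still show up in test reports (the function and its arguments are made up for the sketch):

    @report_error
    def risky_head_bucket(client, bucket: str):
        # A ClientError raised here is logged as "Result" and re-raised.
        return client.head_bucket(Bucket=bucket)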
class Boto3ClientWrapper(S3ClientWrapper):
    __repr_name__: str = "Boto3 client"

    @reporter.step("Configure S3 client (boto3)")
    @report_error
    def __init__(
        self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
    ) -> None:
        self.boto3_client: S3Client = None
        self.session = boto3.Session()
        self.region = region
        self.config = Config(
            retries={
                "max_attempts": MAX_REQUEST_ATTEMPTS,
                "mode": RETRY_MODE,
            }
        )
        self.access_key_id: str = access_key_id
        self.secret_access_key: str = secret_access_key
        self.s3gate_endpoint: str = ""
        self.boto3_iam_client: S3Client = None
        self.set_endpoint(s3gate_endpoint)

    @reporter.step("Set endpoint S3 to {s3gate_endpoint}")
    def set_endpoint(self, s3gate_endpoint: str):
        if self.s3gate_endpoint == s3gate_endpoint:
            return

        self.s3gate_endpoint = s3gate_endpoint

        self.boto3_client: S3Client = self.session.client(
            service_name="s3",
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.secret_access_key,
            region_name=self.region,
            config=self.config,
            endpoint_url=s3gate_endpoint,
            verify=False,
        )

    @reporter.step("Set endpoint IAM to {iam_endpoint}")
    def set_iam_endpoint(self, iam_endpoint: str):
        self.boto3_iam_client = self.session.client(
            service_name="iam",
            aws_access_key_id=self.access_key_id,
            aws_secret_access_key=self.secret_access_key,
            endpoint_url=iam_endpoint,
            verify=False,
        )

    def _to_s3_param(self, param: str):
        replacement_map = {
            "Acl": "ACL",
            "Cors": "CORS",
            "_": "",
        }
        result = param.title()
        for find, replace in replacement_map.items():
            result = result.replace(find, replace)
        return result
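`_to_s3_param` turns the wrapper's snake_case parameter names into boto3's CamelCase keyword arguments; paired with the `locals()` comprehension used in `put_bucket_acl` further down, that is what lets one `params` dict feed the boto3 call directly. A standalone mirror of the conversion:

    def to_s3_param(param: str) -> str:
        # Mirrors Boto3ClientWrapper._to_s3_param
        result = param.title()
        for find, replace in {"Acl": "ACL", "Cors": "CORS", "_": ""}.items():
            result = result.replace(find, replace)
        return result


    print(to_s3_param("grant_read"))                      # GrantRead
    print(to_s3_param("object_lock_enabled_for_bucket"))  # ObjectLockEnabledForBucket
    print(to_s3_param("acl"))                             # ACL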
# BUCKET METHODS #
|
||||
@reporter.step("Create bucket S3")
|
||||
@report_error
|
||||
def create_bucket(
|
||||
self,
|
||||
bucket: Optional[str] = None,
|
||||
object_lock_enabled_for_bucket: Optional[bool] = None,
|
||||
acl: Optional[str] = None,
|
||||
grant_write: Optional[str] = None,
|
||||
grant_read: Optional[str] = None,
|
||||
grant_full_control: Optional[str] = None,
|
||||
location_constraint: Optional[str] = None,
|
||||
) -> str:
|
||||
if bucket is None:
|
||||
bucket = str(uuid.uuid4())
|
||||
|
||||
params = {"Bucket": bucket}
|
||||
if object_lock_enabled_for_bucket is not None:
|
||||
params.update({"ObjectLockEnabledForBucket": object_lock_enabled_for_bucket})
|
||||
if acl is not None:
|
||||
params.update({"ACL": acl})
|
||||
elif grant_write or grant_read or grant_full_control:
|
||||
if grant_write:
|
||||
params.update({"GrantWrite": grant_write})
|
||||
elif grant_read:
|
||||
params.update({"GrantRead": grant_read})
|
||||
elif grant_full_control:
|
||||
params.update({"GrantFullControl": grant_full_control})
|
||||
if location_constraint:
|
||||
params.update({"CreateBucketConfiguration": {"LocationConstraint": location_constraint}})
|
||||
|
||||
s3_bucket = self.boto3_client.create_bucket(**params)
|
||||
log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
|
||||
sleep(S3_SYNC_WAIT_TIME)
|
||||
return bucket
|
||||
|
||||
@reporter.step("List buckets S3")
|
||||
@report_error
|
||||
def list_buckets(self) -> list[str]:
|
||||
found_buckets = []
|
||||
|
||||
response = self.boto3_client.list_buckets()
|
||||
log_command_execution("S3 List buckets result", response)
|
||||
|
||||
for bucket in response["Buckets"]:
|
||||
found_buckets.append(bucket["Name"])
|
||||
|
||||
return found_buckets
|
||||
|
||||
@reporter.step("Delete bucket S3")
|
||||
@report_error
|
||||
def delete_bucket(self, bucket: str) -> None:
|
||||
response = self.boto3_client.delete_bucket(Bucket=bucket)
|
||||
log_command_execution("S3 Delete bucket result", response)
|
||||
sleep(S3_SYNC_WAIT_TIME)
|
||||
|
||||
@reporter.step("Head bucket S3")
|
||||
@report_error
|
||||
def head_bucket(self, bucket: str) -> None:
|
||||
response = self.boto3_client.head_bucket(Bucket=bucket)
|
||||
log_command_execution("S3 Head bucket result", response)
|
||||
|
||||
@reporter.step("Put bucket versioning status")
|
||||
@report_error
|
||||
def put_bucket_versioning(self, bucket: str, status: VersioningStatus) -> None:
|
||||
response = self.boto3_client.put_bucket_versioning(Bucket=bucket, VersioningConfiguration={"Status": status.value})
|
||||
log_command_execution("S3 Set bucket versioning to", response)
|
||||
|
||||
@reporter.step("Get bucket versioning status")
|
||||
@report_error
|
||||
def get_bucket_versioning_status(self, bucket: str) -> Literal["Enabled", "Suspended"]:
|
||||
response = self.boto3_client.get_bucket_versioning(Bucket=bucket)
|
||||
status = response.get("Status")
|
||||
log_command_execution("S3 Got bucket versioning status", response)
|
||||
return status
|
||||
|
||||
@reporter.step("Put bucket tagging")
|
||||
@report_error
|
||||
def put_bucket_tagging(self, bucket: str, tags: list) -> None:
|
||||
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
|
||||
tagging = {"TagSet": tags}
|
||||
response = self.boto3_client.put_bucket_tagging(Bucket=bucket, Tagging=tagging)
|
||||
log_command_execution("S3 Put bucket tagging", response)
|
||||
|
||||
@reporter.step("Get bucket tagging")
|
||||
@report_error
|
||||
def get_bucket_tagging(self, bucket: str) -> list:
|
||||
response = self.boto3_client.get_bucket_tagging(Bucket=bucket)
|
||||
log_command_execution("S3 Get bucket tagging", response)
|
||||
return response.get("TagSet")
|
||||
|
||||
@reporter.step("Get bucket acl")
|
||||
@report_error
|
||||
def get_bucket_acl(self, bucket: str) -> list:
|
||||
response = self.boto3_client.get_bucket_acl(Bucket=bucket)
|
||||
log_command_execution("S3 Get bucket acl", response)
|
||||
return response.get("Grants")
|
||||
|
||||
@reporter.step("Delete bucket tagging")
|
||||
@report_error
|
||||
def delete_bucket_tagging(self, bucket: str) -> None:
|
||||
response = self.boto3_client.delete_bucket_tagging(Bucket=bucket)
|
||||
log_command_execution("S3 Delete bucket tagging", response)
|
||||
|
||||
@reporter.step("Put bucket ACL")
|
||||
@report_error
|
||||
def put_bucket_acl(
|
||||
self,
|
||||
bucket: str,
|
||||
acl: Optional[str] = None,
|
||||
grant_write: Optional[str] = None,
|
||||
grant_read: Optional[str] = None,
|
||||
) -> None:
|
||||
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
|
||||
response = self.boto3_client.put_bucket_acl(**params)
|
||||
log_command_execution("S3 ACL bucket result", response)
|
||||
|
||||
@reporter.step("Put object lock configuration")
|
||||
@report_error
|
||||
def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
|
||||
response = self.boto3_client.put_object_lock_configuration(Bucket=bucket, ObjectLockConfiguration=configuration)
|
||||
log_command_execution("S3 put_object_lock_configuration result", response)
|
||||
return response
|
||||
|
||||
@reporter.step("Get object lock configuration")
|
||||
@report_error
|
||||
def get_object_lock_configuration(self, bucket: str) -> dict:
|
||||
response = self.boto3_client.get_object_lock_configuration(Bucket=bucket)
|
||||
log_command_execution("S3 get_object_lock_configuration result", response)
|
||||
return response.get("ObjectLockConfiguration")
|
||||
|
||||
@reporter.step("Get bucket policy")
|
||||
@report_error
|
||||
def get_bucket_policy(self, bucket: str) -> str:
|
||||
response = self.boto3_client.get_bucket_policy(Bucket=bucket)
|
||||
log_command_execution("S3 get_bucket_policy result", response)
|
||||
return response.get("Policy")
|
||||
|
||||
@reporter.step("Delete bucket policy")
|
||||
@report_error
|
||||
def delete_bucket_policy(self, bucket: str) -> str:
|
||||
response = self.boto3_client.delete_bucket_policy(Bucket=bucket)
|
||||
log_command_execution("S3 delete_bucket_policy result", response)
|
||||
return response
|
||||
|
||||
@reporter.step("Put bucket policy")
|
||||
@report_error
|
||||
def put_bucket_policy(self, bucket: str, policy: dict) -> None:
|
||||
response = self.boto3_client.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))
|
||||
log_command_execution("S3 put_bucket_policy result", response)
|
||||
return response
|
||||
|
||||
@reporter.step("Get bucket cors")
|
||||
@report_error
|
||||
def get_bucket_cors(self, bucket: str) -> dict:
|
||||
response = self.boto3_client.get_bucket_cors(Bucket=bucket)
|
||||
log_command_execution("S3 get_bucket_cors result", response)
|
||||
return response.get("CORSRules")
|
||||
|
||||
@reporter.step("Get bucket location")
|
||||
@report_error
|
||||
def get_bucket_location(self, bucket: str) -> str:
|
||||
response = self.boto3_client.get_bucket_location(Bucket=bucket)
|
||||
log_command_execution("S3 get_bucket_location result", response)
|
||||
return response.get("LocationConstraint")
|
||||
|
||||
@reporter.step("Put bucket cors")
|
||||
@report_error
|
||||
def put_bucket_cors(self, bucket: str, cors_configuration: dict) -> None:
|
||||
response = self.boto3_client.put_bucket_cors(Bucket=bucket, CORSConfiguration=cors_configuration)
|
||||
log_command_execution("S3 put_bucket_cors result", response)
|
||||
return response
|
||||
|
||||
@reporter.step("Delete bucket cors")
|
||||
@report_error
|
||||
def delete_bucket_cors(self, bucket: str) -> None:
|
||||
response = self.boto3_client.delete_bucket_cors(Bucket=bucket)
|
||||
log_command_execution("S3 delete_bucket_cors result", response)
|
||||
|
||||
# END OF BUCKET METHODS #
|
||||
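    # NOTE: put_bucket_acl above introduces the locals()-based pattern reused
    # throughout this class: collect the function's own arguments, map the names
    # through _to_s3_param, and drop self/None entries. A standalone sketch
    # (demo() is hypothetical, not part of the source):
    #
    #   def demo(bucket: str, version_id: Optional[str] = None):
    #       params = {self._to_s3_param(k): v for k, v in locals().items() if k != "self" and v is not None}
    #       # version_id=None  -> {"Bucket": "b"}
    #       # version_id="v1"  -> {"Bucket": "b", "VersionId": "v1"}
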
    # OBJECT METHODS #

    @reporter.step("List objects S3 v2")
    @report_error
    def list_objects_v2(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
        response = self.boto3_client.list_objects_v2(Bucket=bucket)
        log_command_execution("S3 v2 List objects result", response)

        obj_list = [obj["Key"] for obj in response.get("Contents", [])]
        logger.info(f"Found s3 objects: {obj_list}")

        return response if full_output else obj_list

    @reporter.step("List objects S3")
    @report_error
    def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
        response = self.boto3_client.list_objects(Bucket=bucket)
        log_command_execution("S3 List objects result", response)

        obj_list = [obj["Key"] for obj in response.get("Contents", [])]
        logger.info(f"Found s3 objects: {obj_list}")

        return response if full_output else obj_list

    @reporter.step("List objects versions S3")
    @report_error
    def list_objects_versions(self, bucket: str, full_output: bool = False) -> dict:
        response = self.boto3_client.list_object_versions(Bucket=bucket)
        log_command_execution("S3 List objects versions result", response)
        return response if full_output else response.get("Versions", [])

    @reporter.step("List objects delete markers S3")
    @report_error
    def list_delete_markers(self, bucket: str, full_output: bool = False) -> list:
        response = self.boto3_client.list_object_versions(Bucket=bucket)
        log_command_execution("S3 List objects delete markers result", response)
        return response if full_output else response.get("DeleteMarkers", [])

@reporter.step("Put object S3")
|
||||
@report_error
|
||||
def put_object(
|
||||
self,
|
||||
bucket: str,
|
||||
filepath: str,
|
||||
key: Optional[str] = None,
|
||||
metadata: Optional[dict] = None,
|
||||
tagging: Optional[str] = None,
|
||||
acl: Optional[str] = None,
|
||||
object_lock_mode: Optional[str] = None,
|
||||
object_lock_retain_until_date: Optional[datetime] = None,
|
||||
object_lock_legal_hold_status: Optional[str] = None,
|
||||
grant_full_control: Optional[str] = None,
|
||||
grant_read: Optional[str] = None,
|
||||
) -> str:
|
||||
if key is None:
|
||||
key = os.path.basename(filepath)
|
||||
|
||||
with open(filepath, "rb") as put_file:
|
||||
body = put_file.read()
|
||||
|
||||
params = {
|
||||
self._to_s3_param(param): value
|
||||
for param, value in locals().items()
|
||||
if param not in ["self", "filepath", "put_file"] and value is not None
|
||||
}
|
||||
response = self.boto3_client.put_object(**params)
|
||||
log_command_execution("S3 Put object result", response)
|
||||
return response.get("VersionId")
|
||||
|
||||
@reporter.step("Head object S3")
|
||||
@report_error
|
||||
def head_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
|
||||
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
|
||||
response = self.boto3_client.head_object(**params)
|
||||
log_command_execution("S3 Head object result", response)
|
||||
return response
|
||||
|
||||
@reporter.step("Delete object S3")
|
||||
@report_error
|
||||
def delete_object(self, bucket: str, key: str, version_id: Optional[str] = None) -> dict:
|
||||
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
|
||||
response = self.boto3_client.delete_object(**params)
|
||||
log_command_execution("S3 Delete object result", response)
|
||||
sleep(S3_SYNC_WAIT_TIME)
|
||||
return response
|
||||
|
||||
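    # NOTE: standard S3 semantics for versioned deletes, as a hedged sketch:
    #
    #   version = client.put_object(bucket, filepath)          # returns VersionId
    #   client.delete_object(bucket, key)                      # versioned bucket: adds a delete marker
    #   client.delete_object(bucket, key, version_id=version)  # permanently removes that version
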
@reporter.step("Delete objects S3")
|
||||
@report_error
|
||||
def delete_objects(self, bucket: str, keys: list[str]) -> dict:
|
||||
response = self.boto3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(keys))
|
||||
log_command_execution("S3 Delete objects result", response)
|
||||
assert (
|
||||
"Errors" not in response
|
||||
), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}'
|
||||
sleep(S3_SYNC_WAIT_TIME)
|
||||
return response
|
||||
|
||||
@reporter.step("Delete object versions S3")
|
||||
@report_error
|
||||
def delete_object_versions(self, bucket: str, object_versions: list) -> dict:
|
||||
# Build deletion list in S3 format
|
||||
delete_list = {
|
||||
"Objects": [
|
||||
{
|
||||
"Key": object_version["Key"],
|
||||
"VersionId": object_version["VersionId"],
|
||||
}
|
||||
for object_version in object_versions
|
||||
]
|
||||
}
|
||||
response = self.boto3_client.delete_objects(Bucket=bucket, Delete=delete_list)
|
||||
log_command_execution("S3 Delete objects result", response)
|
||||
return response
|
||||
|
||||
@reporter.step("Delete object versions S3 without delete markers")
|
||||
@report_error
|
||||
def delete_object_versions_without_dm(self, bucket: str, object_versions: list) -> None:
|
||||
# Delete objects without creating delete markers
|
||||
for object_version in object_versions:
|
||||
response = self.boto3_client.delete_object(Bucket=bucket, Key=object_version["Key"], VersionId=object_version["VersionId"])
|
||||
log_command_execution("S3 Delete object result", response)
|
||||
|
||||
@reporter.step("Put object ACL")
|
||||
@report_error
|
||||
def put_object_acl(
|
||||
self,
|
||||
bucket: str,
|
||||
key: str,
|
||||
acl: Optional[str] = None,
|
||||
grant_write: Optional[str] = None,
|
||||
grant_read: Optional[str] = None,
|
||||
) -> list:
|
||||
# pytest.skip("Method put_object_acl is not supported by boto3 client")
|
||||
raise NotImplementedError("Unsupported for boto3 client")
|
||||
|
||||
@reporter.step("Get object ACL")
|
||||
@report_error
|
||||
def get_object_acl(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
|
||||
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
|
||||
response = self.boto3_client.get_object_acl(**params)
|
||||
log_command_execution("S3 ACL objects result", response)
|
||||
return response.get("Grants")
|
||||
|
||||
@reporter.step("Copy object S3")
|
||||
@report_error
|
||||
def copy_object(
|
||||
self,
|
||||
source_bucket: str,
|
||||
source_key: str,
|
||||
bucket: Optional[str] = None,
|
||||
key: Optional[str] = None,
|
||||
acl: Optional[str] = None,
|
||||
metadata_directive: Optional[Literal["COPY", "REPLACE"]] = None,
|
||||
metadata: Optional[dict] = None,
|
||||
tagging_directive: Optional[Literal["COPY", "REPLACE"]] = None,
|
||||
tagging: Optional[str] = None,
|
||||
) -> str:
|
||||
if bucket is None:
|
||||
bucket = source_bucket
|
||||
if key is None:
|
||||
key = os.path.join(os.getcwd(), str(uuid.uuid4()))
|
||||
copy_source = f"{source_bucket}/{source_key}"
|
||||
|
||||
params = {
|
||||
self._to_s3_param(param): value
|
||||
for param, value in locals().items()
|
||||
if param not in ["self", "source_bucket", "source_key"] and value is not None
|
||||
}
|
||||
response = self.boto3_client.copy_object(**params)
|
||||
log_command_execution("S3 Copy objects result", response)
|
||||
return key
|
||||
|
||||
@reporter.step("Get object S3")
|
||||
@report_error
|
||||
def get_object(
|
||||
self,
|
||||
bucket: str,
|
||||
key: str,
|
||||
version_id: Optional[str] = None,
|
||||
object_range: Optional[tuple[int, int]] = None,
|
||||
full_output: bool = False,
|
||||
) -> dict | TestFile:
|
||||
range_str = None
|
||||
if object_range:
|
||||
range_str = f"bytes={object_range[0]}-{object_range[1]}"
|
||||
|
||||
params = {
|
||||
self._to_s3_param(param): value
|
||||
for param, value in {**locals(), **{"Range": range_str}}.items()
|
||||
if param not in ["self", "object_range", "full_output", "range_str", "filename"] and value is not None
|
||||
}
|
||||
response = self.boto3_client.get_object(**params)
|
||||
log_command_execution("S3 Get objects result", response)
|
||||
|
||||
if full_output:
|
||||
return response
|
||||
|
||||
test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4())))
|
||||
with open(test_file, "wb") as file:
|
||||
chunk = response["Body"].read(1024)
|
||||
while chunk:
|
||||
file.write(chunk)
|
||||
chunk = response["Body"].read(1024)
|
||||
return test_file
|
||||
|
||||
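    # NOTE: a hedged sketch of ranged reads via get_object; the bounds are
    # inclusive, matching the HTTP header "Range: bytes=<start>-<end>":
    #
    #   first_kib = client.get_object(bucket, key, object_range=(0, 1023))  # file with bytes 0..1023
    #   raw = client.get_object(bucket, key, full_output=True)              # raw boto3 response dict
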
@reporter.step("Create multipart upload S3")
|
||||
@report_error
|
||||
def create_multipart_upload(self, bucket: str, key: str) -> str:
|
||||
response = self.boto3_client.create_multipart_upload(Bucket=bucket, Key=key)
|
||||
log_command_execution("S3 Created multipart upload", response)
|
||||
assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
|
||||
|
||||
return response["UploadId"]
|
||||
|
||||
@reporter.step("List multipart uploads S3")
|
||||
@report_error
|
||||
def list_multipart_uploads(self, bucket: str) -> Optional[list[dict]]:
|
||||
response = self.boto3_client.list_multipart_uploads(Bucket=bucket)
|
||||
log_command_execution("S3 List multipart upload", response)
|
||||
|
||||
return response.get("Uploads")
|
||||
|
||||
@reporter.step("Abort multipart upload S3")
|
||||
@report_error
|
||||
def abort_multipart_upload(self, bucket: str, key: str, upload_id: str) -> None:
|
||||
response = self.boto3_client.abort_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id)
|
||||
log_command_execution("S3 Abort multipart upload", response)
|
||||
|
||||
@reporter.step("Upload part S3")
|
||||
@report_error
|
||||
def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
|
||||
with open(filepath, "rb") as put_file:
|
||||
body = put_file.read()
|
||||
|
||||
response = self.boto3_client.upload_part(
|
||||
UploadId=upload_id,
|
||||
Bucket=bucket,
|
||||
Key=key,
|
||||
PartNumber=part_num,
|
||||
Body=body,
|
||||
)
|
||||
log_command_execution("S3 Upload part", response)
|
||||
assert response.get("ETag"), f"Expected ETag in response:\n{response}"
|
||||
|
||||
return response["ETag"]
|
||||
|
||||
@reporter.step("Upload copy part S3")
|
||||
@report_error
|
||||
def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
|
||||
response = self.boto3_client.upload_part_copy(
|
||||
UploadId=upload_id,
|
||||
Bucket=bucket,
|
||||
Key=key,
|
||||
PartNumber=part_num,
|
||||
CopySource=copy_source,
|
||||
)
|
||||
log_command_execution("S3 Upload copy part", response)
|
||||
assert response.get("CopyPartResult", []).get("ETag"), f"Expected ETag in response:\n{response}"
|
||||
|
||||
return response["CopyPartResult"]["ETag"]
|
||||
|
||||
@reporter.step("List parts S3")
|
||||
@report_error
|
||||
def list_parts(self, bucket: str, key: str, upload_id: str) -> list[dict]:
|
||||
response = self.boto3_client.list_parts(UploadId=upload_id, Bucket=bucket, Key=key)
|
||||
log_command_execution("S3 List part", response)
|
||||
assert response.get("Parts"), f"Expected Parts in response:\n{response}"
|
||||
|
||||
return response["Parts"]
|
||||
|
||||
@reporter.step("Complete multipart upload S3")
|
||||
@report_error
|
||||
def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
|
||||
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
|
||||
response = self.boto3_client.complete_multipart_upload(Bucket=bucket, Key=key, UploadId=upload_id, MultipartUpload={"Parts": parts})
|
||||
log_command_execution("S3 Complete multipart upload", response)
|
||||
|
||||
return response
|
||||
|
||||
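    # NOTE: the multipart methods compose as below (hedged sketch; "part_paths"
    # is an assumed list of local file chunks, not defined in the source):
    #
    #   upload_id = client.create_multipart_upload(bucket, key)
    #   parts = [(n, client.upload_part(bucket, key, upload_id, n, path))
    #            for n, path in enumerate(part_paths, start=1)]
    #   client.complete_multipart_upload(bucket, key, upload_id, parts)
    #
    # complete_multipart_upload expects (part_number, etag) pairs, which it
    # repacks into the {"ETag": ..., "PartNumber": ...} structure boto3 requires.
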
@reporter.step("Put object retention")
|
||||
@report_error
|
||||
def put_object_retention(
|
||||
self,
|
||||
bucket: str,
|
||||
key: str,
|
||||
retention: dict,
|
||||
version_id: Optional[str] = None,
|
||||
bypass_governance_retention: Optional[bool] = None,
|
||||
) -> None:
|
||||
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
|
||||
response = self.boto3_client.put_object_retention(**params)
|
||||
log_command_execution("S3 Put object retention ", response)
|
||||
|
||||
@reporter.step("Put object legal hold")
|
||||
@report_error
|
||||
def put_object_legal_hold(
|
||||
self,
|
||||
bucket: str,
|
||||
key: str,
|
||||
legal_hold_status: Literal["ON", "OFF"],
|
||||
version_id: Optional[str] = None,
|
||||
) -> None:
|
||||
legal_hold = {"Status": legal_hold_status}
|
||||
params = {
|
||||
self._to_s3_param(param): value
|
||||
for param, value in locals().items()
|
||||
if param not in ["self", "legal_hold_status"] and value is not None
|
||||
}
|
||||
response = self.boto3_client.put_object_legal_hold(**params)
|
||||
log_command_execution("S3 Put object legal hold ", response)
|
||||
|
||||
@reporter.step("Put object tagging")
|
||||
@report_error
|
||||
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = "") -> None:
|
||||
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
|
||||
tagging = {"TagSet": tags}
|
||||
response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id)
|
||||
log_command_execution("S3 Put object tagging", response)
|
||||
|
||||
@reporter.step("Get object tagging")
|
||||
@report_error
|
||||
def get_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> list:
|
||||
params = {self._to_s3_param(param): value for param, value in locals().items() if param not in ["self"] and value is not None}
|
||||
response = self.boto3_client.get_object_tagging(**params)
|
||||
log_command_execution("S3 Get object tagging", response)
|
||||
return response.get("TagSet")
|
||||
|
||||
@reporter.step("Delete object tagging")
|
||||
@report_error
|
||||
def delete_object_tagging(self, bucket: str, key: str) -> None:
|
||||
response = self.boto3_client.delete_object_tagging(Bucket=bucket, Key=key)
|
||||
log_command_execution("S3 Delete object tagging", response)
|
||||
|
||||
@reporter.step("Get object attributes")
|
||||
@report_error
|
||||
def get_object_attributes(
|
||||
self,
|
||||
bucket: str,
|
||||
key: str,
|
||||
attributes: list[str],
|
||||
version_id: Optional[str] = None,
|
||||
max_parts: Optional[int] = None,
|
||||
part_number: Optional[int] = None,
|
||||
full_output: bool = True,
|
||||
) -> dict:
|
||||
logger.warning("Method get_object_attributes is not supported by boto3 client")
|
||||
return {}
|
||||
|
||||
@reporter.step("Sync directory S3")
|
||||
@report_error
|
||||
def sync(
|
||||
self,
|
||||
bucket: str,
|
||||
dir_path: str,
|
||||
acl: Optional[str] = None,
|
||||
metadata: Optional[dict] = None,
|
||||
) -> dict:
|
||||
raise NotImplementedError("Sync is not supported for boto3 client")
|
||||
|
||||
@reporter.step("CP directory S3")
|
||||
@report_error
|
||||
def cp(
|
||||
self,
|
||||
bucket: str,
|
||||
dir_path: str,
|
||||
acl: Optional[str] = None,
|
||||
metadata: Optional[dict] = None,
|
||||
) -> dict:
|
||||
raise NotImplementedError("Cp is not supported for boto3 client")
|
||||
|
||||
    # END OBJECT METHODS #

    # IAM METHODS #
    # Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.)

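    # NOTE: the attach/detach/put-policy helpers below sleep for
    # S3_SYNC_WAIT_TIME * 10 because IAM policy changes propagate asynchronously;
    # a request issued immediately after attaching a policy may still be
    # evaluated against the old policy set. Hedged illustration:
    #
    #   client.iam_attach_user_policy(user_name, policy_arn)  # sleeps before returning
    #   client.list_buckets()                                 # now evaluated under the new policy
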
@reporter.step("Adds the specified user to the specified group")
|
||||
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
|
||||
response = self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name)
|
||||
return response
|
||||
|
||||
@reporter.step("Attaches the specified managed policy to the specified IAM group")
|
||||
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
|
||||
response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
|
||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
||||
return response
|
||||
|
||||
@reporter.step("Attaches the specified managed policy to the specified user")
|
||||
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
|
||||
response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn)
|
||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
||||
return response
|
||||
|
||||
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
|
||||
def iam_create_access_key(self, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.create_access_key(UserName=user_name)
|
||||
|
||||
access_key_id = response["AccessKey"].get("AccessKeyId")
|
||||
secret_access_key = response["AccessKey"].get("SecretAccessKey")
|
||||
assert access_key_id, f"Expected AccessKeyId in response:\n{response}"
|
||||
assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
|
||||
|
||||
return access_key_id, secret_access_key
|
||||
|
||||
@reporter.step("Creates a new group")
|
||||
def iam_create_group(self, group_name: str) -> dict:
|
||||
response = self.boto3_iam_client.create_group(GroupName=group_name)
|
||||
assert response.get("Group"), f"Expected Group in response:\n{response}"
|
||||
assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Creates a new managed policy for your AWS account")
|
||||
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
|
||||
response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
|
||||
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
|
||||
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Creates a new IAM user for your AWS account")
|
||||
def iam_create_user(self, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.create_user(UserName=user_name)
|
||||
assert response.get("User"), f"Expected User in response:\n{response}"
|
||||
assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Deletes the access key pair associated with the specified IAM user")
|
||||
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name)
|
||||
return response
|
||||
|
||||
@reporter.step("Deletes the specified IAM group")
|
||||
def iam_delete_group(self, group_name: str) -> dict:
|
||||
response = self.boto3_iam_client.delete_group(GroupName=group_name)
|
||||
return response
|
||||
|
||||
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
|
||||
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
|
||||
response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name)
|
||||
return response
|
||||
|
||||
@reporter.step("Deletes the specified managed policy")
|
||||
def iam_delete_policy(self, policy_arn: str) -> dict:
|
||||
response = self.boto3_iam_client.delete_policy(PolicyArn=policy_arn)
|
||||
return response
|
||||
|
||||
@reporter.step("Deletes the specified IAM user")
|
||||
def iam_delete_user(self, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.delete_user(UserName=user_name)
|
||||
return response
|
||||
|
||||
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
|
||||
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
|
||||
response = self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name)
|
||||
return response
|
||||
|
||||
@reporter.step("Removes the specified managed policy from the specified IAM group")
|
||||
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
|
||||
response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
|
||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
||||
return response
|
||||
|
||||
@reporter.step("Removes the specified managed policy from the specified user")
|
||||
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
|
||||
response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn)
|
||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
||||
return response
|
||||
|
||||
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
|
||||
def iam_get_group(self, group_name: str) -> dict:
|
||||
response = self.boto3_iam_client.get_group(GroupName=group_name)
|
||||
assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
|
||||
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
|
||||
response = self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name)
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Retrieves information about the specified managed policy")
|
||||
def iam_get_policy(self, policy_arn: str) -> dict:
|
||||
response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn)
|
||||
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
|
||||
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Retrieves information about the specified version of the specified managed policy")
|
||||
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
|
||||
response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id)
|
||||
assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}"
|
||||
assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Retrieves information about the specified IAM user")
|
||||
def iam_get_user(self, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.get_user(UserName=user_name)
|
||||
assert response.get("User"), f"Expected User in response:\n{response}"
|
||||
assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
|
||||
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
|
||||
response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name)
|
||||
assert response.get("UserName"), f"Expected UserName in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Returns information about the access key IDs associated with the specified IAM user")
|
||||
def iam_list_access_keys(self, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.list_access_keys(UserName=user_name)
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists all managed policies that are attached to the specified IAM group")
|
||||
def iam_list_attached_group_policies(self, group_name: str) -> dict:
|
||||
response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name)
|
||||
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
|
||||
def iam_list_attached_user_policies(self, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name)
|
||||
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
|
||||
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
|
||||
response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn)
|
||||
|
||||
assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}"
|
||||
assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
|
||||
def iam_list_group_policies(self, group_name: str) -> dict:
|
||||
response = self.boto3_iam_client.list_group_policies(GroupName=group_name)
|
||||
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists the IAM groups")
|
||||
def iam_list_groups(self) -> dict:
|
||||
response = self.boto3_iam_client.list_groups()
|
||||
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
|
||||
def iam_list_groups_for_user(self, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.list_groups_for_user(UserName=user_name)
|
||||
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists all the managed policies that are available in your AWS account")
|
||||
def iam_list_policies(self) -> dict:
|
||||
response = self.boto3_iam_client.list_policies()
|
||||
assert response.get("Policies"), f"Expected Policies in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists information about the versions of the specified managed policy")
|
||||
def iam_list_policy_versions(self, policy_arn: str) -> dict:
|
||||
response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn)
|
||||
assert response.get("Versions"), f"Expected Versions in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
|
||||
def iam_list_user_policies(self, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.list_user_policies(UserName=user_name)
|
||||
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Lists the IAM users")
|
||||
def iam_list_users(self) -> dict:
|
||||
response = self.boto3_iam_client.list_users()
|
||||
assert response.get("Users"), f"Expected Users in response:\n{response}"
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
|
||||
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
|
||||
response = self.boto3_iam_client.put_group_policy(
|
||||
GroupName=group_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
|
||||
)
|
||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
||||
return response
|
||||
|
||||
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
|
||||
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
|
||||
response = self.boto3_iam_client.put_user_policy(
|
||||
UserName=user_name, PolicyName=policy_name, PolicyDocument=json.dumps(policy_document)
|
||||
)
|
||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
||||
return response
|
||||
|
||||
@reporter.step("Removes the specified user from the specified group")
|
||||
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
|
||||
response = self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name)
|
||||
return response
|
||||
|
||||
@reporter.step("Updates the name and/or the path of the specified IAM group")
|
||||
def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
|
||||
response = self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath="/")
|
||||
|
||||
return response
|
||||
|
||||
@reporter.step("Updates the name and/or the path of the specified IAM user")
|
||||
def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
|
||||
response = self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath="/")
|
||||
return response
|
|
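    # NOTE: a hedged end-to-end sketch of the IAM helpers above (the user name
    # and policy document are illustrative, not from the source):
    #
    #   client.iam_create_user("alice")
    #   access_key_id, secret_access_key = client.iam_create_access_key("alice")
    #   policy = client.iam_create_policy(
    #       "s3-full", {"Version": "2012-10-17", "Statement": [{"Effect": "Allow", "Action": "s3:*", "Resource": "*"}]}
    #   )
    #   client.iam_attach_user_policy("alice", policy["Policy"]["Arn"])
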
@@ -1,7 +1,7 @@
 import re
 
 from frostfs_testlib.cli.generic_cli import GenericCli
-from frostfs_testlib.clients.s3 import BucketContainerResolver
+from frostfs_testlib.s3.interfaces import BucketContainerResolver
 from frostfs_testlib.storage.cluster import ClusterNode
 
 
@@ -58,10 +58,6 @@ class S3ClientWrapper(HumanReadableABC):
     def set_endpoint(self, s3gate_endpoint: str):
        """Set endpoint"""
 
-    @abstractmethod
-    def set_iam_endpoint(self, iam_endpoint: str):
-        """Set iam endpoint"""
-
     @abstractmethod
     def create_bucket(
         self,
@@ -128,7 +124,7 @@ class S3ClientWrapper(HumanReadableABC):
         """Deletes the tags from the bucket."""
 
     @abstractmethod
-    def get_bucket_acl(self, bucket: str) -> dict:
+    def get_bucket_acl(self, bucket: str) -> list:
         """This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket."""
 
     @abstractmethod
@@ -195,9 +191,7 @@
         """
 
     @abstractmethod
-    def list_objects(
-        self, bucket: str, full_output: bool = False, page_size: Optional[int] = None, prefix: Optional[str] = None
-    ) -> Union[dict, list[str]]:
+    def list_objects(self, bucket: str, full_output: bool = False) -> Union[dict, list[str]]:
         """Returns some or all (up to 1,000) of the objects in a bucket with each request.
         You can use the request parameters as selection criteria to return a subset of the objects in a bucket.
         A 200 OK response can contain valid or invalid XML. Make sure to design your application
@@ -336,7 +330,7 @@
         """Lists the parts that have been uploaded for a specific multipart upload."""
 
     @abstractmethod
-    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> dict:
+    def complete_multipart_upload(self, bucket: str, key: str, upload_id: str, parts: list) -> None:
         """Completes a multipart upload by assembling previously uploaded parts."""
 
     @abstractmethod
@@ -372,18 +366,6 @@
     def delete_object_tagging(self, bucket: str, key: str) -> None:
         """Removes the entire tag set from the specified object."""
 
-    @abstractmethod
-    def put_bucket_lifecycle_configuration(self, bucket: str, lifecycle_configuration: dict, dumped_configuration: str) -> dict:
-        """Adds or updates bucket lifecycle configuration"""
-
-    @abstractmethod
-    def get_bucket_lifecycle_configuration(self, bucket: str) -> dict:
-        """Gets bucket lifecycle configuration"""
-
-    @abstractmethod
-    def delete_bucket_lifecycle(self, bucket: str) -> dict:
-        """Deletes bucket lifecycle"""
-
     @abstractmethod
     def get_object_attributes(
         self,
@@ -426,7 +408,7 @@
         """Adds the specified user to the specified group"""
 
     @abstractmethod
-    def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
+    def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict:
         """Attaches the specified managed policy to the specified IAM group"""
 
     @abstractmethod
@@ -568,44 +550,3 @@
     @abstractmethod
     def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
         """Updates the name and/or the path of the specified IAM user"""
-
-    @abstractmethod
-    def iam_tag_user(self, user_name: str, tags: list) -> dict:
-        """Adds one or more tags to an IAM user"""
-
-    @abstractmethod
-    def iam_list_user_tags(self, user_name: str) -> dict:
-        """List tags of IAM user"""
-
-    @abstractmethod
-    def iam_untag_user(self, user_name: str, tag_keys: list) -> dict:
-        """Removes the specified tags from the user"""
-
-    # MFA methods
-    @abstractmethod
-    def iam_create_virtual_mfa_device(
-        self, virtual_mfa_device_name: str, outfile: Optional[str] = None, bootstrap_method: Optional[str] = None
-    ) -> tuple:
-        """Creates a new virtual MFA device"""
-
-    @abstractmethod
-    def iam_deactivate_mfa_device(self, user_name: str, serial_number: str) -> dict:
-        """Deactivates the specified MFA device and removes it from association with the user name"""
-
-    @abstractmethod
-    def iam_delete_virtual_mfa_device(self, serial_number: str) -> dict:
-        """Deletes a virtual MFA device"""
-
-    @abstractmethod
-    def iam_enable_mfa_device(self, user_name: str, serial_number: str, authentication_code1: str, authentication_code2: str) -> dict:
-        """Enables the specified MFA device and associates it with the specified IAM user"""
-
-    @abstractmethod
-    def iam_list_virtual_mfa_devices(self) -> dict:
-        """Lists the MFA devices for an IAM user"""
-
-    @abstractmethod
-    def sts_get_session_token(
-        self, duration_seconds: Optional[str] = None, serial_number: Optional[str] = None, token_code: Optional[str] = None
-    ) -> tuple:
-        """Get session token for user"""
@@ -1,18 +1,15 @@
 import logging
 import subprocess
 import tempfile
-from contextlib import nullcontext
 from datetime import datetime
 from typing import IO, Optional
 
 import pexpect
 
 from frostfs_testlib import reporter
-from frostfs_testlib.resources.common import MORE_LOG
 from frostfs_testlib.shell.interfaces import CommandInspector, CommandOptions, CommandResult, Shell
 
 logger = logging.getLogger("frostfs.testlib.shell")
-step_context = reporter.step if MORE_LOG == "1" else nullcontext
 
 
 class LocalShell(Shell):
@@ -31,10 +28,10 @@ class LocalShell(Shell):
         for inspector in [*self.command_inspectors, *extra_inspectors]:
             command = inspector.inspect(original_command, command)
 
-        with step_context(f"Executing command: {command}"):
-            if options.interactive_inputs:
-                return self._exec_interactive(command, options)
-            return self._exec_non_interactive(command, options)
+        logger.info(f"Executing command: {command}")
+        if options.interactive_inputs:
+            return self._exec_interactive(command, options)
+        return self._exec_non_interactive(command, options)
 
     def _exec_interactive(self, command: str, options: CommandOptions) -> CommandResult:
         start_time = datetime.utcnow()
@@ -63,7 +60,9 @@ class LocalShell(Shell):
 
         if options.check and result.return_code != 0:
             raise RuntimeError(
-                f"Command: {command}\nreturn code: {result.return_code}\n" f"Output: {result.stdout}\n" f"Stderr: {result.stderr}\n"
+                f"Command: {command}\nreturn code: {result.return_code}\n"
+                f"Output: {result.stdout}\n"
+                f"Stderr: {result.stderr}\n"
             )
         return result
 
@@ -94,7 +93,9 @@ class LocalShell(Shell):
                 stderr="",
                 return_code=exc.returncode,
             )
-            raise RuntimeError(f"Command: {command}\nError with retcode: {exc.returncode}\n Output: {exc.output}") from exc
+            raise RuntimeError(
+                f"Command: {command}\nError:\n" f"return code: {exc.returncode}\n" f"output: {exc.output}"
+            ) from exc
         except OSError as exc:
             raise RuntimeError(f"Command: {command}\nOutput: {exc.strerror}") from exc
         finally:
@@ -128,19 +129,22 @@ class LocalShell(Shell):
         end_time: datetime,
         result: Optional[CommandResult],
     ) -> None:
-        if not result:
-            logger.warning(f"Command: {command}\n" f"Error: result is None")
-            return
-
-        status, log_method = ("Success", logger.info) if result.return_code == 0 else ("Error", logger.warning)
-        log_method(f"Command: {command}\n" f"{status} with retcode {result.return_code}\n" f"Output: \n{result.stdout}")
-
-        elapsed_time = end_time - start_time
-        command_attachment = (
-            f"COMMAND: {command}\n"
-            f"RETCODE: {result.return_code}\n\n"
-            f"STDOUT:\n{result.stdout}\n"
-            f"STDERR:\n{result.stderr}\n"
-            f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
-        )
-        reporter.attach(command_attachment, "Command execution.txt")
+        # TODO: increase logging level if return code is non 0, should be warning at least
+        logger.info(
+            f"Command: {command}\n"
+            f"{'Success:' if result and result.return_code == 0 else 'Error:'}\n"
+            f"return code: {result.return_code if result else ''} "
+            f"\nOutput: {result.stdout if result else ''}"
+        )
+
+        if result:
+            elapsed_time = end_time - start_time
+            command_attachment = (
+                f"COMMAND: {command}\n"
+                f"RETCODE: {result.return_code}\n\n"
+                f"STDOUT:\n{result.stdout}\n"
+                f"STDERR:\n{result.stderr}\n"
+                f"Start / End / Elapsed\t {start_time.time()} / {end_time.time()} / {elapsed_time}"
+            )
+            with reporter.step(f"COMMAND: {command}"):
+                reporter.attach(command_attachment, "Command execution.txt")

@@ -7,7 +7,9 @@ from typing import Optional, Union
 
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
+from frostfs_testlib.plugins import load_plugin
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
+from frostfs_testlib.s3.interfaces import BucketContainerResolver
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
@@ -109,8 +111,6 @@ def create_container(
     options: Optional[dict] = None,
     await_mode: bool = True,
     wait_for_creation: bool = True,
-    nns_zone: Optional[str] = None,
-    nns_name: Optional[str] = None,
     timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
 ) -> str:
     """
@@ -143,8 +143,6 @@ def create_container(
     result = cli.container.create(
         rpc_endpoint=endpoint,
         policy=rule,
-        nns_name=nns_name,
-        nns_zone=nns_zone,
         basic_acl=basic_acl,
         attributes=attributes,
         name=name,
@@ -202,6 +200,7 @@ def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Op
     """
     cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
     result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout)
+    logger.info(f"Containers: \n{result}")
     return result.stdout.split()
 
 
@@ -329,6 +328,13 @@ def _parse_cid(output: str) -> str:
     return splitted[1]
 
 
+@reporter.step("Search container by name")
+def search_container_by_name(name: str, node: ClusterNode):
+    resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product)
+    resolver: BucketContainerResolver = resolver_cls()
+    return resolver.resolve(node, name)
+
+
 @reporter.step("Search for nodes with a container")
 def search_nodes_with_container(
     wallet: WalletInfo,
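# NOTE: a hedged usage sketch of the search_container_by_name helper added
# above ("cluster_node" is an assumed ClusterNode instance):
#
#   cid = search_container_by_name("my-bucket", cluster_node)
#
# load_plugin selects the product-specific BucketContainerResolver
# implementation registered under the "frostfs.testlib.bucket_cid_resolver"
# entry point for the host's product.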
@@ -15,7 +15,7 @@ from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing import wait_for_success
 from frostfs_testlib.utils import json_utils
-from frostfs_testlib.utils.cli_utils import parse_netmap_output
+from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
 from frostfs_testlib.utils.file_utils import TestFile
 
 logger = logging.getLogger("NeoLogger")
@@ -616,27 +616,27 @@ def head_object(
    fst_line_idx = result.stdout.find("\n")
    decoded = json.loads(result.stdout[fst_line_idx:])

    # if response
    if "chunks" in decoded.keys():
        logger.info("decoding ec chunks")
        return decoded["chunks"]

    # If response is Complex Object header, it has `splitId` key
    if "splitId" in decoded.keys():
        logger.info("decoding split header")
        return json_utils.decode_split_header(decoded)

    # If response is Last or Linking Object header,
    # it has `header` dictionary and non-null `split` dictionary
    if "split" in decoded["header"].keys():
        if decoded["header"]["split"]:
            logger.info("decoding linking object")
            return json_utils.decode_linking_object(decoded)

    if decoded["header"]["objectType"] == "STORAGE_GROUP":
        logger.info("decoding storage group")
        return json_utils.decode_storage_group(decoded)

    if decoded["header"]["objectType"] == "TOMBSTONE":
        logger.info("decoding tombstone")
        return json_utils.decode_tombstone(decoded)

    logger.info("decoding simple header")
    return json_utils.decode_simple_header(decoded)

@@ -690,13 +690,11 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
     latest_block = first_line.split(":")
     # taking second line from command's output contain wallet key
     second_line = output.split("\n")[1]
-    if second_line != "":
-        validated_state = second_line.split(":")
-        return {
-            latest_block[0].replace(":", ""): int(latest_block[1]),
-            validated_state[0].replace(":", ""): int(validated_state[1]),
-        }
-    return {latest_block[0].replace(":", ""): int(latest_block[1])}
+    validated_state = second_line.split(":")
+    return {
+        latest_block[0].replace(":", ""): int(latest_block[1]),
+        validated_state[0].replace(":", ""): int(validated_state[1]),
+    }
 
 
 @wait_for_success()
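# NOTE: both variants above turn two "key: value" lines of neo-go output into
# a dict; a hedged illustration (label text assumed, not verified):
#
#   "Latest block: 1234\nValidated state: 1230"
#   -> {"Latest block": 1234, "Validated state": 1230}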
@@ -719,27 +717,21 @@ def get_object_nodes(
 
     cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)
 
-    response = cli.object.nodes(
+    result_object_nodes = cli.object.nodes(
         rpc_endpoint=endpoint,
         cid=cid,
         oid=oid,
         bearer=bearer,
         ttl=1 if is_direct else None,
-        json=True,
         xhdr=xhdr,
         timeout=timeout,
         verify_presence_all=verify_presence_all,
     )
 
-    response_json = json.loads(response.stdout)
-    # Currently, the command will show expected and confirmed nodes.
-    # And we (currently) count only nodes which are both expected and confirmed
-    object_nodes_id = {
-        required_node
-        for data_object in response_json["data_objects"]
-        for required_node in data_object["required_nodes"]
-        if required_node in data_object["confirmed_nodes"]
-    }
+    parsing_output = parse_cmd_table(result_object_nodes.stdout, "|")
+    list_object_nodes = [
+        node for node in parsing_output if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true"
+    ]
 
     netmap_nodes_list = parse_netmap_output(
         cli.netmap.snapshot(
@@ -748,11 +740,14 @@ def get_object_nodes(
         ).stdout
     )
     netmap_nodes = [
-        netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id
+        netmap_node
+        for object_node in list_object_nodes
+        for netmap_node in netmap_nodes_list
+        if object_node["node_id"] == netmap_node.node_id
     ]
 
-    object_nodes = [
+    result = [
         cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip
     ]
 
-    return object_nodes
+    return result

@@ -69,7 +69,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
 
 
 @reporter.step("Tick Epoch")
-def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None, delta: Optional[int] = None):
+def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode] = None):
     """
     Tick epoch using frostfs-adm or NeoGo if frostfs-adm is not available (DevEnv)
     Args:
@@ -88,17 +88,12 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
             frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
             config_file=FROSTFS_ADM_CONFIG_PATH,
         )
-        frostfs_adm.morph.force_new_epoch(delta=delta)
+        frostfs_adm.morph.force_new_epoch()
         return
 
     # Otherwise we tick epoch using transaction
     cur_epoch = get_epoch(shell, cluster)
 
-    if delta:
-        next_epoch = cur_epoch + delta
-    else:
-        next_epoch = cur_epoch + 1
-
     # Use first node by default
     ir_node = cluster.services(InnerRing)[0]
     # In case if no local_wallet_path is provided, we use wallet_path
@@ -115,7 +110,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
         wallet_password=ir_wallet_pass,
         scripthash=get_contract_hash(morph_chain, "netmap.frostfs", shell=shell),
         method="newEpoch",
-        arguments=f"int:{next_epoch}",
+        arguments=f"int:{cur_epoch + 1}",
         multisig_hash=f"{ir_address}:Global",
         address=ir_address,
         rpc_endpoint=morph_endpoint,
@@ -12,8 +12,8 @@ import requests
 
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import GenericCli
-from frostfs_testlib.clients.s3.aws_cli_client import command_options
 from frostfs_testlib.resources.common import ASSETS_DIR, SIMPLE_OBJECT_SIZE
+from frostfs_testlib.s3.aws_cli_client import command_options
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.shell.local_shell import LocalShell
 from frostfs_testlib.steps.cli.object import get_object
@@ -38,34 +38,34 @@ def get_via_http_gate(
     """
     This function gets given object from HTTP gate
     cid: container id to get object from
-    oid: object id / object key
+    oid: object ID
     node: node to make request
     request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}]
     """
 
-    request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
-    if request_path:
+    # if `request_path` parameter omitted, use default
+    if request_path is None:
+        request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
+    else:
         request = f"{node.http_gate.get_endpoint()}{request_path}"
 
-    response = requests.get(request, stream=True, timeout=timeout, verify=False)
+    resp = requests.get(request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False)
 
-    if not response.ok:
+    if not resp.ok:
         raise Exception(
             f"""Failed to get object via HTTP gate:
-                request: {response.request.path_url},
-                response: {response.text},
-                headers: {response.headers},
-                status code: {response.status_code} {response.reason}"""
+                request: {resp.request.path_url},
+                response: {resp.text},
+                headers: {resp.headers},
+                status code: {resp.status_code} {resp.reason}"""
         )
 
     logger.info(f"Request: {request}")
-    _attach_allure_step(request, response.status_code)
+    _attach_allure_step(request, resp.status_code)
 
     test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}"))
     with open(test_file, "wb") as file:
-        for chunk in response.iter_content(chunk_size=8192):
-            file.write(chunk)
-
+        shutil.copyfileobj(resp.raw, file)
     return test_file
 
 
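# NOTE: on the streaming change above: shutil.copyfileobj(resp.raw, file)
# copies the raw socket stream without HTTP content decoding, while
# iter_content() decodes gzip/deflate per Content-Encoding; the two are
# equivalent only for uncompressed responses. Hedged sketch:
#
#   with open(path, "wb") as f:
#       shutil.copyfileobj(resp.raw, f)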
@@ -115,17 +115,18 @@ def get_via_http_gate_by_attribute(
     cid: CID to get object from
     attribute: attribute {name: attribute} value pair
-    endpoint: http gate endpoint
+    http_hostname: http host name on the node
     request_path: (optional) http request path, if ommited - use default [{endpoint}/get_by_attribute/{Key}/{Value}]
     """
 
     attr_name = list(attribute.keys())[0]
     attr_value = quote_plus(str(attribute.get(attr_name)))
 
-    request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
-    if request_path:
+    # if `request_path` parameter ommited, use default
+    if request_path is None:
+        request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
+    else:
         request = f"{node.http_gate.get_endpoint()}{request_path}"
 
-    resp = requests.get(request, stream=True, timeout=timeout, verify=False)
+    resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]})
 
     if not resp.ok:
         raise Exception(
@@ -145,6 +146,7 @@ def get_via_http_gate_by_attribute(
     return test_file
 
 
+# TODO: pass http_hostname as a header
 @reporter.step("Upload via HTTP Gate")
 def upload_via_http_gate(cid: str, path: str, endpoint: str, headers: Optional[dict] = None, timeout: Optional[int] = 300) -> str:
     """
@@ -189,6 +191,7 @@ def is_object_large(filepath: str) -> bool:
     return False
 
 
+# TODO: pass http_hostname as a header
 @reporter.step("Upload via HTTP Gate using Curl")
 def upload_via_http_gate_curl(
     cid: str,
@@ -249,7 +252,7 @@ def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> TestFile:
     test_file = TestFile(os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}"))
 
     curl = GenericCli("curl", node.host)
-    curl(f"-k ", f"{request} > {test_file}", shell=local_shell)
+    curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {test_file}", shell=local_shell)
 
     return test_file
 
@@ -357,9 +360,19 @@ def try_to_get_object_via_passed_request_and_expect_error(
 ) -> None:
     try:
         if attrs is None:
-            get_via_http_gate(cid, oid, node, http_request_path)
+            get_via_http_gate(
+                cid=cid,
+                oid=oid,
+                node=node,
+                request_path=http_request_path,
+            )
         else:
-            get_via_http_gate_by_attribute(cid, attrs, node, http_request_path)
+            get_via_http_gate_by_attribute(
+                cid=cid,
+                attribute=attrs,
+                node=node,
+                request_path=http_request_path,
+            )
         raise AssertionError(f"Expected error on getting object with cid: {cid}")
     except Exception as err:
         match = error_pattern.casefold() in str(err).casefold()
@@ -1,8 +1,8 @@
 import re
 
 from frostfs_testlib import reporter
-from frostfs_testlib.storage.cluster import ClusterNode
 from frostfs_testlib.testing.test_control import wait_for_success
+from frostfs_testlib.storage.cluster import ClusterNode
 
 
 @reporter.step("Check metrics result")
@ -19,7 +19,7 @@ def check_metrics_counter(
|
|||
counter_act += get_metrics_value(cluster_node, parse_from_command, **metrics_greps)
|
||||
assert eval(
|
||||
f"{counter_act} {operator} {counter_exp}"
|
||||
), f"Expected: {counter_exp} {operator} Actual: {counter_act} in nodes: {cluster_nodes}"
|
||||
), f"Expected: {counter_exp} {operator} Actual: {counter_act} in node: {cluster_node}"
|
||||
|
||||
|
||||
@reporter.step("Get metrics value from node: {node}")
|
||||
|
|
|
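The assertion above builds a Python expression out of the operator string and evaluates it, which is what lets tests pass `>`, `>=`, or `==` as data. A self-contained sketch with hypothetical values:

```python
# check_metrics_counter-style comparison; operator is a plain string.
counter_act, counter_exp, operator = 42, 40, ">="
assert eval(f"{counter_act} {operator} {counter_exp}"), f"Expected: {counter_exp} {operator} Actual: {counter_act}"
# eval is tolerable here only because both operands are ints the test itself produced.
```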
@@ -4,18 +4,16 @@ from frostfs_testlib.storage.cluster import ClusterNode

class IpHelper:
    @staticmethod
    def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[tuple]) -> None:
    def drop_input_traffic_to_node(node: ClusterNode, block_ip: list[str]) -> None:
        shell = node.host.get_shell()
        for ip, table in block_ip:
            if not table:
                shell.exec(f"ip r a blackhole {ip}")
                continue
            shell.exec(f"ip r a blackhole {ip} table {table}")
        for ip in block_ip:
            shell.exec(f"ip route add blackhole {ip}")

    @staticmethod
    def restore_input_traffic_to_node(node: ClusterNode) -> None:
        shell = node.host.get_shell()
        unlock_ip = shell.exec("ip r l table all | grep blackhole", CommandOptions(check=False)).stdout

        for active_blackhole in unlock_ip.strip().split("\n"):
            shell.exec(f"ip r d {active_blackhole}")
        unlock_ip = shell.exec("ip route list | grep blackhole", CommandOptions(check=False))
        if unlock_ip.return_code != 0:
            return
        for ip in unlock_ip.stdout.strip().split("\n"):
            shell.exec(f"ip route del blackhole {ip.split(' ')[1]}")
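Both variants rely on Linux blackhole routes, which silently discard packets to a destination without touching firewall rules. A hedged, self-contained sketch of the same drop/restore cycle using subprocess directly (the address is from the documentation range and hypothetical; running it requires root):

```python
import subprocess

def drop(ip: str) -> None:
    # Kernel drops all packets routed to this destination.
    subprocess.run(["ip", "route", "add", "blackhole", ip], check=True)

def restore_all() -> None:
    listing = subprocess.run(["ip", "route", "list"], capture_output=True, text=True)
    for line in listing.stdout.splitlines():
        if line.startswith("blackhole"):           # line looks like "blackhole 192.0.2.10"
            subprocess.run(["ip", "route", "del", "blackhole", line.split()[1]], check=True)

# drop("192.0.2.10"); restore_all()  # hypothetical usage
```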
@@ -13,7 +13,6 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
from frostfs_testlib.storage.cluster import Cluster, StorageNode
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import datetime_utils

logger = logging.getLogger("NeoLogger")

@@ -112,7 +111,10 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
    storage_wallet_path = node.get_wallet_path()

    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, config_file=storage_wallet_config)
    return cli.netmap.snapshot(rpc_endpoint=node.get_rpc_endpoint(), wallet=storage_wallet_path).stdout
    return cli.netmap.snapshot(
        rpc_endpoint=node.get_rpc_endpoint(),
        wallet=storage_wallet_path,
    ).stdout


@reporter.step("Get shard list for {node}")

@@ -200,7 +202,12 @@ def delete_node_data(node: StorageNode) -> None:


@reporter.step("Exclude node {node_to_exclude} from network map")
def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
def exclude_node_from_network_map(
    node_to_exclude: StorageNode,
    alive_node: StorageNode,
    shell: Shell,
    cluster: Cluster,
) -> None:
    node_netmap_key = node_to_exclude.get_wallet_public_key()

    storage_node_set_status(node_to_exclude, status="offline")

@@ -214,7 +221,12 @@ def exclude_node_from_network_map(node_to_exclude: StorageNode, alive_node: Stor


@reporter.step("Include node {node_to_include} into network map")
def include_node_to_network_map(node_to_include: StorageNode, alive_node: StorageNode, shell: Shell, cluster: Cluster) -> None:
def include_node_to_network_map(
    node_to_include: StorageNode,
    alive_node: StorageNode,
    shell: Shell,
    cluster: Cluster,
) -> None:
    storage_node_set_status(node_to_include, status="online")

    # Per suggestion of @fyrchik we need to wait for 2 blocks after we set status and after tick epoch.

@@ -224,7 +236,7 @@ def include_node_to_network_map(node_to_include: StorageNode, alive_node: Storag
    tick_epoch(shell, cluster)
    time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * 2)

    await_node_in_map(node_to_include, shell, alive_node)
    check_node_in_map(node_to_include, shell, alive_node)


@reporter.step("Check node {node} in network map")

@@ -238,11 +250,6 @@ def check_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[Stor
    assert node_netmap_key in snapshot, f"Expected node with key {node_netmap_key} to be in network map"


@wait_for_success(300, 15, title="Await node {node} in network map")
def await_node_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
    check_node_in_map(node, shell, alive_node)


@reporter.step("Check node {node} NOT in network map")
def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[StorageNode] = None) -> None:
    alive_node = alive_node or node
@@ -269,7 +276,12 @@ def wait_for_node_to_be_ready(node: StorageNode) -> None:


@reporter.step("Remove nodes from network map through cli-adm morph command")
def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: list[StorageNode], alive_node: Optional[StorageNode] = None):
def remove_nodes_from_map_morph(
    shell: Shell,
    cluster: Cluster,
    remove_nodes: list[StorageNode],
    alive_node: Optional[StorageNode] = None,
):
    """
    Move node to the Offline state in the candidates list and tick an epoch to update the netmap
    using frostfs-adm

@@ -288,5 +300,9 @@ def remove_nodes_from_map_morph(shell: Shell, cluster: Cluster, remove_nodes: li

    if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
        # If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
        frostfsadm = FrostfsAdm(shell=remote_shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
        frostfsadm = FrostfsAdm(
            shell=remote_shell,
            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
            config_file=FROSTFS_ADM_CONFIG_PATH,
        )
        frostfsadm.morph.remove_nodes(node_netmap_keys)
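Taken together, these helpers give a symmetric exclude/include cycle around the netmap. A usage sketch of that flow; the `node`, `alive_node`, `shell`, and `cluster` objects are assumed to come from the surrounding test fixtures:

```python
# Hypothetical round trip using the helpers from this module.
exclude_node_from_network_map(node, alive_node, shell, cluster)  # set offline, tick epoch
check_node_not_in_map(node, shell, alive_node)

include_node_to_network_map(node, alive_node, shell, cluster)    # set online, tick epoch
check_node_in_map(node, shell, alive_node)
```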
@@ -6,9 +6,9 @@ from typing import Optional
from dateutil.parser import parse

from frostfs_testlib import reporter
from frostfs_testlib.clients.s3 import BucketContainerResolver, S3ClientWrapper, VersioningStatus
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import Shell
from frostfs_testlib.steps.cli.container import search_nodes_with_container
from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo

@@ -47,6 +47,7 @@ def set_bucket_versioning(s3_client: S3ClientWrapper, bucket: str, status: Versi
    if status == VersioningStatus.UNDEFINED:
        return

    s3_client.get_bucket_versioning_status(bucket)
    s3_client.put_bucket_versioning(bucket, status=status)
    bucket_status = s3_client.get_bucket_versioning_status(bucket)
    assert bucket_status == status.value, f"Expected {bucket_status} status. Got {status.value}"

@@ -175,35 +176,11 @@ def search_nodes_with_bucket(
    wallet: WalletInfo,
    shell: Shell,
    endpoint: str,
    bucket_container_resolver: BucketContainerResolver,
) -> list[ClusterNode]:
    cid = None
    for cluster_node in cluster.cluster_nodes:
        cid = bucket_container_resolver.resolve(cluster_node, bucket_name)
        cid = search_container_by_name(name=bucket_name, node=cluster_node)
        if cid:
            break
    nodes_list = search_nodes_with_container(wallet=wallet, cid=cid, shell=shell, endpoint=endpoint, cluster=cluster)
    return nodes_list


def get_bytes_relative_to_object(value: int | str, object_size: int = None, part_size: int = None) -> int:
    if isinstance(value, int):
        return value

    if "part" not in value and "object" not in value:
        return int(value)

    if object_size is not None:
        value = value.replace("object", str(object_size))

    if part_size is not None:
        value = value.replace("part", str(part_size))

    return int(eval(value))


def get_range_relative_to_object(rng: str, object_size: int = None, part_size: int = None, int_values: bool = False) -> str | int:
    start, end = rng.split(":")
    start = get_bytes_relative_to_object(start, object_size, part_size)
    end = get_bytes_relative_to_object(end, object_size, part_size)
    return (start, end) if int_values else f"bytes {start}-{end}/*"
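The removed range helpers implement a tiny expression DSL: the literals `object` and `part` are substituted with concrete sizes, then the remaining arithmetic is evaluated. A worked example with hypothetical sizes:

```python
# "object-100:object-1" means the last 100 bytes of a 1000-byte object.
object_size = 1000
rng = "object-100:object-1"

start_expr, end_expr = rng.split(":")
start = int(eval(start_expr.replace("object", str(object_size))))  # 1000-100 -> 900
end = int(eval(end_expr.replace("object", str(object_size))))      # 1000-1   -> 999
print(f"bytes {start}-{end}/*")  # -> "bytes 900-999/*", the HTTP Content-Range form
```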
@@ -11,10 +11,10 @@ from frostfs_testlib.storage import get_service_registry
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
from frostfs_testlib.storage.constants import ConfigAttributes
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.metrics import Metrics
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
from frostfs_testlib.storage.dataclasses.storage_object_info import Interfaces
from frostfs_testlib.storage.service_registry import ServiceRegistry
from frostfs_testlib.storage.dataclasses.metrics import Metrics


class ClusterNode:

@@ -144,16 +144,30 @@ class ClusterNode:
        return self.host.config.interfaces[interface.value]

    def get_data_interfaces(self) -> list[str]:
        return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface]
        return [
            ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "data" in name_interface
        ]

    def get_data_interface(self, search_interface: str) -> list[str]:
        return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_interface == interface]
        return [
            self.host.config.interfaces[interface]
            for interface in self.host.config.interfaces.keys()
            if search_interface == interface
        ]

    def get_internal_interfaces(self) -> list[str]:
        return [ip_address for name_interface, ip_address in self.host.config.interfaces.items() if "internal" in name_interface]
        return [
            ip_address
            for name_interface, ip_address in self.host.config.interfaces.items()
            if "internal" in name_interface
        ]

    def get_internal_interface(self, search_internal: str) -> list[str]:
        return [self.host.config.interfaces[interface] for interface in self.host.config.interfaces.keys() if search_internal == interface]
        return [
            self.host.config.interfaces[interface]
            for interface in self.host.config.interfaces.keys()
            if search_internal == interface
        ]


class Cluster:

@@ -164,6 +178,8 @@ class Cluster:
    default_rpc_endpoint: str
    default_s3_gate_endpoint: str
    default_http_gate_endpoint: str
    default_http_hostname: str
    default_s3_hostname: str

    def __init__(self, hosting: Hosting) -> None:
        self._hosting = hosting

@@ -172,6 +188,8 @@ class Cluster:
        self.default_rpc_endpoint = self.services(StorageNode)[0].get_rpc_endpoint()
        self.default_s3_gate_endpoint = self.services(S3Gate)[0].get_endpoint()
        self.default_http_gate_endpoint = self.services(HTTPGate)[0].get_endpoint()
        self.default_http_hostname = self.services(StorageNode)[0].get_http_hostname()
        self.default_s3_hostname = self.services(StorageNode)[0].get_s3_hostname()

    @property
    def hosts(self) -> list[Host]:
@@ -12,17 +12,9 @@ class ConfigAttributes:
    REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
    ENDPOINT_DATA_0 = "endpoint_data0"
    ENDPOINT_DATA_1 = "endpoint_data1"
    ENDPOINT_DATA_0_NS = "endpoint_data0_namespace"
    ENDPOINT_INTERNAL = "endpoint_internal0"
    ENDPOINT_PROMETHEUS = "endpoint_prometheus"
    CONTROL_ENDPOINT = "control_endpoint"
    UN_LOCODE = "un_locode"


class PlacementRule:
    DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
    SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
    REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
    REP_1_FOR_2_NODES_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 2 FROM * AS X"
    DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"
    EC_1_1_FOR_2_NODES_PLACEMENT_RULE = "EC 1.1 IN X CBF 1 SELECT 2 FROM * AS X"
    HTTP_HOSTNAME = "http_hostname"
    S3_HOSTNAME = "s3_hostname"
@@ -1,5 +1,4 @@
import datetime
import itertools
import logging
import time
from typing import TypeVar

@@ -15,7 +14,6 @@ from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_E
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
from frostfs_testlib.steps.network import IpHelper
from frostfs_testlib.steps.node_management import include_node_to_network_map, remove_nodes_from_map_morph
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
from frostfs_testlib.storage.controllers.disk_controller import DiskController
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass

@@ -40,8 +38,7 @@ class ClusterStateController:
    def __init__(self, shell: Shell, cluster: Cluster, healthcheck: Healthcheck) -> None:
        self.stopped_nodes: list[ClusterNode] = []
        self.detached_disks: dict[str, DiskController] = {}
        self.dropped_traffic: set[ClusterNode] = set()
        self.excluded_from_netmap: list[StorageNode] = []
        self.dropped_traffic: list[ClusterNode] = []
        self.stopped_services: set[NodeBase] = set()
        self.cluster = cluster
        self.healthcheck = healthcheck

@@ -173,15 +170,6 @@ class ClusterStateController:
        if service_type == StorageNode:
            self.wait_after_storage_startup()

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step("Send sighup to all {service_type} services")
    def sighup_services_of_type(self, service_type: type[ServiceClass]):
        services = self.cluster.services(service_type)
        parallel([service.send_signal_to_service for service in services], signal="SIGHUP")

        if service_type == StorageNode:
            self.wait_after_storage_startup()

    @wait_for_success(600, 60)
    def wait_s3gate(self, s3gate: S3Gate):
        with reporter.step(f"Wait for {s3gate} reconnection"):

@@ -216,27 +204,21 @@ class ClusterStateController:

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step("Stop {service_type} service on {node}")
    def stop_service_of_type(self, node: ClusterNode, service_type: ServiceClass, mask: bool = True):
    def stop_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass], mask: bool = True):
        service = node.service(service_type)
        service.stop_service(mask)
        self.stopped_services.add(service)

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step("Send sighup to {service_type} service on {node}")
    def sighup_service_of_type(self, node: ClusterNode, service_type: ServiceClass):
        service = node.service(service_type)
        service.send_signal_to_service("SIGHUP")

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step("Start {service_type} service on {node}")
    def start_service_of_type(self, node: ClusterNode, service_type: ServiceClass):
    def start_service_of_type(self, node: ClusterNode, service_type: type[ServiceClass]):
        service = node.service(service_type)
        service.start_service()
        self.stopped_services.discard(service)

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step("Start all stopped {service_type} services")
    def start_stopped_services_of_type(self, service_type: ServiceClass):
    def start_stopped_services_of_type(self, service_type: type[ServiceClass]):
        stopped_svc = self._get_stopped_by_type(service_type)
        if not stopped_svc:
            return
@@ -325,23 +307,30 @@ class ClusterStateController:
        self.suspended_services = {}

    @reporter.step("Drop traffic to {node}, nodes - {block_nodes}")
    def drop_traffic(self, node: ClusterNode, wakeup_timeout: int, name_interface: str, block_nodes: list[ClusterNode] = None) -> None:
        interfaces_tables = self._parse_interfaces(block_nodes, name_interface)
        IpHelper.drop_input_traffic_to_node(node, interfaces_tables)
    def drop_traffic(
        self,
        node: ClusterNode,
        wakeup_timeout: int,
        name_interface: str,
        block_nodes: list[ClusterNode] = None,
    ) -> None:
        list_ip = self._parse_interfaces(block_nodes, name_interface)
        IpHelper.drop_input_traffic_to_node(node, list_ip)
        time.sleep(wakeup_timeout)
        self.dropped_traffic.add(node)
        self.dropped_traffic.append(node)

    @reporter.step("Start traffic to {node}")
    def restore_traffic(self, node: ClusterNode) -> None:
    def restore_traffic(
        self,
        node: ClusterNode,
    ) -> None:
        IpHelper.restore_input_traffic_to_node(node=node)
        self.dropped_traffic.discard(node)

    @reporter.step("Restore blocked nodes")
    def restore_all_traffic(self):
        if not self.dropped_traffic:
            return
        parallel(self._restore_traffic_to_node, self.dropped_traffic)
        self.dropped_traffic.clear()

    @run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
    @reporter.step("Hard reboot host {node} via magic SysRq option")

@@ -419,7 +408,9 @@ class ClusterStateController:
    @reporter.step("Set MaintenanceModeAllowed - {status}")
    def set_maintenance_mode_allowed(self, status: str, cluster_node: ClusterNode) -> None:
        frostfs_adm = FrostfsAdm(
            shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
            shell=cluster_node.host.get_shell(),
            frostfs_adm_exec_path=FROSTFS_ADM_EXEC,
            config_file=FROSTFS_ADM_CONFIG_PATH,
        )
        frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
@@ -449,36 +440,15 @@ class ClusterStateController:
        self.await_node_status(status, wallet, cluster_node)

    @wait_for_success(80, 8, title="Wait for node status become {status}")
    def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode, checker_node: ClusterNode = None):
    def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode):
        frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path)
        if not checker_node:
            checker_node = cluster_node
        netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(checker_node.storage_node.get_rpc_endpoint()).stdout)
        netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(cluster_node.storage_node.get_rpc_endpoint()).stdout)
        netmap = [node for node in netmap if cluster_node.host_ip == node.node]
        if status == NodeStatus.OFFLINE:
            assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline"
        else:
            assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'"

    def remove_node_from_netmap(self, removes_nodes: list[StorageNode]) -> None:
        alive_storage = list(set(self.cluster.storage_nodes) - set(removes_nodes))[0]
        remove_nodes_from_map_morph(self.shell, self.cluster, removes_nodes, alive_storage)
        self.excluded_from_netmap.extend(removes_nodes)

    def include_node_to_netmap(self, include_node: StorageNode, alive_node: StorageNode):
        include_node_to_network_map(include_node, alive_node, self.shell, self.cluster)
        self.excluded_from_netmap.pop(self.excluded_from_netmap.index(include_node))

    def include_all_excluded_nodes(self):
        if not self.excluded_from_netmap:
            return
        alive_node = list(set(self.cluster.storage_nodes) - set(self.excluded_from_netmap))[0]
        if not alive_node:
            return

        for exclude_node in self.excluded_from_netmap.copy():
            self.include_node_to_netmap(exclude_node, alive_node)

    def _get_cli(
        self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode
    ) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:

@@ -495,7 +465,11 @@ class ClusterStateController:

        frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
        frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path)
        frostfs_cli_remote = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config_path)
        frostfs_cli_remote = FrostfsCli(
            shell=shell,
            frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
            config_file=wallet_config_path,
        )
        return frostfs_adm, frostfs_cli, frostfs_cli_remote

    def _enable_date_synchronizer(self, cluster_node: ClusterNode):

@@ -517,31 +491,17 @@ class ClusterStateController:

        return disk_controller

    @reporter.step("Restore traffic {node}")
    def _restore_traffic_to_node(self, node):
        IpHelper.restore_input_traffic_to_node(node)

    def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str) -> list[tuple]:
        interfaces_and_tables = set()
    def _parse_interfaces(self, nodes: list[ClusterNode], name_interface: str):
        interfaces = []
        for node in nodes:
            shell = node.host.get_shell()
            lines = shell.exec(f"ip r l table all | grep '{name_interface}'").stdout.splitlines()

            ips = []
            tables = []

            for line in lines:
                if "src" not in line or "table local" in line:
                    continue
                parts = line.split()
                ips.append(parts[-1])
                if "table" in line:
                    tables.append(parts[parts.index("table") + 1])
                tables.append(None)

            [interfaces_and_tables.add((ip, table)) for ip, table in itertools.product(ips, tables)]

        return interfaces_and_tables
            dict_interfaces = node.host.config.interfaces
            for type, ip in dict_interfaces.items():
                if name_interface in type:
                    interfaces.append(ip)
        return interfaces

    @reporter.step("Ping node")
    def _ping_host(self, node: ClusterNode):
@@ -569,8 +529,3 @@ class ClusterStateController:
        except Exception as err:
            logger.warning(f"Host ping fails with error {err}")
            return HostStatus.ONLINE

    @reporter.step("Get contract by domain - {domain_name}")
    def get_domain_contracts(self, cluster_node: ClusterNode, domain_name: str):
        frostfs_adm = FrostfsAdm(shell=cluster_node.host.get_shell(), frostfs_adm_exec_path=FROSTFS_ADM_EXEC)
        return frostfs_adm.morph.dump_hashes(cluster_node.morph_chain.get_http_endpoint(), domain_name).stdout
@@ -2,22 +2,22 @@ import json
from typing import Any

from frostfs_testlib.cli.frostfs_cli.shards import FrostfsCliShards
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import ClusterNode
from frostfs_testlib.testing.test_control import wait_for_success


class ShardsWatcher:
    shards_snapshots: list[dict[str, Any]] = []

    def __init__(self, node_under_test: ClusterNode) -> None:
        self.shards_snapshots: list[dict[str, Any]] = []
        self.storage_node = node_under_test.storage_node
        self.take_shards_snapshot()

    def take_shards_snapshot(self) -> None:
    def take_shards_snapshot(self):
        snapshot = self.get_shards_snapshot()
        self.shards_snapshots.append(snapshot)

    def get_shards_snapshot(self) -> dict[str, Any]:
    def get_shards_snapshot(self):
        shards_snapshot: dict[str, Any] = {}

        shards = self.get_shards()

@@ -26,17 +26,17 @@ class ShardsWatcher:

        return shards_snapshot

    def _get_current_snapshot(self) -> dict[str, Any]:
    def _get_current_snapshot(self):
        return self.shards_snapshots[-1]

    def _get_previous_snapshot(self) -> dict[str, Any]:
    def _get_previous_snapshot(self):
        return self.shards_snapshots[-2]

    def _is_shard_present(self, shard_id) -> bool:
    def _is_shard_present(self, shard_id):
        snapshot = self._get_current_snapshot()
        return shard_id in snapshot

    def get_shards_with_new_errors(self) -> dict[str, Any]:
    def get_shards_with_new_errors(self):
        current_snapshot = self._get_current_snapshot()
        previous_snapshot = self._get_previous_snapshot()
        shards_with_new_errors: dict[str, Any] = {}
@@ -46,7 +46,7 @@ class ShardsWatcher:

        return shards_with_new_errors

    def get_shards_with_errors(self) -> dict[str, Any]:
    def get_shards_with_errors(self):
        snapshot = self.get_shards_snapshot()
        shards_with_errors: dict[str, Any] = {}
        for shard_id, shard in snapshot.items():

@@ -55,7 +55,7 @@ class ShardsWatcher:

        return shards_with_errors

    def get_shard_status(self, shard_id: str):  # -> Any:
    def get_shard_status(self, shard_id: str):
        snapshot = self.get_shards_snapshot()

        assert shard_id in snapshot, f"Shard {shard_id} is missing: {snapshot}"

@@ -63,18 +63,18 @@ class ShardsWatcher:
        return snapshot[shard_id]["mode"]

    @wait_for_success(60, 2)
    def await_for_all_shards_status(self, status: str) -> None:
    def await_for_all_shards_status(self, status: str):
        snapshot = self.get_shards_snapshot()

        for shard_id in snapshot:
            assert snapshot[shard_id]["mode"] == status, f"Shard {shard_id} have wrong shard status"

    @wait_for_success(60, 2)
    def await_for_shard_status(self, shard_id: str, status: str) -> None:
    def await_for_shard_status(self, shard_id: str, status: str):
        assert self.get_shard_status(shard_id) == status

    @wait_for_success(60, 2)
    def await_for_shard_have_new_errors(self, shard_id: str) -> None:
    def await_for_shard_have_new_errors(self, shard_id: str):
        self.take_shards_snapshot()
        assert self._is_shard_present(shard_id)
        shards_with_new_errors = self.get_shards_with_new_errors()

@@ -82,7 +82,7 @@ class ShardsWatcher:
        assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}"

    @wait_for_success(300, 5)
    def await_for_shards_have_no_new_errors(self) -> None:
    def await_for_shards_have_no_new_errors(self):
        self.take_shards_snapshot()
        shards_with_new_errors = self.get_shards_with_new_errors()
        assert len(shards_with_new_errors) == 0

@@ -102,7 +102,7 @@ class ShardsWatcher:

        return json.loads(response.stdout.split(">", 1)[1])

    def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True) -> CommandResult:
    def set_shard_mode(self, shard_id: str, mode: str, clear_errors: bool = True):
        shards_cli = FrostfsCliShards(
            self.storage_node.host.get_shell(),
            self.storage_node.host.get_cli_config("frostfs-cli").exec_path,
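The watcher's core idea is a snapshot diff: keep the two most recent shard snapshots and report shards whose error counter grew. A self-contained sketch; the `error_count` key is an assumption used for illustration, not confirmed by the diff:

```python
# Two hypothetical consecutive snapshots, keyed by shard ID.
previous = {"shard-a": {"error_count": 0}, "shard-b": {"error_count": 2}}
current = {"shard-a": {"error_count": 1}, "shard-b": {"error_count": 2}}

new_errors = {
    shard_id: shard
    for shard_id, shard in current.items()
    if shard["error_count"] > previous.get(shard_id, {}).get("error_count", 0)
}
print(new_errors)  # -> {'shard-a': {'error_count': 1}}
```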
@@ -14,19 +14,14 @@ class ConfigStateManager(StateManager):
        self.cluster = self.csc.cluster

    @reporter.step("Change configuration for {service_type} on all nodes")
    def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any], sighup: bool = False):
    def set_on_all_nodes(self, service_type: type[ServiceClass], values: dict[str, Any]):
        services = self.cluster.services(service_type)
        nodes = self.cluster.nodes(services)
        self.services_with_changed_config.update([(node, service_type) for node in nodes])

        if not sighup:
            self.csc.stop_services_of_type(service_type)

        self.csc.stop_services_of_type(service_type)
        parallel([node.config(service_type).set for node in nodes], values=values)
        if not sighup:
            self.csc.start_services_of_type(service_type)
        else:
            self.csc.sighup_services_of_type(service_type)
        self.csc.start_services_of_type(service_type)

    @reporter.step("Change configuration for {service_type} on {node}")
    def set_on_node(self, node: ClusterNode, service_type: type[ServiceClass], values: dict[str, Any]):

@@ -37,26 +32,18 @@ class ConfigStateManager(StateManager):
        self.csc.start_service_of_type(node, service_type)

    @reporter.step("Revert all configuration changes")
    def revert_all(self, sighup: bool = False):
    def revert_all(self):
        if not self.services_with_changed_config:
            return

        parallel(self._revert_svc, self.services_with_changed_config, sighup)
        parallel(self._revert_svc, self.services_with_changed_config)
        self.services_with_changed_config.clear()

        if not sighup:
            self.csc.start_all_stopped_services()
        self.csc.start_all_stopped_services()

    # TODO: parallel can't have multiple parallel_items :(
    @reporter.step("Revert all configuration {node_and_service}")
    def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass], sighup: bool = False):
    def _revert_svc(self, node_and_service: tuple[ClusterNode, ServiceClass]):
        node, service_type = node_and_service
        service = node.service(service_type)

        if not sighup:
            self.csc.stop_service_of_type(node, service_type)

        self.csc.stop_service_of_type(node, service_type)
        node.config(service_type).revert()

        if sighup:
            service.send_signal_to_service("SIGHUP")
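In the retained variant, every configuration change follows a stop, set, start cycle, and `revert_all` undoes all tracked changes at once. A usage sketch, assuming the manager is constructed around a ClusterStateController as in this module (the constructor signature and config key are assumptions):

```python
# Hypothetical usage; StorageNode and csc come from the surrounding test setup.
manager = ConfigStateManager(csc)
manager.set_on_all_nodes(StorageNode, {"logger.level": "debug"})  # stop -> set -> start
# ... run the scenario under test ...
manager.revert_all()  # revert configs, then restart anything still stopped
```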
@@ -1,154 +0,0 @@
import logging
from dataclasses import dataclass
from enum import Enum
from typing import Optional

from frostfs_testlib.testing.readable import HumanReadableEnum
from frostfs_testlib.utils import string_utils

logger = logging.getLogger("NeoLogger")
EACL_LIFETIME = 100500
FROSTFS_CONTRACT_CACHE_TIMEOUT = 30


class ObjectOperations(HumanReadableEnum):
    PUT = "object.put"
    PATCH = "object.patch"
    GET = "object.get"
    HEAD = "object.head"
    GET_RANGE = "object.range"
    GET_RANGE_HASH = "object.hash"
    SEARCH = "object.search"
    DELETE = "object.delete"
    WILDCARD_ALL = "object.*"

    @staticmethod
    def get_all():
        return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL]


class ContainerOperations(HumanReadableEnum):
    PUT = "container.put"
    GET = "container.get"
    LIST = "container.list"
    DELETE = "container.delete"
    WILDCARD_ALL = "container.*"

    @staticmethod
    def get_all():
        return [op for op in ObjectOperations if op != ObjectOperations.WILDCARD_ALL]


@dataclass
class Operations:
    GET_CONTAINER = "GetContainer"
    PUT_CONTAINER = "PutContainer"
    DELETE_CONTAINER = "DeleteContainer"
    LIST_CONTAINER = "ListContainers"
    GET_OBJECT = "GetObject"
    DELETE_OBJECT = "DeleteObject"
    HASH_OBJECT = "HashObject"
    RANGE_OBJECT = "RangeObject"
    SEARCH_OBJECT = "SearchObject"
    HEAD_OBJECT = "HeadObject"
    PUT_OBJECT = "PutObject"
    PATCH_OBJECT = "PatchObject"


class Verb(HumanReadableEnum):
    ALLOW = "allow"
    DENY = "deny"


class Role(HumanReadableEnum):
    OWNER = "owner"
    IR = "ir"
    CONTAINER = "container"
    OTHERS = "others"


class ConditionType(HumanReadableEnum):
    RESOURCE = "ResourceCondition"
    REQUEST = "RequestCondition"


# See https://git.frostfs.info/TrueCloudLab/policy-engine/src/branch/master/schema/native/consts.go#L40-L53
class ConditionKey(HumanReadableEnum):
    ROLE = '"\\$Actor:role"'
    PUBLIC_KEY = '"\\$Actor:publicKey"'
    OBJECT_TYPE = '"\\$Object:objectType"'
    OBJECT_ID = '"\\$Object:objectID"'


class MatchType(HumanReadableEnum):
    EQUAL = "="
    NOT_EQUAL = "!="


@dataclass
class Condition:
    condition_key: ConditionKey | str
    condition_value: str
    condition_type: ConditionType = ConditionType.REQUEST
    match_type: MatchType = MatchType.EQUAL

    def as_string(self):
        key = self.condition_key.value if isinstance(self.condition_key, ConditionKey) else self.condition_key
        value = self.condition_value.value if isinstance(self.condition_value, Enum) else self.condition_value

        return f"{self.condition_type.value}:{key}{self.match_type.value}{value}"

    @staticmethod
    def by_role(*args, **kwargs) -> "Condition":
        return Condition(ConditionKey.ROLE, *args, **kwargs)

    @staticmethod
    def by_key(*args, **kwargs) -> "Condition":
        return Condition(ConditionKey.PUBLIC_KEY, *args, **kwargs)

    @staticmethod
    def by_object_type(*args, **kwargs) -> "Condition":
        return Condition(ConditionKey.OBJECT_TYPE, *args, **kwargs)

    @staticmethod
    def by_object_id(*args, **kwargs) -> "Condition":
        return Condition(ConditionKey.OBJECT_ID, *args, **kwargs)


class Rule:
    def __init__(
        self,
        access: Verb,
        operations: list[ObjectOperations] | ObjectOperations,
        conditions: list[Condition] | Condition = None,
        chain_id: Optional[str] = None,
    ) -> None:
        self.access = access
        self.operations = operations

        if not conditions:
            self.conditions = []
        elif isinstance(conditions, Condition):
            self.conditions = [conditions]
        else:
            self.conditions = conditions

        if not isinstance(self.conditions, list):
            raise RuntimeError("Conditions must be a list")

        if not operations:
            self.operations = []
        elif isinstance(operations, (ObjectOperations, ContainerOperations)):
            self.operations = [operations]
        else:
            self.operations = operations

        if not isinstance(self.operations, list):
            raise RuntimeError("Operations must be a list")

        self.chain_id = chain_id if chain_id else string_utils.unique_name("chain-id-")

    def as_string(self):
        conditions = " ".join([cond.as_string() for cond in self.conditions])
        operations = " ".join([op.value for op in self.operations])
        return f"{self.access.value} {operations} {conditions} *"
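To make the rule DSL in this removed module concrete, here is a hedged example of composing a rule with its classes and rendering it to the string form `as_string` produces; the exact output shown in the comment is derived from the definitions above, not from a captured run:

```python
# Allow others to GET and HEAD objects; Role.OTHERS.value supplies "others".
rule = Rule(
    access=Verb.ALLOW,
    operations=[ObjectOperations.GET, ObjectOperations.HEAD],
    conditions=Condition.by_role(Role.OTHERS.value),
)
print(rule.as_string())
# expected: allow object.get object.head RequestCondition:"\$Actor:role"=others *
```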
@@ -39,18 +39,12 @@ class S3Gate(NodeBase):
    def get_endpoint(self) -> str:
        return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0)

    def get_ns_endpoint(self, ns_name: str) -> str:
        return self._get_attribute(f"{ConfigAttributes.ENDPOINT_DATA_0}_namespace").format(namespace=ns_name)

    def get_all_endpoints(self) -> list[str]:
        return [
            self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0),
            self._get_attribute(ConfigAttributes.ENDPOINT_DATA_1),
        ]

    def get_ns_endpoint(self, ns_name: str) -> str:
        return self._get_attribute(ConfigAttributes.ENDPOINT_DATA_0_NS).format(namespace=ns_name)

    def service_healthcheck(self) -> bool:
        health_metric = "frostfs_s3_gw_state_health"
        output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout

@@ -160,6 +154,15 @@ class StorageNode(NodeBase):
    def get_data_directory(self) -> str:
        return self.host.get_data_directory(self.name)

    def get_storage_config(self) -> str:
        return self.host.get_storage_config(self.name)

    def get_http_hostname(self) -> list[str]:
        return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME)

    def get_s3_hostname(self) -> list[str]:
        return self._get_attribute(ConfigAttributes.S3_HOSTNAME)

    def delete_blobovnicza(self):
        self.host.delete_blobovnicza(self.name)


@@ -65,10 +65,6 @@ class NodeBase(HumanReadableABC):
        with reporter.step(f"Start {self.name} service on {self.host.config.address}"):
            self.host.start_service(self.name)

    def send_signal_to_service(self, signal: str):
        with reporter.step(f"Send -{signal} signal to {self.name} service on {self.host.config.address}"):
            self.host.send_signal_to_service(self.name, signal)

    @abstractmethod
    def service_healthcheck(self) -> bool:
        """Service healthcheck."""

@@ -189,7 +185,9 @@ class NodeBase(HumanReadableABC):

        if attribute_name not in config.attributes:
            if default_attribute_name is None:
                raise RuntimeError(f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either")
                raise RuntimeError(
                    f"Service {self.name} has no {attribute_name} in config and fallback attribute isn't set either"
                )

            return config.attributes[default_attribute_name]

@@ -199,7 +197,9 @@ class NodeBase(HumanReadableABC):
        return self.host.get_service_config(self.name)

    def get_service_uptime(self, service: str) -> datetime:
        result = self.host.get_shell().exec(f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2")
        result = self.host.get_shell().exec(
            f"systemctl show {service} --property ActiveEnterTimestamp | cut -d '=' -f 2"
        )
        start_time = parser.parse(result.stdout.strip())
        current_time = datetime.now(tz=timezone.utc)
        active_time = current_time - start_time
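The uptime computation above parses systemd's `ActiveEnterTimestamp` and subtracts it from the current UTC time. A self-contained sketch with a hypothetical timestamp value:

```python
from datetime import datetime, timezone
from dateutil import parser

stdout = "Tue 2024-03-05 10:00:00 UTC"          # hypothetical ActiveEnterTimestamp
start_time = parser.parse(stdout.strip())        # timezone-aware datetime
active_time = datetime.now(tz=timezone.utc) - start_time  # timedelta since service start
print(active_time.total_seconds() > 0)           # -> True for any past timestamp
```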
|
@ -70,26 +70,8 @@ class NodeNetInfo:
|
|||
epoch_duration: str = None
|
||||
inner_ring_candidate_fee: str = None
|
||||
maximum_object_size: str = None
|
||||
maximum_count_of_data_shards: str = None
|
||||
maximum_count_of_parity_shards: str = None
|
||||
withdrawal_fee: str = None
|
||||
homomorphic_hashing_disabled: str = None
|
||||
maintenance_mode_allowed: str = None
|
||||
eigen_trust_alpha: str = None
|
||||
eigen_trust_iterations: str = None
|
||||
|
||||
|
||||
@dataclass
|
||||
class Chunk:
|
||||
def __init__(self, object_id: str, required_nodes: list, confirmed_nodes: list, ec_parent_object_id: str, ec_index: int) -> None:
|
||||
self.object_id = object_id
|
||||
self.required_nodes = required_nodes
|
||||
self.confirmed_nodes = confirmed_nodes
|
||||
self.ec_parent_object_id = ec_parent_object_id
|
||||
self.ec_index = ec_index
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self.object_id
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return self.object_id
|
||||
|
|
|
@ -1,14 +0,0 @@
|
|||
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||
from frostfs_testlib.storage.grpc_operations import interfaces
|
||||
from frostfs_testlib.storage.grpc_operations.implementations import container, object
|
||||
|
||||
|
||||
class CliClientWrapper(interfaces.GrpcClientWrapper):
|
||||
def __init__(self, cli: FrostfsCli) -> None:
|
||||
self.cli = cli
|
||||
self.object: interfaces.ObjectInterface = object.ObjectOperations(self.cli)
|
||||
self.container: interfaces.ContainerInterface = container.ContainerOperations(self.cli)
|
||||
|
||||
|
||||
class RpcClientWrapper(interfaces.GrpcClientWrapper):
|
||||
pass # The next series
|
|
@@ -1,165 +0,0 @@
import json
from typing import Optional

from frostfs_testlib import reporter
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.controllers.shards_watcher import ShardsWatcher
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils.cli_utils import parse_netmap_output


class ChunksOperations(interfaces.ChunksInterface):
    def __init__(self, cli: FrostfsCli) -> None:
        self.cli = cli

    @reporter.step("Search node without chunks")
    def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
        if not endpoint:
            endpoint = cluster.default_rpc_endpoint
        netmap = parse_netmap_output(self.cli.netmap.snapshot(endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout)
        chunks_node_key = []
        for chunk in chunks:
            chunks_node_key.extend(chunk.confirmed_nodes)
        for node_info in netmap.copy():
            if node_info.node_id in chunks_node_key and node_info in netmap:
                netmap.remove(node_info)
        result = []
        for node_info in netmap:
            for cluster_node in cluster.cluster_nodes:
                if node_info.node == cluster_node.host_ip:
                    result.append(cluster_node)
        return result

    @reporter.step("Search node with chunk {chunk}")
    def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
        netmap = parse_netmap_output(self.cli.netmap.snapshot(cluster.default_rpc_endpoint, timeout=CLI_DEFAULT_TIMEOUT).stdout)
        for node_info in netmap:
            if node_info.node_id in chunk.confirmed_nodes:
                for cluster_node in cluster.cluster_nodes:
                    if cluster_node.host_ip == node_info.node:
                        return (cluster_node, node_info)

    @wait_for_success(300, 5, fail_testcase=None)
    @reporter.step("Search shard with chunk {chunk}")
    def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
        oid_path = f"{chunk.object_id[0]}/{chunk.object_id[1]}/{chunk.object_id[2]}/{chunk.object_id[3]}"
        node_shell = node.storage_node.host.get_shell()
        shards_watcher = ShardsWatcher(node)

        with reporter.step("Search object file"):
            for shard_id, shard_info in shards_watcher.shards_snapshots[-1].items():
                check_dir = node_shell.exec(f" [ -d {shard_info['blobstor'][1]['path']}/{oid_path} ] && echo 1 || echo 0").stdout
                if "1" in check_dir.strip():
                    return shard_id

    @reporter.step("Get all chunks")
    def get_all(
        self,
        rpc_endpoint: str,
        cid: str,
        oid: str,
        address: Optional[str] = None,
        bearer: Optional[str] = None,
        generate_key: Optional[bool] = None,
        trace: bool = True,
        root: bool = False,
        verify_presence_all: bool = False,
        json: bool = True,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> list[Chunk]:
        object_nodes = self.cli.object.nodes(
            rpc_endpoint=rpc_endpoint,
            cid=cid,
            address=address,
            bearer=bearer,
            generate_key=generate_key,
            oid=oid,
            trace=trace,
            root=root,
            verify_presence_all=verify_presence_all,
            json=json,
            ttl=ttl,
            xhdr=xhdr,
            timeout=timeout,
        )
        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])

    @reporter.step("Get last parity chunk")
    def get_parity(
        self,
        rpc_endpoint: str,
        cid: str,
        address: Optional[str] = None,
        bearer: Optional[str] = None,
        generate_key: Optional[bool] = None,
        oid: Optional[str] = None,
        trace: bool = True,
        root: bool = False,
        verify_presence_all: bool = False,
        json: bool = True,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> Chunk:
        object_nodes = self.cli.object.nodes(
            rpc_endpoint=rpc_endpoint,
            cid=cid,
            address=address,
            bearer=bearer,
            generate_key=generate_key,
            oid=oid,
            trace=trace,
            root=root,
            verify_presence_all=verify_presence_all,
            json=json,
            ttl=ttl,
            xhdr=xhdr,
            timeout=timeout,
        )
        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[-1]

    @reporter.step("Get first data chunk")
    def get_first_data(
        self,
        rpc_endpoint: str,
        cid: str,
        oid: Optional[str] = None,
        address: Optional[str] = None,
        bearer: Optional[str] = None,
        generate_key: Optional[bool] = None,
        trace: bool = True,
        root: bool = False,
        verify_presence_all: bool = False,
        json: bool = True,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
    ) -> Chunk:
        object_nodes = self.cli.object.nodes(
            rpc_endpoint=rpc_endpoint,
            cid=cid,
            address=address,
            bearer=bearer,
            generate_key=generate_key,
            oid=oid,
            trace=trace,
            root=root,
            verify_presence_all=verify_presence_all,
            json=json,
            ttl=ttl,
            xhdr=xhdr,
            timeout=timeout,
        )
        return self._parse_object_nodes(object_nodes.stdout.split("\n")[0])[0]

    def _parse_object_nodes(self, object_nodes: str) -> list[Chunk]:
        parse_result = json.loads(object_nodes)
        if parse_result.get("errors"):
            raise parse_result["errors"]
        return [Chunk(**chunk) for chunk in parse_result["data_objects"]]
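The `_parse_object_nodes` helper above consumes the first stdout line of `frostfs-cli object nodes`, which is a JSON document whose `data_objects` entries map one-to-one onto `Chunk` fields. A self-contained sketch with a hypothetical payload (the original raises the raw `errors` value; a RuntimeError is used here so the sketch runs standalone):

```python
import json

stdout_first_line = (
    '{"data_objects": [{"object_id": "abc", "required_nodes": [], '
    '"confirmed_nodes": ["key1"], "ec_parent_object_id": null, "ec_index": 0}], "errors": null}'
)
parsed = json.loads(stdout_first_line)
if parsed.get("errors"):
    raise RuntimeError(parsed["errors"])
chunks = parsed["data_objects"]        # each dict maps onto Chunk(**chunk)
print(chunks[0]["object_id"])          # -> "abc"
```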
@ -1,327 +0,0 @@
|
|||
import json
|
||||
import logging
|
||||
import re
|
||||
from typing import List, Optional, Union
|
||||
|
||||
from frostfs_testlib import reporter
|
||||
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
|
||||
from frostfs_testlib.clients.s3 import BucketContainerResolver
|
||||
from frostfs_testlib.plugins import load_plugin
|
||||
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
|
||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||
from frostfs_testlib.storage.grpc_operations import interfaces
|
||||
from frostfs_testlib.utils import json_utils
|
||||
|
||||
logger = logging.getLogger("NeoLogger")
|
||||
|
||||
|
||||
class ContainerOperations(interfaces.ContainerInterface):
|
||||
def __init__(self, cli: FrostfsCli) -> None:
|
||||
self.cli = cli
|
||||
|
||||
@reporter.step("Create Container")
|
||||
def create(
|
||||
self,
|
||||
endpoint: str,
|
||||
nns_zone: Optional[str] = None,
|
||||
nns_name: Optional[str] = None,
|
||||
address: Optional[str] = None,
|
||||
attributes: Optional[dict] = None,
|
||||
basic_acl: Optional[str] = None,
|
||||
await_mode: bool = False,
|
||||
disable_timestamp: bool = False,
|
||||
force: bool = False,
|
||||
trace: bool = False,
|
||||
name: Optional[str] = None,
|
||||
nonce: Optional[str] = None,
|
||||
policy: Optional[str] = None,
|
||||
session: Optional[str] = None,
|
||||
subnet: Optional[str] = None,
|
||||
ttl: Optional[int] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
A wrapper for `frostfs-cli container create` call.
|
||||
|
||||
Args:
|
||||
wallet (WalletInfo): a wallet on whose behalf a container is created
|
||||
rule (optional, str): placement rule for container
|
||||
basic_acl (optional, str): an ACL for container, will be
|
||||
appended to `--basic-acl` key
|
||||
attributes (optional, dict): container attributes , will be
|
||||
appended to `--attributes` key
|
||||
session_token (optional, str): a path to session token file
|
||||
session_wallet(optional, str): a path to the wallet which signed
|
||||
the session token; this parameter makes sense
|
||||
when paired with `session_token`
|
||||
shell: executor for cli command
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
options (optional, dict): any other options to pass to the call
|
||||
name (optional, str): container name attribute
|
||||
await_mode (bool): block execution until container is persisted
|
||||
wait_for_creation (): Wait for container shows in container list
|
||||
timeout: Timeout for the operation.
|
||||
|
||||
Returns:
|
||||
(str): CID of the created container
|
||||
"""
|
||||
result = self.cli.container.create(
|
||||
rpc_endpoint=endpoint,
|
||||
policy=policy,
|
||||
nns_zone=nns_zone,
|
||||
nns_name=nns_name,
|
||||
address=address,
|
||||
attributes=attributes,
|
||||
basic_acl=basic_acl,
|
||||
await_mode=await_mode,
|
||||
disable_timestamp=disable_timestamp,
|
||||
force=force,
|
||||
trace=trace,
|
||||
name=name,
|
||||
nonce=nonce,
|
||||
session=session,
|
||||
subnet=subnet,
|
||||
ttl=ttl,
|
||||
xhdr=xhdr,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
cid = self._parse_cid(result.stdout)
|
||||
|
||||
logger.info("Container created; waiting until it is persisted in the sidechain")
|
||||
|
||||
return cid
|
||||
|
||||
@reporter.step("List Containers")
|
||||
def list(
|
||||
self,
|
||||
endpoint: str,
|
||||
name: Optional[str] = None,
|
||||
address: Optional[str] = None,
|
||||
generate_key: Optional[bool] = None,
|
||||
owner: Optional[str] = None,
|
||||
ttl: Optional[int] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
**params,
|
||||
) -> List[str]:
|
||||
"""
|
||||
A wrapper for `frostfs-cli container list` call. It returns all the
|
||||
available containers for the given wallet.
|
||||
Args:
|
||||
shell: executor for cli command
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(list): list of containers
|
||||
"""
|
||||
result = self.cli.container.list(
|
||||
rpc_endpoint=endpoint,
|
||||
name=name,
|
||||
address=address,
|
||||
generate_key=generate_key,
|
||||
owner=owner,
|
||||
ttl=ttl,
|
||||
xhdr=xhdr,
|
||||
timeout=timeout,
|
||||
**params,
|
||||
)
|
||||
return result.stdout.split()
|
||||
|
||||
@reporter.step("List Objects in container")
|
||||
def list_objects(
|
||||
self,
|
||||
endpoint: str,
|
||||
cid: str,
|
||||
bearer: Optional[str] = None,
|
||||
wallet: Optional[str] = None,
|
||||
address: Optional[str] = None,
|
||||
generate_key: Optional[bool] = None,
|
||||
trace: bool = False,
|
||||
ttl: Optional[int] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> List[str]:
|
||||
"""
|
||||
A wrapper for `frostfs-cli container list-objects` call. It returns all the
|
||||
available objects in container.
|
||||
Args:
|
||||
container_id: cid of container
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(list): list of containers
|
||||
"""
|
||||
result = self.cli.container.list_objects(
|
||||
rpc_endpoint=endpoint,
|
||||
cid=cid,
|
||||
bearer=bearer,
|
||||
wallet=wallet,
|
||||
address=address,
|
||||
generate_key=generate_key,
|
||||
trace=trace,
|
||||
ttl=ttl,
|
||||
xhdr=xhdr,
|
||||
timeout=timeout,
|
||||
)
|
||||
logger.info(f"Container objects: \n{result}")
|
||||
return result.stdout.split()
|
||||
|
||||
@reporter.step("Delete container")
|
||||
def delete(
|
||||
self,
|
||||
endpoint: str,
|
||||
cid: str,
|
||||
        address: Optional[str] = None,
        await_mode: bool = False,
        session: Optional[str] = None,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        force: bool = False,
        trace: bool = False,
    ):
        return self.cli.container.delete(
            rpc_endpoint=endpoint,
            cid=cid,
            address=address,
            await_mode=await_mode,
            session=session,
            ttl=ttl,
            xhdr=xhdr,
            force=force,
            trace=trace,
        ).stdout

    @reporter.step("Get container")
    def get(
        self,
        endpoint: str,
        cid: str,
        address: Optional[str] = None,
        generate_key: Optional[bool] = None,
        await_mode: bool = False,
        to: Optional[str] = None,
        json_mode: bool = True,
        trace: bool = False,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
    ) -> Union[dict, str]:
        result = self.cli.container.get(
            rpc_endpoint=endpoint,
            cid=cid,
            address=address,
            generate_key=generate_key,
            await_mode=await_mode,
            to=to,
            json_mode=json_mode,
            trace=trace,
            ttl=ttl,
            xhdr=xhdr,
            timeout=timeout,
        )
        container_info = json.loads(result.stdout)
        attributes = dict()
        for attr in container_info["attributes"]:
            attributes[attr["key"]] = attr["value"]
        container_info["attributes"] = attributes
        container_info["ownerID"] = json_utils.json_reencode(container_info["ownerID"]["value"])
        return container_info

    @reporter.step("Get eacl container")
    def get_eacl(
        self,
        endpoint: str,
        cid: str,
        address: Optional[str] = None,
        generate_key: Optional[bool] = None,
        await_mode: bool = False,
        json_mode: bool = True,
        trace: bool = False,
        to: Optional[str] = None,
        session: Optional[str] = None,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
    ):
        return self.cli.container.get_eacl(
            rpc_endpoint=endpoint,
            cid=cid,
            address=address,
            generate_key=generate_key,
            await_mode=await_mode,
            to=to,
            session=session,
            ttl=ttl,
            xhdr=xhdr,
            timeout=timeout,
        ).stdout

    @reporter.step("Get nodes container")
    def nodes(
        self,
        endpoint: str,
        cid: str,
        cluster: Cluster,
        address: Optional[str] = None,
        ttl: Optional[int] = None,
        from_file: Optional[str] = None,
        trace: bool = False,
        short: Optional[bool] = True,
        xhdr: Optional[dict] = None,
        generate_key: Optional[bool] = None,
        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
    ) -> List[ClusterNode]:
        result = self.cli.container.search_node(
            rpc_endpoint=endpoint,
            cid=cid,
            address=address,
            ttl=ttl,
            from_file=from_file,
            trace=trace,
            short=short,
            xhdr=xhdr,
            generate_key=generate_key,
            timeout=timeout,
        ).stdout

        pattern = r"[0-9]+(?:\.[0-9]+){3}"
        nodes_ip = list(set(re.findall(pattern, result)))

        with reporter.step(f"nodes ips = {nodes_ip}"):
            nodes_list = cluster.get_nodes_by_ip(nodes_ip)

        with reporter.step(f"Return nodes - {nodes_list}"):
            return nodes_list

    @reporter.step("Resolve container by name")
    def resolve_container_by_name(name: str, node: ClusterNode):
        resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product)
        resolver: BucketContainerResolver = resolver_cls()
        return resolver.resolve(node, name)

    def _parse_cid(self, output: str) -> str:
        """
        Parses container ID from a given CLI output. Expected input format:
            container ID: 2tz86kVTDpJxWHrhw3h6PbKMwkLtBEwoqhHQCKTre1FN
            awaiting...
            container has been persisted on sidechain
        We want to take the 'container ID' value from the string.

        Args:
            output (str): CLI output to parse

        Returns:
            (str): extracted CID
        """
        try:
            # taking first line from command's output
            first_line = output.split("\n")[0]
        except Exception:
            first_line = ""
            logger.error(f"Got empty output: {output}")
        splitted = first_line.split(": ")
        if len(splitted) != 2:
            raise ValueError(f"no CID was parsed from command output: \t{first_line}")
        return splitted[1]
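
# Example (illustrative sketch, names assumed, not part of the diff): with `containers`
# being an instance of this CLI-backed wrapper, `get` returns container info as a dict
# with the attribute list flattened into a plain mapping and `ownerID` re-encoded:
#
#   info = containers.get(endpoint=rpc_endpoint, cid=cid)
#   attrs = info["attributes"]   # {"key": "value", ...}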
@@ -1,707 +0,0 @@
import json
import logging
import os
import re
import uuid
from typing import Any, Optional

from frostfs_testlib import reporter, utils
from frostfs_testlib.cli.frostfs_cli.cli import FrostfsCli
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.grpc_operations import interfaces
from frostfs_testlib.storage.grpc_operations.implementations.chunks import ChunksOperations
from frostfs_testlib.testing.test_control import wait_for_success
from frostfs_testlib.utils import cli_utils, file_utils

logger = logging.getLogger("NeoLogger")


class ObjectOperations(interfaces.ObjectInterface):
    def __init__(self, cli: FrostfsCli) -> None:
        self.cli = cli
        self.chunks: interfaces.ChunksInterface = ChunksOperations(self.cli)

    @reporter.step("Delete object")
    def delete(
        self,
        cid: str,
        oid: str,
        endpoint: str,
        bearer: str = "",
        xhdr: Optional[dict] = None,
        session: Optional[str] = None,
        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
    ) -> str:
        """
        DELETE an Object.

        Args:
            cid: ID of Container where we get the Object from
            oid: ID of Object we are going to delete
            bearer: path to Bearer Token file, appends to `--bearer` key
            endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
            xhdr: Request X-Headers in form of Key=Value
            session: path to a JSON-encoded container session token
            timeout: Timeout for the operation.
        Returns:
            (str): Tombstone ID
        """
        result = self.cli.object.delete(
            rpc_endpoint=endpoint,
            cid=cid,
            oid=oid,
            bearer=bearer,
            xhdr=xhdr,
            session=session,
            timeout=timeout,
        )

        id_str = result.stdout.split("\n")[1]
        tombstone = id_str.split(":")[1]
        return tombstone.strip()
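
    # Example (illustrative sketch): assuming `ops` is an ObjectOperations bound to a
    # configured FrostfsCli, DELETE returns the tombstone ID parsed from the CLI output:
    #
    #   tombstone_oid = ops.delete(cid=cid, oid=oid, endpoint=rpc_endpoint)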
@reporter.step("Get object")
|
||||
def get(
|
||||
self,
|
||||
cid: str,
|
||||
oid: str,
|
||||
endpoint: str,
|
||||
bearer: Optional[str] = None,
|
||||
write_object: Optional[str] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
no_progress: bool = True,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> file_utils.TestFile:
|
||||
"""
|
||||
GET from FrostFS.
|
||||
|
||||
Args:
|
||||
cid (str): ID of Container where we get the Object from
|
||||
oid (str): Object ID
|
||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||
write_object: path to downloaded file, appends to `--file` key
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
no_progress(optional, bool): do not show progress bar
|
||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||
session (optional, dict): path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(str): path to downloaded file
|
||||
"""
|
||||
if not write_object:
|
||||
write_object = str(uuid.uuid4())
|
||||
test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, write_object))
|
||||
|
||||
self.cli.object.get(
|
||||
rpc_endpoint=endpoint,
|
||||
cid=cid,
|
||||
oid=oid,
|
||||
file=test_file,
|
||||
bearer=bearer,
|
||||
no_progress=no_progress,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
return test_file
|
||||
|
||||
@reporter.step("Get object from random node")
|
||||
def get_from_random_node(
|
||||
self,
|
||||
cid: str,
|
||||
oid: str,
|
||||
cluster: Cluster,
|
||||
bearer: Optional[str] = None,
|
||||
write_object: Optional[str] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
no_progress: bool = True,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
GET from FrostFS random storage node
|
||||
|
||||
Args:
|
||||
cid: ID of Container where we get the Object from
|
||||
oid: Object ID
|
||||
cluster: cluster object
|
||||
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
|
||||
write_object (optional, str): path to downloaded file, appends to `--file` key
|
||||
no_progress(optional, bool): do not show progress bar
|
||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||
session (optional, dict): path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(str): path to downloaded file
|
||||
"""
|
||||
endpoint = cluster.get_random_storage_rpc_endpoint()
|
||||
return self.get(
|
||||
cid,
|
||||
oid,
|
||||
endpoint,
|
||||
bearer,
|
||||
write_object,
|
||||
xhdr,
|
||||
no_progress,
|
||||
session,
|
||||
timeout,
|
||||
)
|
||||
|
||||
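
    # Example (illustrative sketch): fetching through a random storage node; the
    # endpoint is picked from the cluster and the local download path is returned:
    #
    #   path = ops.get_from_random_node(cid=cid, oid=oid, cluster=cluster)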
@reporter.step("Get hash object")
|
||||
def hash(
|
||||
self,
|
||||
rpc_endpoint: str,
|
||||
cid: str,
|
||||
oid: str,
|
||||
address: Optional[str] = None,
|
||||
bearer: Optional[str] = None,
|
||||
generate_key: Optional[bool] = None,
|
||||
range: Optional[str] = None,
|
||||
salt: Optional[str] = None,
|
||||
ttl: Optional[int] = None,
|
||||
session: Optional[str] = None,
|
||||
hash_type: Optional[str] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
Get object hash.
|
||||
|
||||
Args:
|
||||
address: Address of wallet account.
|
||||
bearer: File with signed JSON or binary encoded bearer token.
|
||||
cid: Container ID.
|
||||
generate_key: Generate new private key.
|
||||
oid: Object ID.
|
||||
range: Range to take hash from in the form offset1:length1,...
|
||||
rpc_endpoint: Remote node address (as 'multiaddr' or '<host>:<port>').
|
||||
salt: Salt in hex format.
|
||||
ttl: TTL value in request meta header (default 2).
|
||||
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
|
||||
hash_type: Hash type. Either 'sha256' or 'tz' (default "sha256").
|
||||
wallet: WIF (NEP-2) string or path to the wallet or binary key.
|
||||
xhdr: Dict with request X-Headers.
|
||||
timeout: Timeout for the operation (default 15s).
|
||||
|
||||
Returns:
|
||||
Command's result.
|
||||
"""
|
||||
result = self.cli.object.hash(
|
||||
rpc_endpoint=rpc_endpoint,
|
||||
cid=cid,
|
||||
oid=oid,
|
||||
address=address,
|
||||
bearer=bearer,
|
||||
generate_key=generate_key,
|
||||
range=range,
|
||||
salt=salt,
|
||||
ttl=ttl,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
hash_type=hash_type,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
if range:
|
||||
# Cut off the range and return only hash
|
||||
return result.stdout.split(":")[1].strip()
|
||||
|
||||
return result.stdout
|
||||
|
||||
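
    # Example (illustrative sketch): with `range` set, only the bare hash string is
    # returned, the leading range label being cut off from the CLI output:
    #
    #   payload_hash = ops.hash(rpc_endpoint=rpc_endpoint, cid=cid, oid=oid,
    #                           range="0:16", hash_type="sha256")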
@reporter.step("Head object")
|
||||
def head(
|
||||
self,
|
||||
cid: str,
|
||||
oid: str,
|
||||
endpoint: str,
|
||||
bearer: str = "",
|
||||
xhdr: Optional[dict] = None,
|
||||
json_output: bool = True,
|
||||
is_raw: bool = False,
|
||||
is_direct: bool = False,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> CommandResult | Any:
|
||||
"""
|
||||
HEAD an Object.
|
||||
|
||||
Args:
|
||||
cid (str): ID of Container where we get the Object from
|
||||
oid (str): ObjectID to HEAD
|
||||
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
|
||||
endpoint(optional, str): FrostFS endpoint to send request to
|
||||
json_output(optional, bool): return response in JSON format or not; this flag
|
||||
turns into `--json` key
|
||||
is_raw(optional, bool): send "raw" request or not; this flag
|
||||
turns into `--raw` key
|
||||
is_direct(optional, bool): send request directly to the node or not; this flag
|
||||
turns into `--ttl 1` key
|
||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||
session (optional, dict): path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
depending on the `json_output` parameter value, the function returns
|
||||
(dict): HEAD response in JSON format
|
||||
or
|
||||
(str): HEAD response as a plain text
|
||||
"""
|
||||
result = self.cli.object.head(
|
||||
rpc_endpoint=endpoint,
|
||||
cid=cid,
|
||||
oid=oid,
|
||||
bearer=bearer,
|
||||
json_mode=json_output,
|
||||
raw=is_raw,
|
||||
ttl=1 if is_direct else None,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
if not json_output:
|
||||
return result
|
||||
|
||||
try:
|
||||
decoded = json.loads(result.stdout)
|
||||
except Exception as exc:
|
||||
# If we failed to parse output as JSON, the cause might be
|
||||
# the plain text string in the beginning of the output.
|
||||
# Here we cut off first string and try to parse again.
|
||||
logger.info(f"failed to parse output: {exc}")
|
||||
logger.info("parsing output in another way")
|
||||
fst_line_idx = result.stdout.find("\n")
|
||||
decoded = json.loads(result.stdout[fst_line_idx:])
|
||||
|
||||
# if response
|
||||
if "chunks" in decoded.keys():
|
||||
logger.info("decoding ec chunks")
|
||||
return decoded["chunks"]
|
||||
|
||||
# If response is Complex Object header, it has `splitId` key
|
||||
if "splitId" in decoded.keys():
|
||||
logger.info("decoding split header")
|
||||
return utils.json_utils.decode_split_header(decoded)
|
||||
|
||||
# If response is Last or Linking Object header,
|
||||
# it has `header` dictionary and non-null `split` dictionary
|
||||
if "split" in decoded["header"].keys():
|
||||
if decoded["header"]["split"]:
|
||||
logger.info("decoding linking object")
|
||||
return utils.json_utils.decode_linking_object(decoded)
|
||||
|
||||
if decoded["header"]["objectType"] == "STORAGE_GROUP":
|
||||
logger.info("decoding storage group")
|
||||
return utils.json_utils.decode_storage_group(decoded)
|
||||
|
||||
if decoded["header"]["objectType"] == "TOMBSTONE":
|
||||
logger.info("decoding tombstone")
|
||||
return utils.json_utils.decode_tombstone(decoded)
|
||||
|
||||
logger.info("decoding simple header")
|
||||
return utils.json_utils.decode_simple_header(decoded)
|
||||
|
||||
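
    # Example (illustrative sketch): a plain object HEAD returns the decoded simple
    # header; EC ("chunks"), split, tombstone and storage-group responses are routed
    # to the matching json_utils decoders above:
    #
    #   header = ops.head(cid=cid, oid=oid, endpoint=rpc_endpoint)
    #   obj_type = header["header"]["objectType"]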
@reporter.step("Lock Object")
|
||||
def lock(
|
||||
self,
|
||||
cid: str,
|
||||
oid: str,
|
||||
endpoint: str,
|
||||
lifetime: Optional[int] = None,
|
||||
expire_at: Optional[int] = None,
|
||||
address: Optional[str] = None,
|
||||
bearer: Optional[str] = None,
|
||||
session: Optional[str] = None,
|
||||
ttl: Optional[int] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
Locks object in container.
|
||||
|
||||
Args:
|
||||
address: Address of wallet account.
|
||||
bearer: File with signed JSON or binary encoded bearer token.
|
||||
cid: Container ID.
|
||||
oid: Object ID.
|
||||
lifetime: Lock lifetime.
|
||||
expire_at: Lock expiration epoch.
|
||||
shell: executor for cli command
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
session: Path to a JSON-encoded container session token.
|
||||
ttl: TTL value in request meta header (default 2).
|
||||
wallet: WIF (NEP-2) string or path to the wallet or binary key.
|
||||
xhdr: Dict with request X-Headers.
|
||||
timeout: Timeout for the operation.
|
||||
|
||||
Returns:
|
||||
Lock object ID
|
||||
"""
|
||||
result = self.cli.object.lock(
|
||||
rpc_endpoint=endpoint,
|
||||
lifetime=lifetime,
|
||||
expire_at=expire_at,
|
||||
address=address,
|
||||
cid=cid,
|
||||
oid=oid,
|
||||
bearer=bearer,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
ttl=ttl,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# Splitting CLI output to separate lines and taking the penultimate line
|
||||
id_str = result.stdout.strip().split("\n")[0]
|
||||
oid = id_str.split(":")[1]
|
||||
return oid.strip()
|
||||
|
||||
@reporter.step("Put object")
|
||||
def put(
|
||||
self,
|
||||
path: str,
|
||||
cid: str,
|
||||
endpoint: str,
|
||||
bearer: Optional[str] = None,
|
||||
copies_number: Optional[int] = None,
|
||||
attributes: Optional[dict] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
expire_at: Optional[int] = None,
|
||||
no_progress: bool = True,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
PUT of given file.
|
||||
|
||||
Args:
|
||||
path: path to file to be PUT
|
||||
cid: ID of Container where we get the Object from
|
||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||
copies_number: Number of copies of the object to store within the RPC call
|
||||
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
no_progress: do not show progress bar
|
||||
expire_at: Last epoch in the life of the object
|
||||
xhdr: Request X-Headers in form of Key=Value
|
||||
session: path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(str): ID of uploaded Object
|
||||
"""
|
||||
result = self.cli.object.put(
|
||||
rpc_endpoint=endpoint,
|
||||
file=path,
|
||||
cid=cid,
|
||||
attributes=attributes,
|
||||
bearer=bearer,
|
||||
copies_number=copies_number,
|
||||
expire_at=expire_at,
|
||||
no_progress=no_progress,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# Splitting CLI output to separate lines and taking the penultimate line
|
||||
id_str = result.stdout.strip().split("\n")[-2]
|
||||
oid = id_str.split(":")[1]
|
||||
return oid.strip()
|
||||
|
||||
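
    # Example (illustrative sketch): uploading a local file with user attributes; the
    # OID is parsed from the penultimate line of the CLI output:
    #
    #   oid = ops.put(path=local_file, cid=cid, endpoint=rpc_endpoint,
    #                 attributes={"FileName": "demo.bin"})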
@reporter.step("Patch object")
|
||||
def patch(
|
||||
self,
|
||||
cid: str,
|
||||
oid: str,
|
||||
endpoint: str,
|
||||
ranges: list[str] = None,
|
||||
payloads: list[str] = None,
|
||||
new_attrs: Optional[str] = None,
|
||||
replace_attrs: bool = False,
|
||||
bearer: str = "",
|
||||
xhdr: Optional[dict] = None,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
trace: bool = False,
|
||||
) -> str:
|
||||
"""
|
||||
PATCH an object.
|
||||
|
||||
Args:
|
||||
cid: ID of Container where we get the Object from
|
||||
oid: Object ID
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
ranges: An array of ranges in which to replace data in the format [offset1:length1, offset2:length2]
|
||||
payloads: An array of file paths to be applied in each range
|
||||
new_attrs: Attributes to be changed in the format "key1=value1,key2=value2"
|
||||
replace_attrs: Replace all attributes completely with new ones specified in new_attrs
|
||||
bearer: Path to Bearer Token file, appends to `--bearer` key
|
||||
xhdr: Request X-Headers in form of Key=Value
|
||||
session: Path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation
|
||||
trace: Generate trace ID and print it
|
||||
Returns:
|
||||
(str): ID of patched Object
|
||||
"""
|
||||
result = self.cli.object.patch(
|
||||
rpc_endpoint=endpoint,
|
||||
cid=cid,
|
||||
oid=oid,
|
||||
range=ranges,
|
||||
payload=payloads,
|
||||
new_attrs=new_attrs,
|
||||
replace_attrs=replace_attrs,
|
||||
bearer=bearer,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
trace=trace,
|
||||
)
|
||||
return result.stdout.split(":")[1].strip()
|
||||
|
||||
@reporter.step("Put object to random node")
|
||||
def put_to_random_node(
|
||||
self,
|
||||
path: str,
|
||||
cid: str,
|
||||
cluster: Cluster,
|
||||
bearer: Optional[str] = None,
|
||||
copies_number: Optional[int] = None,
|
||||
attributes: Optional[dict] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
expire_at: Optional[int] = None,
|
||||
no_progress: bool = True,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
PUT of given file to a random storage node.
|
||||
|
||||
Args:
|
||||
path: path to file to be PUT
|
||||
cid: ID of Container where we get the Object from
|
||||
cluster: cluster under test
|
||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||
copies_number: Number of copies of the object to store within the RPC call
|
||||
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
||||
cluster: cluster under test
|
||||
no_progress: do not show progress bar
|
||||
expire_at: Last epoch in the life of the object
|
||||
xhdr: Request X-Headers in form of Key=Value
|
||||
session: path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
ID of uploaded Object
|
||||
"""
|
||||
endpoint = cluster.get_random_storage_rpc_endpoint()
|
||||
return self.put(
|
||||
path,
|
||||
cid,
|
||||
endpoint,
|
||||
bearer,
|
||||
copies_number,
|
||||
attributes,
|
||||
xhdr,
|
||||
expire_at,
|
||||
no_progress,
|
||||
session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
@reporter.step("Get Range")
|
||||
def range(
|
||||
self,
|
||||
cid: str,
|
||||
oid: str,
|
||||
range_cut: str,
|
||||
endpoint: str,
|
||||
bearer: str = "",
|
||||
xhdr: Optional[dict] = None,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> tuple[file_utils.TestFile, bytes]:
|
||||
"""
|
||||
GETRANGE an Object.
|
||||
|
||||
Args:
|
||||
wallet: wallet on whose behalf GETRANGE is done
|
||||
cid: ID of Container where we get the Object from
|
||||
oid: ID of Object we are going to request
|
||||
range_cut: range to take data from in the form offset:length
|
||||
shell: executor for cli command
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||
xhdr: Request X-Headers in form of Key=Value
|
||||
session: path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(str, bytes) - path to the file with range content and content of this file as bytes
|
||||
"""
|
||||
test_file = file_utils.TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4())))
|
||||
|
||||
self.cli.object.range(
|
||||
rpc_endpoint=endpoint,
|
||||
cid=cid,
|
||||
oid=oid,
|
||||
range=range_cut,
|
||||
file=test_file,
|
||||
bearer=bearer,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
with open(test_file, "rb") as file:
|
||||
content = file.read()
|
||||
return test_file, content
|
||||
|
||||
@reporter.step("Search object")
|
||||
def search(
|
||||
self,
|
||||
cid: str,
|
||||
endpoint: str,
|
||||
bearer: str = "",
|
||||
oid: Optional[str] = None,
|
||||
filters: Optional[dict] = None,
|
||||
expected_objects_list: Optional[list] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
session: Optional[str] = None,
|
||||
phy: bool = False,
|
||||
root: bool = False,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
address: Optional[str] = None,
|
||||
generate_key: Optional[bool] = None,
|
||||
ttl: Optional[int] = None,
|
||||
) -> list:
|
||||
"""
|
||||
SEARCH an Object.
|
||||
|
||||
Args:
|
||||
wallet: wallet on whose behalf SEARCH is done
|
||||
cid: ID of Container where we get the Object from
|
||||
shell: executor for cli command
|
||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
filters: key=value pairs to filter Objects
|
||||
expected_objects_list: a list of ObjectIDs to compare found Objects with
|
||||
xhdr: Request X-Headers in form of Key=Value
|
||||
session: path to a JSON-encoded container session token
|
||||
phy: Search physically stored objects.
|
||||
root: Search for user objects.
|
||||
timeout: Timeout for the operation.
|
||||
|
||||
Returns:
|
||||
list of found ObjectIDs
|
||||
"""
|
||||
result = self.cli.object.search(
|
||||
rpc_endpoint=endpoint,
|
||||
cid=cid,
|
||||
bearer=bearer,
|
||||
oid=oid,
|
||||
xhdr=xhdr,
|
||||
filters=[f"{filter_key} EQ {filter_val}" for filter_key, filter_val in filters.items()] if filters else None,
|
||||
session=session,
|
||||
phy=phy,
|
||||
root=root,
|
||||
address=address,
|
||||
generate_key=generate_key,
|
||||
ttl=ttl,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
found_objects = re.findall(r"(\w{43,44})", result.stdout)
|
||||
|
||||
if expected_objects_list:
|
||||
if sorted(found_objects) == sorted(expected_objects_list):
|
||||
logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'")
|
||||
else:
|
||||
logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'")
|
||||
|
||||
return found_objects
|
||||
|
||||
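
    # Example (illustrative sketch): each filters entry becomes a "<key> EQ <value>"
    # CLI filter, and the found OIDs are extracted by regex from stdout:
    #
    #   oids = ops.search(cid=cid, endpoint=rpc_endpoint, filters={"FileName": "demo.bin"})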
    @wait_for_success()
    @reporter.step("Search object nodes")
    def nodes(
        self,
        cluster: Cluster,
        cid: str,
        oid: str,
        alive_node: ClusterNode,
        bearer: str = "",
        xhdr: Optional[dict] = None,
        is_direct: bool = False,
        verify_presence_all: bool = False,
        timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
    ) -> list[ClusterNode]:
        endpoint = alive_node.storage_node.get_rpc_endpoint()

        response = self.cli.object.nodes(
            rpc_endpoint=endpoint,
            cid=cid,
            oid=oid,
            bearer=bearer,
            ttl=1 if is_direct else None,
            json=True,
            xhdr=xhdr,
            timeout=timeout,
            verify_presence_all=verify_presence_all,
        )

        response_json = json.loads(response.stdout)
        # Currently the command shows both expected and confirmed nodes;
        # we count only nodes that are both expected and confirmed.
        object_nodes_id = {
            required_node
            for data_object in response_json["data_objects"]
            for required_node in data_object["required_nodes"]
            if required_node in data_object["confirmed_nodes"]
        }

        netmap_nodes_list = cli_utils.parse_netmap_output(
            self.cli.netmap.snapshot(
                rpc_endpoint=endpoint,
            ).stdout
        )
        netmap_nodes = [
            netmap_node for object_node in object_nodes_id for netmap_node in netmap_nodes_list if object_node == netmap_node.node_id
        ]

        object_nodes = [
            cluster_node
            for netmap_node in netmap_nodes
            for cluster_node in cluster.cluster_nodes
            if netmap_node.node == cluster_node.host_ip
        ]

        return object_nodes
@reporter.step("Search parts of object")
|
||||
def parts(
|
||||
self,
|
||||
cid: str,
|
||||
oid: str,
|
||||
alive_node: ClusterNode,
|
||||
bearer: str = "",
|
||||
xhdr: Optional[dict] = None,
|
||||
is_direct: bool = False,
|
||||
verify_presence_all: bool = False,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> list[str]:
|
||||
endpoint = alive_node.storage_node.get_rpc_endpoint()
|
||||
response = self.cli.object.nodes(
|
||||
rpc_endpoint=endpoint,
|
||||
cid=cid,
|
||||
oid=oid,
|
||||
bearer=bearer,
|
||||
ttl=1 if is_direct else None,
|
||||
json=True,
|
||||
xhdr=xhdr,
|
||||
timeout=timeout,
|
||||
verify_presence_all=verify_presence_all,
|
||||
)
|
||||
response_json = json.loads(response.stdout)
|
||||
return [data_object["object_id"] for data_object in response_json["data_objects"]]
|
|
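
# Example (illustrative sketch): a typical round-trip through ObjectOperations,
# assuming a configured FrostfsCli `cli`, an existing container `cid` and a local file:
#
#   ops = ObjectOperations(cli)
#   oid = ops.put(path=local_file, cid=cid, endpoint=rpc_endpoint)
#   downloaded = ops.get(cid=cid, oid=oid, endpoint=rpc_endpoint)
#   tombstone = ops.delete(cid=cid, oid=oid, endpoint=rpc_endpoint)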
@@ -1,424 +0,0 @@
from abc import ABC, abstractmethod
from typing import Any, List, Optional

from frostfs_testlib.shell.interfaces import CommandResult
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.constants import PlacementRule
from frostfs_testlib.storage.dataclasses.storage_object_info import Chunk, NodeNetmapInfo
from frostfs_testlib.utils import file_utils


class ChunksInterface(ABC):
    @abstractmethod
    def search_node_without_chunks(self, chunks: list[Chunk], cluster: Cluster, endpoint: str = None) -> list[ClusterNode]:
        pass

    @abstractmethod
    def get_chunk_node(self, cluster: Cluster, chunk: Chunk) -> tuple[ClusterNode, NodeNetmapInfo]:
        pass

    @abstractmethod
    def get_shard_chunk(self, node: ClusterNode, chunk: Chunk) -> str:
        pass

    @abstractmethod
    def get_all(
        self,
        rpc_endpoint: str,
        cid: str,
        oid: str,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        bearer: Optional[str] = None,
        generate_key: Optional[bool] = None,
        trace: bool = False,
        root: bool = False,
        verify_presence_all: bool = False,
        json: bool = True,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> list[Chunk]:
        pass

    @abstractmethod
    def get_parity(
        self,
        rpc_endpoint: str,
        cid: str,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        bearer: Optional[str] = None,
        generate_key: Optional[bool] = None,
        oid: Optional[str] = None,
        trace: bool = False,
        root: bool = False,
        verify_presence_all: bool = False,
        json: bool = True,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> Chunk:
        pass

    @abstractmethod
    def get_first_data(
        self,
        rpc_endpoint: str,
        cid: str,
        wallet: Optional[str] = None,
        address: Optional[str] = None,
        bearer: Optional[str] = None,
        generate_key: Optional[bool] = None,
        oid: Optional[str] = None,
        trace: bool = False,
        root: bool = False,
        verify_presence_all: bool = False,
        json: bool = True,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> Chunk:
        pass

class ObjectInterface(ABC):
    def __init__(self) -> None:
        self.chunks: ChunksInterface

    @abstractmethod
    def delete(
        self,
        cid: str,
        oid: str,
        endpoint: str,
        bearer: str = "",
        xhdr: Optional[dict] = None,
        session: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> str:
        pass

    @abstractmethod
    def get(
        self,
        cid: str,
        oid: str,
        endpoint: str,
        bearer: Optional[str] = None,
        write_object: Optional[str] = None,
        xhdr: Optional[dict] = None,
        no_progress: bool = True,
        session: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> file_utils.TestFile:
        pass

    @abstractmethod
    def get_from_random_node(
        self,
        cid: str,
        oid: str,
        cluster: Cluster,
        bearer: Optional[str] = None,
        write_object: Optional[str] = None,
        xhdr: Optional[dict] = None,
        no_progress: bool = True,
        session: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> str:
        pass

    @abstractmethod
    def hash(
        self,
        endpoint: str,
        cid: str,
        oid: str,
        address: Optional[str] = None,
        bearer: Optional[str] = None,
        generate_key: Optional[bool] = None,
        range: Optional[str] = None,
        salt: Optional[str] = None,
        ttl: Optional[int] = None,
        session: Optional[str] = None,
        hash_type: Optional[str] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> str:
        pass

    @abstractmethod
    def head(
        self,
        cid: str,
        oid: str,
        endpoint: str,
        bearer: str = "",
        xhdr: Optional[dict] = None,
        json_output: bool = True,
        is_raw: bool = False,
        is_direct: bool = False,
        session: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> CommandResult | Any:
        pass

    @abstractmethod
    def lock(
        self,
        cid: str,
        oid: str,
        endpoint: str,
        lifetime: Optional[int] = None,
        expire_at: Optional[int] = None,
        address: Optional[str] = None,
        bearer: Optional[str] = None,
        session: Optional[str] = None,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> str:
        pass

    @abstractmethod
    def put(
        self,
        path: str,
        cid: str,
        endpoint: str,
        bearer: Optional[str] = None,
        copies_number: Optional[int] = None,
        attributes: Optional[dict] = None,
        xhdr: Optional[dict] = None,
        expire_at: Optional[int] = None,
        no_progress: bool = True,
        session: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> str:
        pass

    @abstractmethod
    def patch(
        self,
        cid: str,
        oid: str,
        endpoint: str,
        ranges: Optional[list[str]] = None,
        payloads: Optional[list[str]] = None,
        new_attrs: Optional[str] = None,
        replace_attrs: bool = False,
        bearer: Optional[str] = None,
        xhdr: Optional[dict] = None,
        session: Optional[str] = None,
        timeout: Optional[str] = None,
        trace: bool = False,
    ) -> str:
        pass

    @abstractmethod
    def put_to_random_node(
        self,
        path: str,
        cid: str,
        cluster: Cluster,
        bearer: Optional[str] = None,
        copies_number: Optional[int] = None,
        attributes: Optional[dict] = None,
        xhdr: Optional[dict] = None,
        expire_at: Optional[int] = None,
        no_progress: bool = True,
        session: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> str:
        pass

    @abstractmethod
    def range(
        self,
        cid: str,
        oid: str,
        range_cut: str,
        endpoint: str,
        bearer: str = "",
        xhdr: Optional[dict] = None,
        session: Optional[str] = None,
        timeout: Optional[str] = None,
    ) -> tuple[file_utils.TestFile, bytes]:
        pass

    @abstractmethod
    def search(
        self,
        cid: str,
        endpoint: str,
        bearer: str = "",
        oid: Optional[str] = None,
        filters: Optional[dict] = None,
        expected_objects_list: Optional[list] = None,
        xhdr: Optional[dict] = None,
        session: Optional[str] = None,
        phy: bool = False,
        root: bool = False,
        timeout: Optional[str] = None,
        address: Optional[str] = None,
        generate_key: Optional[bool] = None,
        ttl: Optional[int] = None,
    ) -> List:
        pass

    @abstractmethod
    def nodes(
        self,
        cluster: Cluster,
        cid: str,
        oid: str,
        alive_node: ClusterNode,
        bearer: str = "",
        xhdr: Optional[dict] = None,
        is_direct: bool = False,
        verify_presence_all: bool = False,
        timeout: Optional[str] = None,
    ) -> List[ClusterNode]:
        pass

    @abstractmethod
    def parts(
        self,
        cid: str,
        oid: str,
        alive_node: ClusterNode,
        bearer: str = "",
        xhdr: Optional[dict] = None,
        is_direct: bool = False,
        verify_presence_all: bool = False,
        timeout: Optional[str] = None,
    ) -> List[str]:
        pass

class ContainerInterface(ABC):
    @abstractmethod
    def create(
        self,
        endpoint: str,
        nns_zone: Optional[str] = None,
        nns_name: Optional[str] = None,
        address: Optional[str] = None,
        attributes: Optional[dict] = None,
        basic_acl: Optional[str] = None,
        await_mode: bool = False,
        disable_timestamp: bool = False,
        force: bool = False,
        trace: bool = False,
        name: Optional[str] = None,
        nonce: Optional[str] = None,
        policy: Optional[str] = None,
        session: Optional[str] = None,
        subnet: Optional[str] = None,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> str:
        """
        Create a new container and register it in the FrostFS.
        It will be stored in the sidechain when the Inner Ring accepts it.
        """
        raise NotImplementedError("Method 'create' is not implemented")

    @abstractmethod
    def delete(
        self,
        endpoint: str,
        cid: str,
        address: Optional[str] = None,
        await_mode: bool = False,
        session: Optional[str] = None,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        force: bool = False,
        trace: bool = False,
    ) -> List[str]:
        """
        Delete an existing container.
        Only the owner of the container has permission to remove the container.
        """
        raise NotImplementedError("Method 'delete' is not implemented")

    @abstractmethod
    def get(
        self,
        endpoint: str,
        cid: str,
        address: Optional[str] = None,
        generate_key: Optional[bool] = None,
        await_mode: bool = False,
        to: Optional[str] = None,
        json_mode: bool = True,
        trace: bool = False,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> List[str]:
        """Get container field info."""
        raise NotImplementedError("Method 'get' is not implemented")

    @abstractmethod
    def get_eacl(
        self,
        endpoint: str,
        cid: str,
        address: Optional[str] = None,
        generate_key: Optional[bool] = None,
        await_mode: bool = False,
        json_mode: bool = True,
        trace: bool = False,
        to: Optional[str] = None,
        session: Optional[str] = None,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
    ) -> List[str]:
        """Get extended ACL table of container."""
        raise NotImplementedError("Method 'get_eacl' is not implemented")

    @abstractmethod
    def list(
        self,
        endpoint: str,
        name: Optional[str] = None,
        address: Optional[str] = None,
        generate_key: Optional[bool] = None,
        trace: bool = False,
        owner: Optional[str] = None,
        ttl: Optional[int] = None,
        xhdr: Optional[dict] = None,
        timeout: Optional[str] = None,
        **params,
    ) -> List[str]:
        """List all created containers."""
        raise NotImplementedError("Method 'list' is not implemented")

    @abstractmethod
    def nodes(
        self,
        endpoint: str,
        cid: str,
        cluster: Cluster,
        address: Optional[str] = None,
        ttl: Optional[int] = None,
        from_file: Optional[str] = None,
        trace: bool = False,
        short: Optional[bool] = True,
        xhdr: Optional[dict] = None,
        generate_key: Optional[bool] = None,
        timeout: Optional[str] = None,
    ) -> List[ClusterNode]:
        """Show the nodes participating in the container in the current epoch."""
        raise NotImplementedError("Method 'nodes' is not implemented")


class GrpcClientWrapper(ABC):
    def __init__(self) -> None:
        self.object: ObjectInterface
        self.container: ContainerInterface
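
# Example (illustrative sketch): a concrete client wires the ABCs together; the
# container implementation name below is a placeholder:
#
#   class CliClientWrapper(GrpcClientWrapper):
#       def __init__(self, cli: FrostfsCli) -> None:
#           self.object: ObjectInterface = ObjectOperations(cli)
#           self.container: ContainerInterface = ContainerOperations(cli)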
@@ -25,10 +25,14 @@ class ClusterTestBase:
        for _ in range(epochs_to_tick):
            self.tick_epoch(alive_node, wait_block)

    def tick_epoch(self, alive_node: Optional[StorageNode] = None, wait_block: int = None, delta: Optional[int] = None):
        epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node, delta=delta)
    def tick_epoch(
        self,
        alive_node: Optional[StorageNode] = None,
        wait_block: int = None,
    ):
        epoch.tick_epoch(self.shell, self.cluster, alive_node=alive_node)
        if wait_block:
            self.wait_for_blocks(wait_block)
            time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * wait_block)

    def wait_for_epochs_align(self):
        epoch.wait_for_epochs_align(self.shell, self.cluster)

@@ -38,6 +42,3 @@ class ClusterTestBase:

    def ensure_fresh_epoch(self):
        return epoch.ensure_fresh_epoch(self.shell, self.cluster)

    def wait_for_blocks(self, blocks_count: int = 1):
        time.sleep(datetime_utils.parse_time(MORPH_BLOCK_TIME) * blocks_count)
@@ -1,22 +1,7 @@
import itertools
import traceback
from concurrent.futures import Future, ThreadPoolExecutor
from contextlib import contextmanager
from typing import Callable, Collection, Optional, Union

MAX_WORKERS = 50


@contextmanager
def parallel_workers_limit(workers_count: int):
    global MAX_WORKERS
    original_value = MAX_WORKERS
    MAX_WORKERS = workers_count
    try:
        yield
    finally:
        MAX_WORKERS = original_value


def parallel(
    fn: Union[Callable, list[Callable]],

@@ -56,42 +41,7 @@ def parallel(
    # Check for exceptions
    exceptions = [future.exception() for future in futures if future.exception()]
    if exceptions:
        # Prettify exception in parallel with all underlying stack traces
        # For example, we had 3 RuntimeError exceptions during parallel. This format will give us something like
        #
        # RuntimeError: The following exceptions occurred during parallel run:
        # 1) Exception one text
        # 2) Exception two text
        # 3) Exception three text
        # TRACES:
        # ==== 1 ====
        # Traceback (most recent call last):
        #   File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
        #     result = self.fn(*self.args, **self.kwargs)
        #   File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
        #     raise RuntimeError(f"Exception one text")
        # RuntimeError: Exception one text
        #
        # ==== 2 ====
        # Traceback (most recent call last):
        #   File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
        #     result = self.fn(*self.args, **self.kwargs)
        #   File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
        #     raise RuntimeError(f"Exception two text")
        # RuntimeError: Exception two text
        #
        # ==== 3 ====
        # Traceback (most recent call last):
        #   File "/usr/lib/python3.10/concurrent/futures/thread.py", line 58, in run
        #     result = self.fn(*self.args, **self.kwargs)
        #   File "frostfs_testcases/pytest_tests/testsuites/object/test_object_tombstone.py", line 17, in check_service
        #     raise RuntimeError(f"Exception three text")
        # RuntimeError: Exception three text
        short_summary = "\n".join([f"{i}) {str(e)}" for i, e in enumerate(exceptions, 1)])
        stack_traces = "\n".join(
            [f"==== {i} ====\n{''.join(traceback.TracebackException.from_exception(e).format())}" for i, e in enumerate(exceptions, 1)]
        )
        message = f"{short_summary}\nTRACES:\n{stack_traces}"
        message = "\n".join([str(e) for e in exceptions])
        raise RuntimeError(f"The following exceptions occurred during parallel run:\n{message}")
    return futures

@@ -104,7 +54,7 @@ def _run_by_fn_list(fn_list: list[Callable], *args, **kwargs) -> list[Future]:

    futures: list[Future] = []

    with ThreadPoolExecutor(max_workers=min(len(fn_list), MAX_WORKERS)) as executor:
    with ThreadPoolExecutor(max_workers=len(fn_list)) as executor:
        for fn in fn_list:
            task_args = _get_args(*args)
            task_kwargs = _get_kwargs(**kwargs)

@@ -117,7 +67,7 @@
def _run_by_items(fn: Callable, parallel_items: Collection, *args, **kwargs) -> list[Future]:
    futures: list[Future] = []

    with ThreadPoolExecutor(max_workers=min(len(parallel_items), MAX_WORKERS)) as executor:
    with ThreadPoolExecutor(max_workers=len(parallel_items)) as executor:
        for item in parallel_items:
            task_args = _get_args(*args)
            task_kwargs = _get_kwargs(**kwargs)
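
# Example (illustrative sketch): running one function across a collection while
# temporarily capping the pool size; `restart_service` and `nodes` are placeholders:
#
#   with parallel_workers_limit(4):
#       futures = parallel(restart_service, nodes)
#   results = [future.result() for future in futures]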
@@ -1,16 +1,13 @@
import inspect
import logging
import os
from functools import wraps
from time import sleep, time
from typing import Any

import yaml
from _pytest.outcomes import Failed
from pytest import fail

from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.utils.func_utils import format_by_args

logger = logging.getLogger("NeoLogger")

@@ -131,42 +128,6 @@ def run_optionally(enabled: bool, mock_value: Any = True):
    return deco


def cached_fixture(enabled: bool):
    """
    Decorator to cache fixtures.
    MUST be placed after @pytest.fixture and before @allure decorators.

    Args:
        enabled: if true, decorated func will be cached.
    """

    def deco(func):
        @wraps(func)
        def func_impl(*a, **kw):
            # TODO: *a and *kw should be parsed to some kind of hashsum and used in filename to prevent cache load from different parameters
            cache_file = os.path.join(ASSETS_DIR, f"fixture_cache_{func.__name__}.yml")

            if enabled and os.path.exists(cache_file):
                with open(cache_file, "r") as cache_input:
                    return yaml.load(cache_input, Loader=yaml.Loader)

            result = func(*a, **kw)

            if enabled:
                with open(cache_file, "w") as cache_output:
                    yaml.dump(result, cache_output)
            return result

        # TODO: cache yielding fixtures
        @wraps(func)
        def gen_impl(*a, **kw):
            raise NotImplementedError("Not implemented for yielding fixtures")

        return gen_impl if inspect.isgeneratorfunction(func) else func_impl

    return deco


def wait_for_success(
    max_wait_time: int = 60,
    interval: int = 1,
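
# Example (illustrative sketch): caching a non-yielding session fixture to YAML under
# ASSETS_DIR; `STORE_FIXTURES` and `create_wallet` are placeholders:
#
#   @pytest.fixture(scope="session")
#   @cached_fixture(STORE_FIXTURES)
#   def default_wallet():
#       return create_wallet()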
@@ -9,12 +9,13 @@ import csv
import json
import logging
import re
import subprocess
import sys
from contextlib import suppress
from datetime import datetime
from io import StringIO
from textwrap import shorten
from typing import Any, Optional, Union
from typing import Dict, List, TypedDict, Union

import pexpect

@@ -74,78 +75,14 @@ def _attach_allure_log(cmd: str, output: str, return_code: int, start_time: date
    reporter.attach(command_attachment, "Command execution")


def log_command_execution(cmd: str, output: Union[str, dict], params: Optional[dict] = None, **kwargs) -> None:
def log_command_execution(cmd: str, output: Union[str, TypedDict]) -> None:
    logger.info(f"{cmd}: {output}")

    if not params:
        params = {}

    if params.get("Body") and len(params.get("Body")) > 1000:
        params["Body"] = "<large text data>"

    output_params = params

    try:
        json_params = json.dumps(params, indent=4, sort_keys=True, default=str)
    except TypeError as err:
        logger.warning(f"Failed to serialize '{cmd}' request parameters:\n{params}\nException: {err}")
    else:
        output_params = json_params

    output = json.dumps(output, indent=4, sort_keys=True, default=str)

    command_execution = f"COMMAND: '{cmd}'\n" f"URL: {kwargs['endpoint']}\n" f"PARAMS:\n{output_params}\n" f"OUTPUT:\n{output}\n"
    aws_command = _convert_request_to_aws_cli_command(cmd, params, **kwargs)

    reporter.attach(command_execution, "Command execution")
    reporter.attach(aws_command, "AWS CLI Command")


def _convert_request_to_aws_cli_command(command: str, params: dict, **kwargs) -> str:
    overriden_names = [_convert_json_name_to_aws_cli(name) for name in kwargs.keys()]
    command = command.replace("_", "-")
    options = []

    for name, value in params.items():
        name = _convert_json_name_to_aws_cli(name)

        # To override parameters for AWS CLI
        if name in overriden_names:
            continue

        if option := _create_option(name, value):
            options.append(option)

    for name, value in kwargs.items():
        name = _convert_json_name_to_aws_cli(name)
        if option := _create_option(name, value):
            options.append(option)

    options = " ".join(options)
    api = "s3api" if "s3" in kwargs["endpoint"] else "iam"
    return f"aws --no-verify-ssl --no-paginate {api} {command} {options}"


def _convert_json_name_to_aws_cli(name: str) -> str:
    specific_names = {"CORSConfiguration": "cors-configuration"}

    if aws_cli_name := specific_names.get(name):
        return aws_cli_name
    return re.sub(r"([a-z])([A-Z])", r"\1 \2", name).lower().replace(" ", "-").replace("_", "-")


def _create_option(name: str, value: Any) -> str | None:
    if isinstance(value, bool) and value:
        return f"--{name}"

    if isinstance(value, dict):
        value = json.dumps(value, indent=4, sort_keys=True, default=str)
        return f"--{name} '{value}'"

    if value:
        return f"--{name} {value}"

    return None
    with suppress(Exception):
        json_output = json.dumps(output, indent=4, sort_keys=True)
        output = json_output
    command_attachment = f"COMMAND: '{cmd}'\n" f"OUTPUT:\n {output}\n"
    with reporter.step(f'COMMAND: {shorten(cmd, width=60, placeholder="...")}'):
        reporter.attach(command_attachment, "Command execution")


def parse_netmap_output(output: str) -> list[NodeNetmapInfo]:
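
# Example (illustrative sketch): CamelCase boto3 parameter names become kebab-case
# CLI options, with an explicit special case for CORSConfiguration:
#
#   _convert_json_name_to_aws_cli("MaxKeys")            # -> "max-keys"
#   _convert_json_name_to_aws_cli("CORSConfiguration")  # -> "cors-configuration"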
@@ -6,7 +6,6 @@ from typing import Any, Optional

from frostfs_testlib import reporter
from frostfs_testlib.resources.common import ASSETS_DIR
from frostfs_testlib.utils import string_utils

logger = logging.getLogger("NeoLogger")

@@ -42,10 +41,8 @@ def ensure_directory_opener(path, flags):
    return os.open(path, flags)


# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps
# Use object_size dt in future as argument
@reporter.step("Generate file")
def generate_file(size: int, file_name: Optional[str] = None) -> TestFile:
@reporter.step("Generate file with size {size}")
def generate_file(size: int) -> TestFile:
    """Generates a binary file with the specified size in bytes.

    Args:

@@ -54,11 +51,7 @@ def generate_file(size: int, file_name: Optional[str] = None) -> TestFile:
    Returns:
        The path to the generated file.
    """

    if file_name is None:
        file_name = string_utils.unique_name("object-")

    test_file = TestFile(os.path.join(ASSETS_DIR, file_name))
    test_file = TestFile(os.path.join(ASSETS_DIR, str(uuid.uuid4())))
    with open(test_file, "wb", opener=ensure_directory_opener) as file:
        file.write(os.urandom(size))
    logger.info(f"File with size {size} bytes has been generated: {test_file}")

@@ -66,9 +59,7 @@ def generate_file(size: int, file_name: Optional[str] = None) -> TestFile:
    return test_file


# TODO: Do not add {size} to title yet, since it produces dynamic info in top level steps
# Use object_size dt in future as argument
@reporter.step("Generate file with content")
@reporter.step("Generate file with content of size {size}")
def generate_file_with_content(
    size: int,
    file_path: Optional[str | TestFile] = None,
@@ -1,29 +1,11 @@
import itertools
import random
import re
import string
from datetime import datetime

ONLY_ASCII_LETTERS = string.ascii_letters
DIGITS_AND_ASCII_LETTERS = string.ascii_letters + string.digits
NON_DIGITS_AND_LETTERS = string.punctuation

# if unique_name is called multiple times within the same microsecond, append 0-4 to the name so it is surely unique
FUSE = itertools.cycle(range(5))


def unique_name(prefix: str = "", postfix: str = ""):
    """
    Generate unique short name of anything with prefix.
    This should be unique within the scope of multiple runs.

    Args:
        prefix: prefix for unique name generation
    Returns:
        unique name string
    """
    return f"{prefix}{hex(int(datetime.now().timestamp() * 1000000))}{next(FUSE)}{postfix}"
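
# Example (illustrative sketch): names embed the hex microsecond timestamp plus a 0-4
# fuse digit, so calls within the same microsecond still differ:
#
#   unique_name("bucket-")         # e.g. "bucket-0x61f2a7c3d45e80"
#   unique_name("obj-", ".bin")    # e.g. "obj-0x61f2a7c3d45e91.bin"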

def random_string(length: int = 5, source: str = ONLY_ASCII_LETTERS):
    """
@@ -18,14 +18,14 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]:

    for binary in [NEOGO_EXECUTABLE, FROSTFS_AUTHMATE_EXEC]:
        out = shell.exec(f"{binary} --version").stdout
        versions[binary] = parse_version(out)
        versions[binary] = _parse_version(out)

    frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC)
    versions[FROSTFS_CLI_EXEC] = parse_version(frostfs_cli.version.get().stdout)
    versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout)

    try:
        frostfs_adm = FrostfsAdm(shell, FROSTFS_ADM_EXEC)
        versions[FROSTFS_ADM_EXEC] = parse_version(frostfs_adm.version.get().stdout)
        versions[FROSTFS_ADM_EXEC] = _parse_version(frostfs_adm.version.get().stdout)
    except RuntimeError:
        logger.info(f"{FROSTFS_ADM_EXEC} not installed")

@@ -63,7 +63,7 @@ def parallel_binary_verions(host: Host) -> dict[str, str]:
    binary_path = binary["exec_path"]
    try:
        result = shell.exec(f"{binary_path} {binary['param']}")
        version = parse_version(result.stdout) or parse_version(result.stderr) or "Unknown"
        version = _parse_version(result.stdout) or _parse_version(result.stderr) or "Unknown"
        versions_at_host[binary_name] = version
    except Exception as exc:
        logger.error(f"Cannot get version for {binary_path} because of\n{exc}")

@@ -85,6 +85,6 @@ def get_remote_binaries_versions(hosting: Hosting) -> dict[str, dict[str, str]]:
    return versions_by_host


def parse_version(version_output: str) -> str:
    version = re.search(r"(?<=version[:=])\s?[\"\']?v?(.+)", version_output, re.IGNORECASE)
    return version.group(1).strip("\"'\n\t ") if version else version_output
def _parse_version(version_output: str) -> str:
    version = re.search(r"version[:\s]*v?(.+)", version_output, re.IGNORECASE)
    return version.group(1).strip() if version else version_output
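
# Example (illustrative sketch): both regex variants accept outputs such as
# "Version: v0.42.0"; the older variant additionally strips surrounding quotes:
#
#   _parse_version("FrostFS CLI\nVersion: v0.42.0")   # -> "0.42.0"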
@@ -2,7 +2,7 @@ from typing import Any

import pytest

from frostfs_testlib.clients import AwsCliClient, Boto3ClientWrapper
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper
from frostfs_testlib.storage.dataclasses.acl import EACLRole
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@ -3,7 +3,14 @@ from typing import Any, get_args
|
|||
|
||||
import pytest
|
||||
|
||||
from frostfs_testlib.load.load_config import EndpointSelectionStrategy, LoadParams, LoadScenario, LoadType, Preset, ReadFrom
|
||||
from frostfs_testlib.load.load_config import (
|
||||
EndpointSelectionStrategy,
|
||||
LoadParams,
|
||||
LoadScenario,
|
||||
LoadType,
|
||||
Preset,
|
||||
ReadFrom,
|
||||
)
|
||||
from frostfs_testlib.load.runners import DefaultRunner
|
||||
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_DEFAULT_VU_INIT_TIME
|
||||
from frostfs_testlib.storage.cluster import ClusterNode
|
||||
|
@ -92,7 +99,9 @@ class TestLoadConfig:
|
|||
def test_load_controller_string_representation(self, load_params: LoadParams):
|
||||
load_params.endpoint_selection_strategy = EndpointSelectionStrategy.ALL
|
||||
load_params.object_size = 512
|
||||
background_load_controller = BackgroundLoadController("tmp", load_params, None, None, DefaultRunner(None))
|
||||
background_load_controller = BackgroundLoadController(
|
||||
"tmp", load_params, "wallet", None, None, DefaultRunner(None)
|
||||
)
|
||||
expected = "grpc 512 KiB, writers=7, readers=7, deleters=8"
|
||||
assert f"{background_load_controller}" == expected
|
||||
assert repr(background_load_controller) == expected
|
||||
|
@ -132,7 +141,7 @@ class TestLoadConfig:
|
|||
"--out 'pregen_json'",
|
||||
"--workers '7'",
|
||||
"--containers '16'",
|
||||
"--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
|
||||
"--policy 'container_placement_policy'",
|
||||
"--ignore-errors",
|
||||
"--sleep '19'",
|
||||
"--local",
|
||||
|
@ -164,7 +173,7 @@ class TestLoadConfig:
|
|||
"--out 'pregen_json'",
|
||||
"--workers '7'",
|
||||
"--containers '16'",
|
||||
"--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
|
||||
"--policy 'container_placement_policy'",
|
||||
"--ignore-errors",
|
||||
"--sleep '19'",
|
||||
"--local",
|
||||
|
@ -205,7 +214,7 @@ class TestLoadConfig:
|
|||
"--out 'pregen_json'",
|
||||
"--workers '7'",
|
||||
"--buckets '13'",
|
||||
"--location 's3_location' --location 's3_location_2'",
|
||||
"--location 's3_location'",
|
||||
"--ignore-errors",
|
||||
"--sleep '19'",
|
||||
"--acl 'acl'",
|
||||
|
@ -239,7 +248,7 @@ class TestLoadConfig:
|
|||
"--out 'pregen_json'",
|
||||
"--workers '7'",
|
||||
"--buckets '13'",
|
||||
"--location 's3_location' --location 's3_location_2'",
|
||||
"--location 's3_location'",
|
||||
"--ignore-errors",
|
||||
"--sleep '19'",
|
||||
"--acl 'acl'",
|
||||
|
@ -279,7 +288,7 @@ class TestLoadConfig:
|
|||
"--out 'pregen_json'",
|
||||
"--workers '7'",
|
||||
"--buckets '13'",
|
||||
"--location 's3_location' --location 's3_location_2'",
|
||||
"--location 's3_location'",
|
||||
"--ignore-errors",
|
||||
"--sleep '19'",
|
||||
"--acl 'acl'",
|
||||
|
@ -320,7 +329,7 @@ class TestLoadConfig:
|
|||
"--out 'pregen_json'",
|
||||
"--workers '7'",
|
||||
"--containers '16'",
|
||||
"--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
|
||||
"--policy 'container_placement_policy'",
|
||||
"--ignore-errors",
|
||||
"--sleep '19'",
|
||||
"--acl 'acl'",
|
||||
|
@ -353,13 +362,12 @@ class TestLoadConfig:
|
|||
"--out 'pregen_json'",
|
||||
"--workers '7'",
|
||||
"--containers '16'",
|
||||
"--policy 'container_placement_policy' --policy 'container_placement_policy_2'",
|
||||
"--policy 'container_placement_policy'",
|
||||
"--ignore-errors",
|
||||
"--sleep '19'",
|
||||
"--acl 'acl'",
|
||||
]
|
||||
expected_env_vars = {
|
||||
"CONFIG_DIR": "config_dir",
|
||||
"CONFIG_FILE": "config_file",
|
||||
"DURATION": 9,
|
||||
"WRITE_OBJ_SIZE": 11,
|
||||
|
@@ -372,49 +380,12 @@ class TestLoadConfig:
             "DELETERS": 8,
             "READ_AGE": 8,
             "STREAMING": 9,
-            "MAX_TOTAL_SIZE_GB": 17,
             "PREGEN_JSON": "pregen_json",
         }

         self._check_preset_params(load_params, expected_preset_args)
         self._check_env_vars(load_params, expected_env_vars)

-    @pytest.mark.parametrize(
-        "input, value, params",
-        [
-            (["A C ", " B"], ["A C", "B"], [f"--policy 'A C' --policy 'B'"]),
-            (" A ", ["A"], ["--policy 'A'"]),
-            (" A , B ", ["A , B"], ["--policy 'A , B'"]),
-            ([" A", "B "], ["A", "B"], ["--policy 'A' --policy 'B'"]),
-            (None, None, []),
-        ],
-    )
-    def test_grpc_list_parsing_formatter(self, input, value, params):
-        load_params = LoadParams(LoadType.gRPC)
-        load_params.preset = Preset()
-        load_params.preset.container_placement_policy = input
-        assert load_params.preset.container_placement_policy == value
-
-        self._check_preset_params(load_params, params)
-
-    @pytest.mark.parametrize(
-        "input, value, params",
-        [
-            (["A C ", " B"], ["A C", "B"], [f"--location 'A C' --location 'B'"]),
-            (" A ", ["A"], ["--location 'A'"]),
-            (" A , B ", ["A , B"], ["--location 'A , B'"]),
-            ([" A", "B "], ["A", "B"], ["--location 'A' --location 'B'"]),
-            (None, None, []),
-        ],
-    )
-    def test_s3_list_parsing_formatter(self, input, value, params):
-        load_params = LoadParams(LoadType.S3)
-        load_params.preset = Preset()
-        load_params.preset.s3_location = input
-        assert load_params.preset.s3_location == value
-
-        self._check_preset_params(load_params, params)
-
     @pytest.mark.parametrize("load_params, load_type", [(LoadScenario.VERIFY, LoadType.S3)], indirect=True)
     def test_argument_parsing_for_s3_verify_scenario(self, load_params: LoadParams):
         expected_env_vars = {
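The two parametrized tests removed here covered a list-normalizing formatter on preset fields: a bare string becomes a one-element list, every element is whitespace-stripped, and None passes through unchanged. A self-contained sketch of that behavior (not the testlib's actual implementation, which hangs off dataclass metadata):

    def parse_list(value):
        # None stays None; a bare string becomes a one-element list; items are stripped.
        if value is None:
            return None
        if isinstance(value, str):
            value = [value]
        return [item.strip() for item in value]

    assert parse_list(["A C ", " B"]) == ["A C", "B"]
    assert parse_list(" A ") == ["A"]
    assert parse_list(" A , B ") == ["A , B"]  # commas are kept, not treated as separators
    assert parse_list(None) is None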
@@ -621,7 +592,6 @@ class TestLoadConfig:
             "--acl ''",
         ]
         expected_env_vars = {
-            "CONFIG_DIR": "",
             "CONFIG_FILE": "",
             "DURATION": 0,
             "WRITE_OBJ_SIZE": 0,
@@ -629,7 +599,6 @@ class TestLoadConfig:
             "K6_OUT": "",
             "K6_MIN_ITERATION_DURATION": "",
             "K6_SETUP_TIMEOUT": "",
-            "MAX_TOTAL_SIZE_GB": 0,
             "WRITERS": 0,
             "READERS": 0,
             "DELETERS": 0,
@@ -720,7 +689,9 @@ class TestLoadConfig:
             value = getattr(dataclass, field.name)
             assert value is not None, f"{field.name} is not None"

-    def _get_filled_load_params(self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False) -> LoadParams:
+    def _get_filled_load_params(
+        self, load_type: LoadType, load_scenario: LoadScenario, set_emtpy: bool = False
+    ) -> LoadParams:
         load_type_map = {
             LoadScenario.S3: LoadType.S3,
             LoadScenario.S3_CAR: LoadType.S3,
@@ -737,12 +708,13 @@ class TestLoadConfig:

         meta_fields = self._get_meta_fields(load_params)
         for field in meta_fields:
-            if getattr(field.instance, field.field.name) is None and load_params.scenario in field.field.metadata["applicable_scenarios"]:
+            if (
+                getattr(field.instance, field.field.name) is None
+                and load_params.scenario in field.field.metadata["applicable_scenarios"]
+            ):
                 value_to_set_map = {
                     int: 0 if set_emtpy else len(field.field.name),
-                    float: 0 if set_emtpy else len(field.field.name),
                     str: "" if set_emtpy else field.field.name,
-                    list[str]: "" if set_emtpy else [field.field.name, f"{field.field.name}_2"],
                     bool: False if set_emtpy else True,
                 }
                 value_to_set = value_to_set_map[field.field_type]
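The condition rewritten above drives a metadata-based filler: each dataclass field declares the scenarios it applies to, and the test assigns a type-appropriate dummy value (len(name) for numbers, the name itself for strings). A condensed sketch of the same pattern, with assumed field names:

    from dataclasses import dataclass, field, fields

    @dataclass
    class DemoParams:
        writers: int = field(default=None, metadata={"applicable_scenarios": ["grpc"]})
        out_file: str = field(default=None, metadata={"applicable_scenarios": ["grpc", "s3"]})

    def fill_for_scenario(params, scenario: str, empty: bool = False) -> None:
        for f in fields(params):
            if getattr(params, f.name) is None and scenario in f.metadata["applicable_scenarios"]:
                # f.type is the annotation object here (no postponed evaluation in this module).
                dummy = {int: 0 if empty else len(f.name), str: "" if empty else f.name}[f.type]
                setattr(params, f.name, dummy)

    p = DemoParams()
    fill_for_scenario(p, "grpc")
    assert p.writers == 7 and p.out_file == "out_file"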
@@ -755,7 +727,11 @@ class TestLoadConfig:

     def _get_meta_fields(self, instance):
         data_fields = fields(instance)
-        fields_with_data = [MetaTestField(field, self._get_actual_field_type(field), instance) for field in data_fields if field.metadata]
+        fields_with_data = [
+            MetaTestField(field, self._get_actual_field_type(field), instance)
+            for field in data_fields
+            if field.metadata
+        ]

         for field in data_fields:
             actual_field_type = self._get_actual_field_type(field)
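`_get_actual_field_type` is not shown in this hunk; presumably it unwraps Optional[...] annotations so the filler above can key on the plain type. One standard-library way to do that (a sketch under that assumption):

    import typing

    def actual_type(annotation):
        # Optional[X] is Union[X, None]: drop NoneType and return the remaining argument.
        args = [a for a in typing.get_args(annotation) if a is not type(None)]
        return args[0] if args else annotation

    assert actual_type(typing.Optional[int]) is int
    assert actual_type(str) is str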