forked from TrueCloudLab/frostfs-testlib
Compare commits
2 commits
master
...
bereza/upd
Author | SHA1 | Date | |
---|---|---|---|
9f2c8adfb5 | |||
ee1f3ab0c1 |
63 changed files with 902 additions and 2053 deletions
|
@ -1,21 +0,0 @@
|
||||||
name: DCO action
|
|
||||||
on: [pull_request]
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
dco:
|
|
||||||
name: DCO
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Setup Go
|
|
||||||
uses: actions/setup-go@v3
|
|
||||||
with:
|
|
||||||
go-version: '1.21'
|
|
||||||
|
|
||||||
- name: Run commit format checker
|
|
||||||
uses: https://git.frostfs.info/TrueCloudLab/dco-go@v3
|
|
||||||
with:
|
|
||||||
from: 'origin/${{ github.event.pull_request.base.ref }}'
|
|
1
.github/CODEOWNERS
vendored
Normal file
1
.github/CODEOWNERS
vendored
Normal file
|
@ -0,0 +1 @@
|
||||||
|
* @aprasolova @vdomnich-yadro @dansingjulia @yadro-vavdeev @abereziny
|
21
.github/workflows/dco.yml
vendored
Normal file
21
.github/workflows/dco.yml
vendored
Normal file
|
@ -0,0 +1,21 @@
|
||||||
|
name: DCO check
|
||||||
|
|
||||||
|
on:
|
||||||
|
pull_request:
|
||||||
|
branches:
|
||||||
|
- master
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
commits_check_job:
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
name: Commits Check
|
||||||
|
steps:
|
||||||
|
- name: Get PR Commits
|
||||||
|
id: 'get-pr-commits'
|
||||||
|
uses: tim-actions/get-pr-commits@master
|
||||||
|
with:
|
||||||
|
token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
- name: DCO Check
|
||||||
|
uses: tim-actions/dco@master
|
||||||
|
with:
|
||||||
|
commits: ${{ steps.get-pr-commits.outputs.commits }}
|
|
@ -51,26 +51,19 @@ basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"
|
||||||
config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager"
|
config = "frostfs_testlib.storage.controllers.state_managers.config_state_manager:ConfigStateManager"
|
||||||
|
|
||||||
[project.entry-points."frostfs.testlib.services"]
|
[project.entry-points."frostfs.testlib.services"]
|
||||||
frostfs-storage = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
|
s = "frostfs_testlib.storage.dataclasses.frostfs_services:StorageNode"
|
||||||
frostfs-s3 = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
|
s3-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:S3Gate"
|
||||||
frostfs-http = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
|
http-gate = "frostfs_testlib.storage.dataclasses.frostfs_services:HTTPGate"
|
||||||
neo-go = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
|
morph-chain = "frostfs_testlib.storage.dataclasses.frostfs_services:MorphChain"
|
||||||
frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
|
ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
|
||||||
|
|
||||||
[project.entry-points."frostfs.testlib.credentials_providers"]
|
|
||||||
authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider"
|
|
||||||
wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"
|
|
||||||
|
|
||||||
[project.entry-points."frostfs.testlib.bucket_cid_resolver"]
|
|
||||||
frostfs = "frostfs_testlib.s3.curl_bucket_resolver:CurlBucketContainerResolver"
|
|
||||||
|
|
||||||
[tool.isort]
|
[tool.isort]
|
||||||
profile = "black"
|
profile = "black"
|
||||||
src_paths = ["src", "tests"]
|
src_paths = ["src", "tests"]
|
||||||
line_length = 140
|
line_length = 120
|
||||||
|
|
||||||
[tool.black]
|
[tool.black]
|
||||||
line-length = 140
|
line-length = 120
|
||||||
target-version = ["py310"]
|
target-version = ["py310"]
|
||||||
|
|
||||||
[tool.bumpver]
|
[tool.bumpver]
|
||||||
|
|
|
@ -1,5 +1,4 @@
|
||||||
from frostfs_testlib.cli.frostfs_adm import FrostfsAdm
|
from frostfs_testlib.cli.frostfs_adm import FrostfsAdm
|
||||||
from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
|
from frostfs_testlib.cli.frostfs_authmate import FrostfsAuthmate
|
||||||
from frostfs_testlib.cli.frostfs_cli import FrostfsCli
|
from frostfs_testlib.cli.frostfs_cli import FrostfsCli
|
||||||
from frostfs_testlib.cli.generic_cli import GenericCli
|
|
||||||
from frostfs_testlib.cli.neogo import NeoGo, NetworkType
|
from frostfs_testlib.cli.neogo import NeoGo, NetworkType
|
||||||
|
|
|
@ -8,7 +8,7 @@ class FrostfsCliContainer(CliCommand):
|
||||||
def create(
|
def create(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
wallet: Optional[str] = None,
|
wallet: str,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
attributes: Optional[dict] = None,
|
attributes: Optional[dict] = None,
|
||||||
basic_acl: Optional[str] = None,
|
basic_acl: Optional[str] = None,
|
||||||
|
@ -57,14 +57,15 @@ class FrostfsCliContainer(CliCommand):
|
||||||
def delete(
|
def delete(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
await_mode: bool = False,
|
await_mode: bool = False,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
force: bool = False,
|
force: bool = False,
|
||||||
|
timeout: Optional[str] = None,
|
||||||
) -> CommandResult:
|
) -> CommandResult:
|
||||||
"""
|
"""
|
||||||
Delete an existing container.
|
Delete an existing container.
|
||||||
|
@ -80,6 +81,7 @@ class FrostfsCliContainer(CliCommand):
|
||||||
ttl: TTL value in request meta header (default 2).
|
ttl: TTL value in request meta header (default 2).
|
||||||
wallet: WIF (NEP-2) string or path to the wallet or binary key.
|
wallet: WIF (NEP-2) string or path to the wallet or binary key.
|
||||||
xhdr: Dict with request X-Headers.
|
xhdr: Dict with request X-Headers.
|
||||||
|
timeout: Timeout for the operation (default 15s).
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
Command's result.
|
Command's result.
|
||||||
|
@ -93,8 +95,8 @@ class FrostfsCliContainer(CliCommand):
|
||||||
def get(
|
def get(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
await_mode: bool = False,
|
await_mode: bool = False,
|
||||||
to: Optional[str] = None,
|
to: Optional[str] = None,
|
||||||
|
@ -129,8 +131,8 @@ class FrostfsCliContainer(CliCommand):
|
||||||
def get_eacl(
|
def get_eacl(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
await_mode: bool = False,
|
await_mode: bool = False,
|
||||||
to: Optional[str] = None,
|
to: Optional[str] = None,
|
||||||
|
@ -166,7 +168,7 @@ class FrostfsCliContainer(CliCommand):
|
||||||
def list(
|
def list(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
wallet: Optional[str] = None,
|
wallet: str,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
owner: Optional[str] = None,
|
owner: Optional[str] = None,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
|
@ -197,8 +199,8 @@ class FrostfsCliContainer(CliCommand):
|
||||||
def list_objects(
|
def list_objects(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
|
@ -227,8 +229,8 @@ class FrostfsCliContainer(CliCommand):
|
||||||
def set_eacl(
|
def set_eacl(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
await_mode: bool = False,
|
await_mode: bool = False,
|
||||||
table: Optional[str] = None,
|
table: Optional[str] = None,
|
||||||
|
@ -264,8 +266,8 @@ class FrostfsCliContainer(CliCommand):
|
||||||
def search_node(
|
def search_node(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
from_file: Optional[str] = None,
|
from_file: Optional[str] = None,
|
||||||
|
@ -296,5 +298,9 @@ class FrostfsCliContainer(CliCommand):
|
||||||
|
|
||||||
return self._execute(
|
return self._execute(
|
||||||
f"container nodes {from_str}",
|
f"container nodes {from_str}",
|
||||||
**{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]},
|
**{
|
||||||
|
param: value
|
||||||
|
for param, value in locals().items()
|
||||||
|
if param not in ["self", "from_file", "from_str"]
|
||||||
|
},
|
||||||
)
|
)
|
||||||
|
|
|
@ -39,12 +39,14 @@ class FrostfsCliControl(CliCommand):
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
timeout: Optional[str] = None,
|
timeout: Optional[str] = None,
|
||||||
) -> CommandResult:
|
) -> CommandResult:
|
||||||
"""Health check for FrostFS storage nodes
|
"""Set status of the storage node in FrostFS network map
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
wallet: Path to the wallet or binary key
|
wallet: Path to the wallet or binary key
|
||||||
address: Address of wallet account
|
address: Address of wallet account
|
||||||
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
|
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
|
||||||
|
force: Force turning to local maintenance
|
||||||
|
status: New netmap status keyword ('online', 'offline', 'maintenance')
|
||||||
timeout: Timeout for an operation (default 15s)
|
timeout: Timeout for an operation (default 15s)
|
||||||
|
|
||||||
Returns:
|
Returns:
|
||||||
|
@ -54,28 +56,3 @@ class FrostfsCliControl(CliCommand):
|
||||||
"control healthcheck",
|
"control healthcheck",
|
||||||
**{param: value for param, value in locals().items() if param not in ["self"]},
|
**{param: value for param, value in locals().items() if param not in ["self"]},
|
||||||
)
|
)
|
||||||
|
|
||||||
def drop_objects(
|
|
||||||
self,
|
|
||||||
endpoint: str,
|
|
||||||
objects: str,
|
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
|
||||||
timeout: Optional[str] = None,
|
|
||||||
) -> CommandResult:
|
|
||||||
"""Drop objects from the node's local storage
|
|
||||||
|
|
||||||
Args:
|
|
||||||
wallet: Path to the wallet or binary key
|
|
||||||
address: Address of wallet account
|
|
||||||
endpoint: Remote node control address (as 'multiaddr' or '<host>:<port>')
|
|
||||||
objects: List of object addresses to be removed in string format
|
|
||||||
timeout: Timeout for an operation (default 15s)
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
Command`s result.
|
|
||||||
"""
|
|
||||||
return self._execute(
|
|
||||||
"control drop-objects",
|
|
||||||
**{param: value for param, value in locals().items() if param not in ["self"]},
|
|
||||||
)
|
|
|
@ -8,7 +8,7 @@ class FrostfsCliNetmap(CliCommand):
|
||||||
def epoch(
|
def epoch(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
wallet: Optional[str] = None,
|
wallet: str,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
generate_key: bool = False,
|
generate_key: bool = False,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
|
@ -38,7 +38,7 @@ class FrostfsCliNetmap(CliCommand):
|
||||||
def netinfo(
|
def netinfo(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
wallet: Optional[str] = None,
|
wallet: str,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
generate_key: bool = False,
|
generate_key: bool = False,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
|
@ -68,7 +68,7 @@ class FrostfsCliNetmap(CliCommand):
|
||||||
def nodeinfo(
|
def nodeinfo(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
wallet: Optional[str] = None,
|
wallet: str,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
generate_key: bool = False,
|
generate_key: bool = False,
|
||||||
json: bool = False,
|
json: bool = False,
|
||||||
|
@ -100,7 +100,7 @@ class FrostfsCliNetmap(CliCommand):
|
||||||
def snapshot(
|
def snapshot(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
wallet: Optional[str] = None,
|
wallet: str,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
generate_key: bool = False,
|
generate_key: bool = False,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
|
|
|
@ -8,9 +8,9 @@ class FrostfsCliObject(CliCommand):
|
||||||
def delete(
|
def delete(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
|
@ -44,9 +44,9 @@ class FrostfsCliObject(CliCommand):
|
||||||
def get(
|
def get(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
file: Optional[str] = None,
|
file: Optional[str] = None,
|
||||||
|
@ -88,9 +88,9 @@ class FrostfsCliObject(CliCommand):
|
||||||
def hash(
|
def hash(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
range: Optional[str] = None,
|
range: Optional[str] = None,
|
||||||
|
@ -124,15 +124,17 @@ class FrostfsCliObject(CliCommand):
|
||||||
"""
|
"""
|
||||||
return self._execute(
|
return self._execute(
|
||||||
"object hash",
|
"object hash",
|
||||||
**{param: value for param, value in locals().items() if param not in ["self", "params"]},
|
**{
|
||||||
|
param: value for param, value in locals().items() if param not in ["self", "params"]
|
||||||
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
def head(
|
def head(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
file: Optional[str] = None,
|
file: Optional[str] = None,
|
||||||
|
@ -176,9 +178,9 @@ class FrostfsCliObject(CliCommand):
|
||||||
def lock(
|
def lock(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
lifetime: Optional[int] = None,
|
lifetime: Optional[int] = None,
|
||||||
expire_at: Optional[int] = None,
|
expire_at: Optional[int] = None,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
|
@ -216,9 +218,9 @@ class FrostfsCliObject(CliCommand):
|
||||||
def put(
|
def put(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
file: str,
|
file: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
attributes: Optional[dict] = None,
|
attributes: Optional[dict] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
|
@ -267,10 +269,10 @@ class FrostfsCliObject(CliCommand):
|
||||||
def range(
|
def range(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
range: str,
|
range: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
file: Optional[str] = None,
|
file: Optional[str] = None,
|
||||||
|
@ -311,8 +313,8 @@ class FrostfsCliObject(CliCommand):
|
||||||
def search(
|
def search(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
filters: Optional[list] = None,
|
filters: Optional[list] = None,
|
||||||
|
@ -353,11 +355,11 @@ class FrostfsCliObject(CliCommand):
|
||||||
def nodes(
|
def nodes(
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
generate_key: Optional[bool] = None,
|
generate_key: Optional = None,
|
||||||
oid: Optional[str] = None,
|
oid: Optional[str] = None,
|
||||||
trace: bool = False,
|
trace: bool = False,
|
||||||
root: bool = False,
|
root: bool = False,
|
||||||
|
|
|
@ -9,6 +9,7 @@ class FrostfsCliSession(CliCommand):
|
||||||
self,
|
self,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
wallet: str,
|
wallet: str,
|
||||||
|
wallet_password: str,
|
||||||
out: str,
|
out: str,
|
||||||
lifetime: Optional[int] = None,
|
lifetime: Optional[int] = None,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
|
@ -29,7 +30,12 @@ class FrostfsCliSession(CliCommand):
|
||||||
Returns:
|
Returns:
|
||||||
Command's result.
|
Command's result.
|
||||||
"""
|
"""
|
||||||
return self._execute(
|
return self._execute_with_password(
|
||||||
"session create",
|
"session create",
|
||||||
**{param: value for param, value in locals().items() if param not in ["self"]},
|
wallet_password,
|
||||||
|
**{
|
||||||
|
param: value
|
||||||
|
for param, value in locals().items()
|
||||||
|
if param not in ["self", "wallet_password"]
|
||||||
|
},
|
||||||
)
|
)
|
||||||
|
|
|
@ -39,10 +39,10 @@ class FrostfsCliShards(CliCommand):
|
||||||
def set_mode(
|
def set_mode(
|
||||||
self,
|
self,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
|
wallet: str,
|
||||||
|
wallet_password: str,
|
||||||
mode: str,
|
mode: str,
|
||||||
id: Optional[list[str]],
|
id: Optional[list[str]],
|
||||||
wallet: Optional[str] = None,
|
|
||||||
wallet_password: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
all: bool = False,
|
all: bool = False,
|
||||||
clear_errors: bool = False,
|
clear_errors: bool = False,
|
||||||
|
@ -65,15 +65,14 @@ class FrostfsCliShards(CliCommand):
|
||||||
Returns:
|
Returns:
|
||||||
Command's result.
|
Command's result.
|
||||||
"""
|
"""
|
||||||
if not wallet_password:
|
|
||||||
return self._execute(
|
|
||||||
"control shards set-mode",
|
|
||||||
**{param: value for param, value in locals().items() if param not in ["self"]},
|
|
||||||
)
|
|
||||||
return self._execute_with_password(
|
return self._execute_with_password(
|
||||||
"control shards set-mode",
|
"control shards set-mode",
|
||||||
wallet_password,
|
wallet_password,
|
||||||
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
|
**{
|
||||||
|
param: value
|
||||||
|
for param, value in locals().items()
|
||||||
|
if param not in ["self", "wallet_password"]
|
||||||
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
def dump(
|
def dump(
|
||||||
|
@ -106,14 +105,18 @@ class FrostfsCliShards(CliCommand):
|
||||||
return self._execute_with_password(
|
return self._execute_with_password(
|
||||||
"control shards dump",
|
"control shards dump",
|
||||||
wallet_password,
|
wallet_password,
|
||||||
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
|
**{
|
||||||
|
param: value
|
||||||
|
for param, value in locals().items()
|
||||||
|
if param not in ["self", "wallet_password"]
|
||||||
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
def list(
|
def list(
|
||||||
self,
|
self,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
wallet: Optional[str] = None,
|
wallet: str,
|
||||||
wallet_password: Optional[str] = None,
|
wallet_password: str,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
json_mode: bool = False,
|
json_mode: bool = False,
|
||||||
timeout: Optional[str] = None,
|
timeout: Optional[str] = None,
|
||||||
|
@ -132,14 +135,12 @@ class FrostfsCliShards(CliCommand):
|
||||||
Returns:
|
Returns:
|
||||||
Command's result.
|
Command's result.
|
||||||
"""
|
"""
|
||||||
if not wallet_password:
|
|
||||||
return self._execute(
|
|
||||||
"control shards list",
|
|
||||||
**{param: value for param, value in locals().items() if param not in ["self"]},
|
|
||||||
)
|
|
||||||
return self._execute_with_password(
|
return self._execute_with_password(
|
||||||
"control shards list",
|
"control shards list",
|
||||||
wallet_password,
|
wallet_password,
|
||||||
**{param: value for param, value in locals().items() if param not in ["self", "wallet_password"]},
|
**{
|
||||||
|
param: value
|
||||||
|
for param, value in locals().items()
|
||||||
|
if param not in ["self", "wallet_password"]
|
||||||
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -6,12 +6,12 @@ from frostfs_testlib.shell import CommandResult
|
||||||
|
|
||||||
class FrostfsCliUtil(CliCommand):
|
class FrostfsCliUtil(CliCommand):
|
||||||
def sign_bearer_token(
|
def sign_bearer_token(
|
||||||
self,
|
self,
|
||||||
from_file: str,
|
wallet: str,
|
||||||
to_file: str,
|
from_file: str,
|
||||||
wallet: Optional[str] = None,
|
to_file: str,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
json: Optional[bool] = False,
|
json: Optional[bool] = False,
|
||||||
) -> CommandResult:
|
) -> CommandResult:
|
||||||
"""
|
"""
|
||||||
Sign bearer token to use it in requests.
|
Sign bearer token to use it in requests.
|
||||||
|
@ -33,9 +33,9 @@ class FrostfsCliUtil(CliCommand):
|
||||||
|
|
||||||
def sign_session_token(
|
def sign_session_token(
|
||||||
self,
|
self,
|
||||||
|
wallet: str,
|
||||||
from_file: str,
|
from_file: str,
|
||||||
to_file: str,
|
to_file: str,
|
||||||
wallet: Optional[str] = None,
|
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
) -> CommandResult:
|
) -> CommandResult:
|
||||||
"""
|
"""
|
||||||
|
|
|
@ -1,30 +0,0 @@
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
from frostfs_testlib.hosting.interfaces import Host
|
|
||||||
from frostfs_testlib.shell.interfaces import CommandOptions, Shell
|
|
||||||
|
|
||||||
|
|
||||||
class GenericCli(object):
|
|
||||||
def __init__(self, cli_name: str, host: Host) -> None:
|
|
||||||
self.host = host
|
|
||||||
self.cli_name = cli_name
|
|
||||||
|
|
||||||
def __call__(
|
|
||||||
self,
|
|
||||||
args: Optional[str] = "",
|
|
||||||
pipes: Optional[str] = "",
|
|
||||||
shell: Optional[Shell] = None,
|
|
||||||
options: Optional[CommandOptions] = None,
|
|
||||||
):
|
|
||||||
if not shell:
|
|
||||||
shell = self.host.get_shell()
|
|
||||||
|
|
||||||
cli_config = self.host.get_cli_config(self.cli_name, True)
|
|
||||||
extra_args = ""
|
|
||||||
exec_path = self.cli_name
|
|
||||||
if cli_config:
|
|
||||||
extra_args = " ".join(cli_config.extra_args)
|
|
||||||
exec_path = cli_config.exec_path
|
|
||||||
|
|
||||||
cmd = f"{exec_path} {args} {extra_args} {pipes}"
|
|
||||||
return shell.exec(cmd, options)
|
|
|
@ -1,7 +1,7 @@
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode
|
from frostfs_testlib.storage.cluster import ClusterNode
|
||||||
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo, NodeStatus
|
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeNetInfo, NodeNetmapInfo
|
||||||
|
|
||||||
|
|
||||||
class NetmapParser:
|
class NetmapParser:
|
||||||
|
@ -44,7 +44,7 @@ class NetmapParser:
|
||||||
regexes = {
|
regexes = {
|
||||||
"node_id": r"\d+: (?P<node_id>\w+)",
|
"node_id": r"\d+: (?P<node_id>\w+)",
|
||||||
"node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
|
"node_data_ips": r"(?P<node_data_ips>/ip4/.+?)$",
|
||||||
"node_status": r"(?P<node_status>ONLINE|MAINTENANCE|OFFLINE)",
|
"node_status": r"(?P<node_status>ONLINE|OFFLINE)",
|
||||||
"cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
|
"cluster_name": r"ClusterName: (?P<cluster_name>\w+)",
|
||||||
"continent": r"Continent: (?P<continent>\w+)",
|
"continent": r"Continent: (?P<continent>\w+)",
|
||||||
"country": r"Country: (?P<country>\w+)",
|
"country": r"Country: (?P<country>\w+)",
|
||||||
|
@ -62,17 +62,14 @@ class NetmapParser:
|
||||||
for node in netmap_nodes:
|
for node in netmap_nodes:
|
||||||
for key, regex in regexes.items():
|
for key, regex in regexes.items():
|
||||||
search_result = re.search(regex, node, flags=re.MULTILINE)
|
search_result = re.search(regex, node, flags=re.MULTILINE)
|
||||||
if search_result == None:
|
|
||||||
result_netmap[key] = None
|
|
||||||
continue
|
|
||||||
if key == "node_data_ips":
|
if key == "node_data_ips":
|
||||||
result_netmap[key] = search_result[key].strip().split(" ")
|
result_netmap[key] = search_result[key].strip().split(" ")
|
||||||
continue
|
continue
|
||||||
if key == "external_address":
|
if key == "external_address":
|
||||||
result_netmap[key] = search_result[key].strip().split(",")
|
result_netmap[key] = search_result[key].strip().split(",")
|
||||||
continue
|
continue
|
||||||
if key == "node_status":
|
if search_result == None:
|
||||||
result_netmap[key] = NodeStatus(search_result[key].strip().lower())
|
result_netmap[key] = None
|
||||||
continue
|
continue
|
||||||
result_netmap[key] = search_result[key].strip()
|
result_netmap[key] = search_result[key].strip()
|
||||||
|
|
||||||
|
|
|
@ -1,47 +0,0 @@
|
||||||
import re
|
|
||||||
from datetime import datetime
|
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
|
||||||
from frostfs_testlib.cli import FrostfsAuthmate
|
|
||||||
from frostfs_testlib.credentials.interfaces import S3Credentials, S3CredentialsProvider, User
|
|
||||||
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
|
|
||||||
from frostfs_testlib.shell import LocalShell
|
|
||||||
from frostfs_testlib.steps.cli.container import list_containers
|
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode
|
|
||||||
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
|
|
||||||
|
|
||||||
|
|
||||||
class AuthmateS3CredentialsProvider(S3CredentialsProvider):
|
|
||||||
@reporter.step("Init S3 Credentials using Authmate CLI")
|
|
||||||
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
|
|
||||||
cluster_nodes: list[ClusterNode] = self.cluster.cluster_nodes
|
|
||||||
shell = LocalShell()
|
|
||||||
wallet = user.wallet
|
|
||||||
endpoint = cluster_node.storage_node.get_rpc_endpoint()
|
|
||||||
|
|
||||||
gate_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
|
|
||||||
# unique short bucket name
|
|
||||||
bucket = f"bucket-{hex(int(datetime.now().timestamp()*1000000))}"
|
|
||||||
|
|
||||||
frostfs_authmate: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
|
|
||||||
issue_secret_output = frostfs_authmate.secret.issue(
|
|
||||||
wallet=wallet.path,
|
|
||||||
peer=endpoint,
|
|
||||||
gate_public_key=gate_public_keys,
|
|
||||||
wallet_password=wallet.password,
|
|
||||||
container_policy=location_constraints,
|
|
||||||
container_friendly_name=bucket,
|
|
||||||
).stdout
|
|
||||||
|
|
||||||
aws_access_key_id = str(re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id"))
|
|
||||||
aws_secret_access_key = str(
|
|
||||||
re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group("aws_secret_access_key")
|
|
||||||
)
|
|
||||||
cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
|
|
||||||
|
|
||||||
containers_list = list_containers(wallet, shell, endpoint)
|
|
||||||
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
|
|
||||||
|
|
||||||
user.s3_credentials = S3Credentials(aws_access_key_id, aws_secret_access_key)
|
|
||||||
return user.s3_credentials
|
|
|
@ -1,51 +0,0 @@
|
||||||
from abc import ABC, abstractmethod
|
|
||||||
from dataclasses import dataclass, field
|
|
||||||
from typing import Any, Optional
|
|
||||||
|
|
||||||
from frostfs_testlib.plugins import load_plugin
|
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class S3Credentials:
|
|
||||||
access_key: str
|
|
||||||
secret_key: str
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class User:
|
|
||||||
name: str
|
|
||||||
attributes: dict[str, Any] = field(default_factory=dict)
|
|
||||||
wallet: WalletInfo | None = None
|
|
||||||
s3_credentials: S3Credentials | None = None
|
|
||||||
|
|
||||||
|
|
||||||
class S3CredentialsProvider(ABC):
|
|
||||||
def __init__(self, cluster: Cluster) -> None:
|
|
||||||
self.cluster = cluster
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def provide(self, user: User, cluster_node: ClusterNode, location_constraints: Optional[str] = None) -> S3Credentials:
|
|
||||||
raise NotImplementedError("Directly called abstract class?")
|
|
||||||
|
|
||||||
|
|
||||||
class GrpcCredentialsProvider(ABC):
|
|
||||||
def __init__(self, cluster: Cluster) -> None:
|
|
||||||
self.cluster = cluster
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
|
|
||||||
raise NotImplementedError("Directly called abstract class?")
|
|
||||||
|
|
||||||
|
|
||||||
class CredentialsProvider(object):
|
|
||||||
S3: S3CredentialsProvider
|
|
||||||
GRPC: GrpcCredentialsProvider
|
|
||||||
|
|
||||||
def __init__(self, cluster: Cluster) -> None:
|
|
||||||
config = cluster.cluster_nodes[0].host.config
|
|
||||||
s3_cls = load_plugin("frostfs.testlib.credentials_providers", config.s3_creds_plugin_name)
|
|
||||||
self.S3 = s3_cls(cluster)
|
|
||||||
grpc_cls = load_plugin("frostfs.testlib.credentials_providers", config.grpc_creds_plugin_name)
|
|
||||||
self.GRPC = grpc_cls(cluster)
|
|
|
@ -1,14 +0,0 @@
|
||||||
from frostfs_testlib import reporter
|
|
||||||
from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User
|
|
||||||
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
|
|
||||||
from frostfs_testlib.shell.local_shell import LocalShell
|
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode
|
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletFactory, WalletInfo
|
|
||||||
|
|
||||||
|
|
||||||
class WalletFactoryProvider(GrpcCredentialsProvider):
|
|
||||||
@reporter.step("Init gRPC Credentials using wallet generation")
|
|
||||||
def provide(self, user: User, cluster_node: ClusterNode) -> WalletInfo:
|
|
||||||
wallet_factory = WalletFactory(ASSETS_DIR, LocalShell())
|
|
||||||
user.wallet = wallet_factory.create_wallet(file_name=user.name, password=DEFAULT_WALLET_PASS)
|
|
||||||
return user.wallet
|
|
|
@ -1,5 +1,5 @@
|
||||||
class Options:
|
class Options:
|
||||||
DEFAULT_SHELL_TIMEOUT = 120
|
DEFAULT_SHELL_TIMEOUT = 90
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def get_default_shell_timeout():
|
def get_default_shell_timeout():
|
||||||
|
|
|
@ -10,7 +10,9 @@ class ParsedAttributes:
|
||||||
def parse(cls, attributes: dict[str, Any]):
|
def parse(cls, attributes: dict[str, Any]):
|
||||||
# Pick attributes supported by the class
|
# Pick attributes supported by the class
|
||||||
field_names = set(field.name for field in fields(cls))
|
field_names = set(field.name for field in fields(cls))
|
||||||
supported_attributes = {key: value for key, value in attributes.items() if key in field_names}
|
supported_attributes = {
|
||||||
|
key: value for key, value in attributes.items() if key in field_names
|
||||||
|
}
|
||||||
return cls(**supported_attributes)
|
return cls(**supported_attributes)
|
||||||
|
|
||||||
|
|
||||||
|
@ -27,7 +29,6 @@ class CLIConfig:
|
||||||
name: str
|
name: str
|
||||||
exec_path: str
|
exec_path: str
|
||||||
attributes: dict[str, str] = field(default_factory=dict)
|
attributes: dict[str, str] = field(default_factory=dict)
|
||||||
extra_args: list[str] = field(default_factory=list)
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
|
@ -62,9 +63,6 @@ class HostConfig:
|
||||||
plugin_name: str
|
plugin_name: str
|
||||||
healthcheck_plugin_name: str
|
healthcheck_plugin_name: str
|
||||||
address: str
|
address: str
|
||||||
s3_creds_plugin_name: str = field(default="authmate")
|
|
||||||
grpc_creds_plugin_name: str = field(default="wallet_factory")
|
|
||||||
product: str = field(default="frostfs")
|
|
||||||
services: list[ServiceConfig] = field(default_factory=list)
|
services: list[ServiceConfig] = field(default_factory=list)
|
||||||
clis: list[CLIConfig] = field(default_factory=list)
|
clis: list[CLIConfig] = field(default_factory=list)
|
||||||
attributes: dict[str, str] = field(default_factory=dict)
|
attributes: dict[str, str] = field(default_factory=dict)
|
||||||
|
|
|
@ -152,7 +152,9 @@ class DockerHost(Host):
|
||||||
timeout=service_attributes.start_timeout,
|
timeout=service_attributes.start_timeout,
|
||||||
)
|
)
|
||||||
|
|
||||||
def wait_for_service_to_be_in_state(self, systemd_service_name: str, expected_state: str, timeout: int) -> None:
|
def wait_for_service_to_be_in_state(
|
||||||
|
self, systemd_service_name: str, expected_state: str, timeout: int
|
||||||
|
) -> None:
|
||||||
raise NotImplementedError("Not implemented for docker")
|
raise NotImplementedError("Not implemented for docker")
|
||||||
|
|
||||||
def get_data_directory(self, service_name: str) -> str:
|
def get_data_directory(self, service_name: str) -> str:
|
||||||
|
@ -179,12 +181,6 @@ class DockerHost(Host):
|
||||||
def delete_pilorama(self, service_name: str) -> None:
|
def delete_pilorama(self, service_name: str) -> None:
|
||||||
raise NotImplementedError("Not implemented for docker")
|
raise NotImplementedError("Not implemented for docker")
|
||||||
|
|
||||||
def delete_file(self, file_path: str) -> None:
|
|
||||||
raise NotImplementedError("Not implemented for docker")
|
|
||||||
|
|
||||||
def is_file_exist(self, file_path: str) -> None:
|
|
||||||
raise NotImplementedError("Not implemented for docker")
|
|
||||||
|
|
||||||
def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
|
def delete_storage_node_data(self, service_name: str, cache_only: bool = False) -> None:
|
||||||
volume_path = self.get_data_directory(service_name)
|
volume_path = self.get_data_directory(service_name)
|
||||||
|
|
||||||
|
@ -309,7 +305,9 @@ class DockerHost(Host):
|
||||||
return container
|
return container
|
||||||
return None
|
return None
|
||||||
|
|
||||||
def _wait_for_container_to_be_in_state(self, container_name: str, expected_state: str, timeout: int) -> None:
|
def _wait_for_container_to_be_in_state(
|
||||||
|
self, container_name: str, expected_state: str, timeout: int
|
||||||
|
) -> None:
|
||||||
iterations = 10
|
iterations = 10
|
||||||
iteration_wait_time = timeout / iterations
|
iteration_wait_time = timeout / iterations
|
||||||
|
|
||||||
|
|
|
@ -54,7 +54,7 @@ class Host(ABC):
|
||||||
raise ValueError(f"Unknown service name: '{service_name}'")
|
raise ValueError(f"Unknown service name: '{service_name}'")
|
||||||
return service_config
|
return service_config
|
||||||
|
|
||||||
def get_cli_config(self, cli_name: str, allow_empty: bool = False) -> CLIConfig:
|
def get_cli_config(self, cli_name: str) -> CLIConfig:
|
||||||
"""Returns config of CLI tool with specified name.
|
"""Returns config of CLI tool with specified name.
|
||||||
|
|
||||||
The CLI must be located on this host.
|
The CLI must be located on this host.
|
||||||
|
@ -66,7 +66,7 @@ class Host(ABC):
|
||||||
Config of the CLI tool.
|
Config of the CLI tool.
|
||||||
"""
|
"""
|
||||||
cli_config = self._cli_config_by_name.get(cli_name)
|
cli_config = self._cli_config_by_name.get(cli_name)
|
||||||
if cli_config is None and not allow_empty:
|
if cli_config is None:
|
||||||
raise ValueError(f"Unknown CLI name: '{cli_name}'")
|
raise ValueError(f"Unknown CLI name: '{cli_name}'")
|
||||||
return cli_config
|
return cli_config
|
||||||
|
|
||||||
|
|
|
@ -50,7 +50,6 @@ class SummarizedStats:
|
||||||
throughput: float = field(default_factory=float)
|
throughput: float = field(default_factory=float)
|
||||||
latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies)
|
latencies: SummarizedLatencies = field(default_factory=SummarizedLatencies)
|
||||||
errors: SummarizedErorrs = field(default_factory=SummarizedErorrs)
|
errors: SummarizedErorrs = field(default_factory=SummarizedErorrs)
|
||||||
total_bytes: int = field(default_factory=int)
|
|
||||||
passed: bool = True
|
passed: bool = True
|
||||||
|
|
||||||
def calc_stats(self):
|
def calc_stats(self):
|
||||||
|
@ -86,7 +85,6 @@ class SummarizedStats:
|
||||||
target.latencies.by_node[node_key] = operation.latency
|
target.latencies.by_node[node_key] = operation.latency
|
||||||
target.throughput += operation.throughput
|
target.throughput += operation.throughput
|
||||||
target.errors.threshold = load_params.error_threshold
|
target.errors.threshold = load_params.error_threshold
|
||||||
target.total_bytes = operation.total_bytes
|
|
||||||
if operation.failed_iterations:
|
if operation.failed_iterations:
|
||||||
target.errors.by_node[node_key] = operation.failed_iterations
|
target.errors.by_node[node_key] = operation.failed_iterations
|
||||||
|
|
||||||
|
|
|
@ -4,19 +4,18 @@ import math
|
||||||
import os
|
import os
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from threading import Event
|
|
||||||
from time import sleep
|
from time import sleep
|
||||||
from typing import Any
|
from typing import Any
|
||||||
from urllib.parse import urlparse
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.credentials.interfaces import User
|
|
||||||
from frostfs_testlib.load.interfaces.loader import Loader
|
from frostfs_testlib.load.interfaces.loader import Loader
|
||||||
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
|
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadScenario, LoadType
|
||||||
from frostfs_testlib.processes.remote_process import RemoteProcess
|
from frostfs_testlib.processes.remote_process import RemoteProcess
|
||||||
from frostfs_testlib.resources.common import STORAGE_USER_NAME
|
from frostfs_testlib.resources.common import STORAGE_USER_NAME
|
||||||
from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
|
from frostfs_testlib.resources.load_params import K6_STOP_SIGNAL_TIMEOUT, K6_TEARDOWN_PERIOD
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.test_control import wait_for_success
|
from frostfs_testlib.testing.test_control import wait_for_success
|
||||||
|
|
||||||
EXIT_RESULT_CODE = 0
|
EXIT_RESULT_CODE = 0
|
||||||
|
@ -43,16 +42,16 @@ class K6:
|
||||||
k6_dir: str,
|
k6_dir: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
loader: Loader,
|
loader: Loader,
|
||||||
user: User,
|
wallet: WalletInfo,
|
||||||
):
|
):
|
||||||
if load_params.scenario is None:
|
if load_params.scenario is None:
|
||||||
raise RuntimeError("Scenario should not be none")
|
raise RuntimeError("Scenario should not be none")
|
||||||
|
|
||||||
self.load_params = load_params
|
self.load_params: LoadParams = load_params
|
||||||
self.endpoints = endpoints
|
self.endpoints = endpoints
|
||||||
self.loader = loader
|
self.loader: Loader = loader
|
||||||
self.shell = shell
|
self.shell: Shell = shell
|
||||||
self.user = user
|
self.wallet = wallet
|
||||||
self.preset_output: str = ""
|
self.preset_output: str = ""
|
||||||
self.summary_json: str = os.path.join(
|
self.summary_json: str = os.path.join(
|
||||||
self.load_params.working_dir,
|
self.load_params.working_dir,
|
||||||
|
@ -62,22 +61,26 @@ class K6:
|
||||||
self._k6_dir: str = k6_dir
|
self._k6_dir: str = k6_dir
|
||||||
|
|
||||||
command = (
|
command = (
|
||||||
f"{self._generate_env_variables()}{self._k6_dir}/k6 run {self._generate_k6_variables()} "
|
f"{self._k6_dir}/k6 run {self._generate_env_variables()} "
|
||||||
f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
|
f"{self._k6_dir}/scenarios/{self.load_params.scenario.value}.js"
|
||||||
)
|
)
|
||||||
remote_user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
|
user = STORAGE_USER_NAME if self.load_params.scenario == LoadScenario.LOCAL else None
|
||||||
process_id = self.load_params.load_id if self.load_params.scenario != LoadScenario.VERIFY else f"{self.load_params.load_id}_verify"
|
process_id = (
|
||||||
self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, remote_user, process_id)
|
self.load_params.load_id
|
||||||
|
if self.load_params.scenario != LoadScenario.VERIFY
|
||||||
|
else f"{self.load_params.load_id}_verify"
|
||||||
|
)
|
||||||
|
self._k6_process = RemoteProcess.create(command, self.shell, self.load_params.working_dir, user, process_id)
|
||||||
|
|
||||||
def _get_fill_percents(self):
|
def _get_fill_percents(self):
|
||||||
fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs | grep data").stdout.split("\n")
|
fill_percents = self.shell.exec("df -H --output=source,pcent,target | grep frostfs").stdout.split("\n")
|
||||||
return [line.split() for line in fill_percents][:-1]
|
return [line.split() for line in fill_percents][:-1]
|
||||||
|
|
||||||
def check_fill_percent(self):
|
def check_fill_percent(self):
|
||||||
fill_percents = self._get_fill_percents()
|
fill_percents = self._get_fill_percents()
|
||||||
percent_mean = 0
|
percent_mean = 0
|
||||||
for line in fill_percents:
|
for line in fill_percents:
|
||||||
percent_mean += float(line[1].split("%")[0])
|
percent_mean += float(line[1].split('%')[0])
|
||||||
percent_mean = percent_mean / len(fill_percents)
|
percent_mean = percent_mean / len(fill_percents)
|
||||||
logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
|
logger.info(f"{self.loader.ip} mean fill percent is {percent_mean}")
|
||||||
return percent_mean >= self.load_params.fill_percent
|
return percent_mean >= self.load_params.fill_percent
|
||||||
|
@ -100,8 +103,8 @@ class K6:
|
||||||
preset_grpc: [
|
preset_grpc: [
|
||||||
preset_grpc,
|
preset_grpc,
|
||||||
f"--endpoint {','.join(self.endpoints)}",
|
f"--endpoint {','.join(self.endpoints)}",
|
||||||
f"--wallet {self.user.wallet.path} ",
|
f"--wallet {self.wallet.path} ",
|
||||||
f"--config {self.user.wallet.config_path} ",
|
f"--config {self.wallet.config_path} ",
|
||||||
],
|
],
|
||||||
preset_s3: [
|
preset_s3: [
|
||||||
preset_s3,
|
preset_s3,
|
||||||
|
@ -122,9 +125,9 @@ class K6:
|
||||||
self.preset_output = result.stdout.strip("\n")
|
self.preset_output = result.stdout.strip("\n")
|
||||||
return self.preset_output
|
return self.preset_output
|
||||||
|
|
||||||
@reporter.step("Generate K6 variables")
|
@reporter.step("Generate K6 command")
|
||||||
def _generate_k6_variables(self) -> str:
|
def _generate_env_variables(self) -> str:
|
||||||
env_vars = self.load_params.get_k6_vars()
|
env_vars = self.load_params.get_env_vars()
|
||||||
|
|
||||||
env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
|
env_vars[f"{self.load_params.load_type.value.upper()}_ENDPOINTS"] = ",".join(self.endpoints)
|
||||||
env_vars["SUMMARY_JSON"] = self.summary_json
|
env_vars["SUMMARY_JSON"] = self.summary_json
|
||||||
|
@ -132,14 +135,6 @@ class K6:
|
||||||
reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables")
|
reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "K6 ENV variables")
|
||||||
return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None])
|
return " ".join([f"-e {param}='{value}'" for param, value in env_vars.items() if value is not None])
|
||||||
|
|
||||||
@reporter.step("Generate env variables")
|
|
||||||
def _generate_env_variables(self) -> str:
|
|
||||||
env_vars = self.load_params.get_env_vars()
|
|
||||||
if not env_vars:
|
|
||||||
return ""
|
|
||||||
reporter.attach("\n".join(f"{param}: {value}" for param, value in env_vars.items()), "ENV variables")
|
|
||||||
return " ".join([f"{param}='{value}'" for param, value in env_vars.items() if value is not None]) + " "
|
|
||||||
|
|
||||||
def get_start_time(self) -> datetime:
|
def get_start_time(self) -> datetime:
|
||||||
return datetime.fromtimestamp(self._k6_process.start_time())
|
return datetime.fromtimestamp(self._k6_process.start_time())
|
||||||
|
|
||||||
|
@ -150,7 +145,7 @@ class K6:
|
||||||
with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
|
with reporter.step(f"Start load from loader {self.loader.ip} on endpoints {self.endpoints}"):
|
||||||
self._k6_process.start()
|
self._k6_process.start()
|
||||||
|
|
||||||
def wait_until_finished(self, event: Event, soft_timeout: int = 0) -> None:
|
def wait_until_finished(self, event, soft_timeout: int = 0) -> None:
|
||||||
with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
|
with reporter.step(f"Wait until load is finished from loader {self.loader.ip} on endpoints {self.endpoints}"):
|
||||||
if self.load_params.scenario == LoadScenario.VERIFY:
|
if self.load_params.scenario == LoadScenario.VERIFY:
|
||||||
timeout = self.load_params.verify_time or 0
|
timeout = self.load_params.verify_time or 0
|
||||||
|
@ -164,7 +159,9 @@ class K6:
|
||||||
remaining_time = timeout - working_time
|
remaining_time = timeout - working_time
|
||||||
|
|
||||||
setup_teardown_time = (
|
setup_teardown_time = (
|
||||||
int(K6_TEARDOWN_PERIOD) + self.load_params.get_init_time() + int(self.load_params.setup_timeout.replace("s", "").strip())
|
int(K6_TEARDOWN_PERIOD)
|
||||||
|
+ self.load_params.get_init_time()
|
||||||
|
+ int(self.load_params.setup_timeout.replace("s", "").strip())
|
||||||
)
|
)
|
||||||
remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time
|
remaining_time_including_setup_and_teardown = remaining_time + setup_teardown_time
|
||||||
timeout = remaining_time_including_setup_and_teardown
|
timeout = remaining_time_including_setup_and_teardown
|
||||||
|
@ -191,7 +188,7 @@ class K6:
|
||||||
wait_interval = min_wait_interval
|
wait_interval = min_wait_interval
|
||||||
if self._k6_process is None:
|
if self._k6_process is None:
|
||||||
assert "No k6 instances were executed"
|
assert "No k6 instances were executed"
|
||||||
|
|
||||||
while timeout > 0:
|
while timeout > 0:
|
||||||
if not self.load_params.fill_percent is None:
|
if not self.load_params.fill_percent is None:
|
||||||
with reporter.step(f"Check the percentage of filling of all data disks on the node"):
|
with reporter.step(f"Check the percentage of filling of all data disks on the node"):
|
||||||
|
@ -200,14 +197,14 @@ class K6:
|
||||||
event.set()
|
event.set()
|
||||||
self.stop()
|
self.stop()
|
||||||
return
|
return
|
||||||
|
|
||||||
if event.is_set():
|
if event.is_set():
|
||||||
self.stop()
|
self.stop()
|
||||||
return
|
return
|
||||||
|
|
||||||
if not self._k6_process.running():
|
if not self._k6_process.running():
|
||||||
return
|
return
|
||||||
|
|
||||||
remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
|
remaining_time_hours = f"{timeout//3600}h" if timeout // 3600 != 0 else ""
|
||||||
remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
|
remaining_time_minutes = f"{timeout//60%60}m" if timeout // 60 % 60 != 0 else ""
|
||||||
logger.info(
|
logger.info(
|
||||||
|
|
|
@ -94,18 +94,16 @@ def metadata_field(
|
||||||
string_repr: Optional[bool] = True,
|
string_repr: Optional[bool] = True,
|
||||||
distributed: Optional[bool] = False,
|
distributed: Optional[bool] = False,
|
||||||
formatter: Optional[Callable] = None,
|
formatter: Optional[Callable] = None,
|
||||||
env_variable: Optional[str] = None,
|
|
||||||
):
|
):
|
||||||
return field(
|
return field(
|
||||||
default=None,
|
default=None,
|
||||||
metadata={
|
metadata={
|
||||||
"applicable_scenarios": applicable_scenarios,
|
"applicable_scenarios": applicable_scenarios,
|
||||||
"preset_argument": preset_param,
|
"preset_argument": preset_param,
|
||||||
"scenario_variable": scenario_variable,
|
"env_variable": scenario_variable,
|
||||||
"string_repr": string_repr,
|
"string_repr": string_repr,
|
||||||
"distributed": distributed,
|
"distributed": distributed,
|
||||||
"formatter": formatter,
|
"formatter": formatter,
|
||||||
"env_variable": env_variable,
|
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -149,8 +147,6 @@ class Preset:
|
||||||
pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
|
pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
|
||||||
# Workers count for preset
|
# Workers count for preset
|
||||||
workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
|
workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
|
||||||
# Acl for container/buckets
|
|
||||||
acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)
|
|
||||||
|
|
||||||
# ------ GRPC ------
|
# ------ GRPC ------
|
||||||
# Amount of containers which should be created
|
# Amount of containers which should be created
|
||||||
|
@ -170,19 +166,6 @@ class Preset:
|
||||||
# Flag to control preset erorrs
|
# Flag to control preset erorrs
|
||||||
ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)
|
ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)
|
||||||
|
|
||||||
# Flag to ensure created containers store data on local endpoints
|
|
||||||
local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class PrometheusParams:
|
|
||||||
# Prometheus server URL
|
|
||||||
server_url: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_SERVER_URL", string_repr=False)
|
|
||||||
# Prometheus trend stats
|
|
||||||
trend_stats: Optional[str] = metadata_field(all_load_scenarios, env_variable="K6_PROMETHEUS_RW_TREND_STATS", string_repr=False)
|
|
||||||
# Additional tags
|
|
||||||
metrics_tags: Optional[str] = metadata_field(all_load_scenarios, None, "METRIC_TAGS", False)
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class LoadParams:
|
class LoadParams:
|
||||||
|
@ -233,16 +216,12 @@ class LoadParams:
|
||||||
)
|
)
|
||||||
# Percentage of filling of all data disks on all nodes
|
# Percentage of filling of all data disks on all nodes
|
||||||
fill_percent: Optional[float] = None
|
fill_percent: Optional[float] = None
|
||||||
# if set, the payload is generated on the fly and is not read into memory fully.
|
|
||||||
streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
|
|
||||||
# Output format
|
|
||||||
output: Optional[str] = metadata_field(all_load_scenarios, None, "K6_OUT", False)
|
|
||||||
# Prometheus params
|
|
||||||
prometheus: Optional[PrometheusParams] = None
|
|
||||||
|
|
||||||
# ------- COMMON SCENARIO PARAMS -------
|
# ------- COMMON SCENARIO PARAMS -------
|
||||||
# Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
|
# Load time is the maximum duration for k6 to give load. Default is the BACKGROUND_LOAD_DEFAULT_TIME value.
|
||||||
load_time: Optional[int] = metadata_field(all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds)
|
load_time: Optional[int] = metadata_field(
|
||||||
|
all_load_scenarios, None, "DURATION", False, formatter=convert_time_to_seconds
|
||||||
|
)
|
||||||
# Object size in KB for load and preset.
|
# Object size in KB for load and preset.
|
||||||
object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False)
|
object_size: Optional[int] = metadata_field(all_load_scenarios, "size", "WRITE_OBJ_SIZE", False)
|
||||||
# For read operations, controls from which set get objects to read
|
# For read operations, controls from which set get objects to read
|
||||||
|
@ -253,14 +232,14 @@ class LoadParams:
|
||||||
registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
|
registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
|
||||||
# In case if we want to use custom registry file left from another load run
|
# In case if we want to use custom registry file left from another load run
|
||||||
custom_registry: Optional[str] = None
|
custom_registry: Optional[str] = None
|
||||||
# In case if we want to use custom registry file left from another load run
|
|
||||||
force_fresh_registry: Optional[bool] = None
|
|
||||||
# Specifies the minimum duration of every single execution (i.e. iteration).
|
# Specifies the minimum duration of every single execution (i.e. iteration).
|
||||||
# Any iterations that are shorter than this value will cause that VU to
|
# Any iterations that are shorter than this value will cause that VU to
|
||||||
# sleep for the remainder of the time until the specified minimum duration is reached.
|
# sleep for the remainder of the time until the specified minimum duration is reached.
|
||||||
min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False)
|
min_iteration_duration: Optional[str] = metadata_field(all_load_scenarios, None, "K6_MIN_ITERATION_DURATION", False)
|
||||||
# Prepare/cut objects locally on client before sending
|
# Prepare/cut objects locally on client before sending
|
||||||
prepare_locally: Optional[bool] = metadata_field([LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False)
|
prepare_locally: Optional[bool] = metadata_field(
|
||||||
|
[LoadScenario.gRPC, LoadScenario.gRPC_CAR], None, "PREPARE_LOCALLY", False
|
||||||
|
)
|
||||||
# Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios
|
# Specifies K6 setupTimeout time. Currently hardcoded in xk6 as 5 seconds for all scenarios
|
||||||
# https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout
|
# https://k6.io/docs/using-k6/k6-options/reference/#setup-timeout
|
||||||
setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False)
|
setup_timeout: Optional[str] = metadata_field(all_scenarios, None, "K6_SETUP_TIMEOUT", False)
|
||||||
|
@ -290,25 +269,35 @@ class LoadParams:
|
||||||
delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True)
|
delete_rate: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "DELETE_RATE", True, True)
|
||||||
|
|
||||||
# Amount of preAllocatedVUs for write operations.
|
# Amount of preAllocatedVUs for write operations.
|
||||||
preallocated_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True)
|
preallocated_writers: Optional[int] = metadata_field(
|
||||||
|
constant_arrival_rate_scenarios, None, "PRE_ALLOC_WRITERS", True, True
|
||||||
|
)
|
||||||
# Amount of maxVUs for write operations.
|
# Amount of maxVUs for write operations.
|
||||||
max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True)
|
max_writers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_WRITERS", False, True)
|
||||||
|
|
||||||
# Amount of preAllocatedVUs for read operations.
|
# Amount of preAllocatedVUs for read operations.
|
||||||
preallocated_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True)
|
preallocated_readers: Optional[int] = metadata_field(
|
||||||
|
constant_arrival_rate_scenarios, None, "PRE_ALLOC_READERS", True, True
|
||||||
|
)
|
||||||
# Amount of maxVUs for read operations.
|
# Amount of maxVUs for read operations.
|
||||||
max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True)
|
max_readers: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_READERS", False, True)
|
||||||
|
|
||||||
# Amount of preAllocatedVUs for read operations.
|
# Amount of preAllocatedVUs for read operations.
|
||||||
preallocated_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True)
|
preallocated_deleters: Optional[int] = metadata_field(
|
||||||
|
constant_arrival_rate_scenarios, None, "PRE_ALLOC_DELETERS", True, True
|
||||||
|
)
|
||||||
# Amount of maxVUs for delete operations.
|
# Amount of maxVUs for delete operations.
|
||||||
max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True)
|
max_deleters: Optional[int] = metadata_field(constant_arrival_rate_scenarios, None, "MAX_DELETERS", False, True)
|
||||||
|
|
||||||
# Multipart
|
# Multipart
|
||||||
# Number of parts to upload in parallel
|
# Number of parts to upload in parallel
|
||||||
writers_multipart: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True)
|
writers_multipart: Optional[int] = metadata_field(
|
||||||
|
[LoadScenario.S3_MULTIPART], None, "WRITERS_MULTIPART", False, True
|
||||||
|
)
|
||||||
# part size must be greater than (5 MB)
|
# part size must be greater than (5 MB)
|
||||||
write_object_part_size: Optional[int] = metadata_field([LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False)
|
write_object_part_size: Optional[int] = metadata_field(
|
||||||
|
[LoadScenario.S3_MULTIPART], None, "WRITE_OBJ_PART_SIZE", False
|
||||||
|
)
|
||||||
|
|
||||||
# Period of time to apply the rate value.
|
# Period of time to apply the rate value.
|
||||||
time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False)
|
time_unit: Optional[str] = metadata_field(constant_arrival_rate_scenarios, None, "TIME_UNIT", False)
|
||||||
|
@ -323,7 +312,7 @@ class LoadParams:
|
||||||
# Config file location (filled automatically)
|
# Config file location (filled automatically)
|
||||||
config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False)
|
config_file: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_FILE", False)
|
||||||
# Config directory location (filled automatically)
|
# Config directory location (filled automatically)
|
||||||
config_dir: Optional[str] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False)
|
config_dir: Optional[str] = metadata_field([LoadScenario.S3_LOCAL], None, "CONFIG_DIR", False)
|
||||||
|
|
||||||
def set_id(self, load_id):
|
def set_id(self, load_id):
|
||||||
self.load_id = load_id
|
self.load_id = load_id
|
||||||
|
@ -341,17 +330,6 @@ class LoadParams:
|
||||||
if self.preset:
|
if self.preset:
|
||||||
self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json")
|
self.preset.pregen_json = os.path.join(self.working_dir, f"{load_id}_prepare.json")
|
||||||
|
|
||||||
def get_k6_vars(self):
|
|
||||||
env_vars = {
|
|
||||||
meta_field.metadata["scenario_variable"]: meta_field.value
|
|
||||||
for meta_field in self._get_meta_fields(self)
|
|
||||||
if self.scenario in meta_field.metadata["applicable_scenarios"]
|
|
||||||
and meta_field.metadata["scenario_variable"]
|
|
||||||
and meta_field.value is not None
|
|
||||||
}
|
|
||||||
|
|
||||||
return env_vars
|
|
||||||
|
|
||||||
def get_env_vars(self):
|
def get_env_vars(self):
|
||||||
env_vars = {
|
env_vars = {
|
||||||
meta_field.metadata["env_variable"]: meta_field.value
|
meta_field.metadata["env_variable"]: meta_field.value
|
||||||
|
@ -456,7 +434,9 @@ class LoadParams:
|
||||||
static_params = [f"{load_type_str}"]
|
static_params = [f"{load_type_str}"]
|
||||||
|
|
||||||
dynamic_params = [
|
dynamic_params = [
|
||||||
f"{meta_field.name}={meta_field.value}" for meta_field in self._get_applicable_fields() if meta_field.metadata["string_repr"]
|
f"{meta_field.name}={meta_field.value}"
|
||||||
|
for meta_field in self._get_applicable_fields()
|
||||||
|
if meta_field.metadata["string_repr"]
|
||||||
]
|
]
|
||||||
params = ", ".join(static_params + dynamic_params)
|
params = ", ".join(static_params + dynamic_params)
|
||||||
|
|
||||||
|
|
|
@ -39,10 +39,6 @@ class OperationMetric(ABC):
|
||||||
def throughput(self) -> float:
|
def throughput(self) -> float:
|
||||||
return self._get_metric_rate(self._THROUGHPUT)
|
return self._get_metric_rate(self._THROUGHPUT)
|
||||||
|
|
||||||
@property
|
|
||||||
def total_bytes(self) -> float:
|
|
||||||
return self._get_metric(self._THROUGHPUT)
|
|
||||||
|
|
||||||
def _get_metric(self, metric: str) -> int:
|
def _get_metric(self, metric: str) -> int:
|
||||||
metrics_method_map = {
|
metrics_method_map = {
|
||||||
"counter": self._get_counter_metric,
|
"counter": self._get_counter_metric,
|
||||||
|
@ -111,66 +107,66 @@ class DeleteOperationMetric(OperationMetric):
|
||||||
|
|
||||||
|
|
||||||
class GrpcWriteOperationMetric(WriteOperationMetric):
|
class GrpcWriteOperationMetric(WriteOperationMetric):
|
||||||
_SUCCESS = "frostfs_obj_put_success"
|
_SUCCESS = "frostfs_obj_put_total"
|
||||||
_ERRORS = "frostfs_obj_put_fails"
|
_ERRORS = "frostfs_obj_put_fails"
|
||||||
_LATENCY = "frostfs_obj_put_duration"
|
_LATENCY = "frostfs_obj_put_duration"
|
||||||
|
|
||||||
|
|
||||||
class GrpcReadOperationMetric(ReadOperationMetric):
|
class GrpcReadOperationMetric(ReadOperationMetric):
|
||||||
_SUCCESS = "frostfs_obj_get_success"
|
_SUCCESS = "frostfs_obj_get_total"
|
||||||
_ERRORS = "frostfs_obj_get_fails"
|
_ERRORS = "frostfs_obj_get_fails"
|
||||||
_LATENCY = "frostfs_obj_get_duration"
|
_LATENCY = "frostfs_obj_get_duration"
|
||||||
|
|
||||||
|
|
||||||
class GrpcDeleteOperationMetric(DeleteOperationMetric):
|
class GrpcDeleteOperationMetric(DeleteOperationMetric):
|
||||||
_SUCCESS = "frostfs_obj_delete_success"
|
_SUCCESS = "frostfs_obj_delete_total"
|
||||||
_ERRORS = "frostfs_obj_delete_fails"
|
_ERRORS = "frostfs_obj_delete_fails"
|
||||||
_LATENCY = "frostfs_obj_delete_duration"
|
_LATENCY = "frostfs_obj_delete_duration"
|
||||||
|
|
||||||
|
|
||||||
class S3WriteOperationMetric(WriteOperationMetric):
|
class S3WriteOperationMetric(WriteOperationMetric):
|
||||||
_SUCCESS = "aws_obj_put_success"
|
_SUCCESS = "aws_obj_put_total"
|
||||||
_ERRORS = "aws_obj_put_fails"
|
_ERRORS = "aws_obj_put_fails"
|
||||||
_LATENCY = "aws_obj_put_duration"
|
_LATENCY = "aws_obj_put_duration"
|
||||||
|
|
||||||
|
|
||||||
class S3ReadOperationMetric(ReadOperationMetric):
|
class S3ReadOperationMetric(ReadOperationMetric):
|
||||||
_SUCCESS = "aws_obj_get_success"
|
_SUCCESS = "aws_obj_get_total"
|
||||||
_ERRORS = "aws_obj_get_fails"
|
_ERRORS = "aws_obj_get_fails"
|
||||||
_LATENCY = "aws_obj_get_duration"
|
_LATENCY = "aws_obj_get_duration"
|
||||||
|
|
||||||
|
|
||||||
class S3DeleteOperationMetric(DeleteOperationMetric):
|
class S3DeleteOperationMetric(DeleteOperationMetric):
|
||||||
_SUCCESS = "aws_obj_delete_success"
|
_SUCCESS = "aws_obj_delete_total"
|
||||||
_ERRORS = "aws_obj_delete_fails"
|
_ERRORS = "aws_obj_delete_fails"
|
||||||
_LATENCY = "aws_obj_delete_duration"
|
_LATENCY = "aws_obj_delete_duration"
|
||||||
|
|
||||||
|
|
||||||
class S3LocalWriteOperationMetric(WriteOperationMetric):
|
class S3LocalWriteOperationMetric(WriteOperationMetric):
|
||||||
_SUCCESS = "s3local_obj_put_success"
|
_SUCCESS = "s3local_obj_put_total"
|
||||||
_ERRORS = "s3local_obj_put_fails"
|
_ERRORS = "s3local_obj_put_fails"
|
||||||
_LATENCY = "s3local_obj_put_duration"
|
_LATENCY = "s3local_obj_put_duration"
|
||||||
|
|
||||||
|
|
||||||
class S3LocalReadOperationMetric(ReadOperationMetric):
|
class S3LocalReadOperationMetric(ReadOperationMetric):
|
||||||
_SUCCESS = "s3local_obj_get_success"
|
_SUCCESS = "s3local_obj_get_total"
|
||||||
_ERRORS = "s3local_obj_get_fails"
|
_ERRORS = "s3local_obj_get_fails"
|
||||||
_LATENCY = "s3local_obj_get_duration"
|
_LATENCY = "s3local_obj_get_duration"
|
||||||
|
|
||||||
|
|
||||||
class LocalWriteOperationMetric(WriteOperationMetric):
|
class LocalWriteOperationMetric(WriteOperationMetric):
|
||||||
_SUCCESS = "local_obj_put_success"
|
_SUCCESS = "local_obj_put_total"
|
||||||
_ERRORS = "local_obj_put_fails"
|
_ERRORS = "local_obj_put_fails"
|
||||||
_LATENCY = "local_obj_put_duration"
|
_LATENCY = "local_obj_put_duration"
|
||||||
|
|
||||||
|
|
||||||
class LocalReadOperationMetric(ReadOperationMetric):
|
class LocalReadOperationMetric(ReadOperationMetric):
|
||||||
_SUCCESS = "local_obj_get_success"
|
_SUCCESS = "local_obj_get_total"
|
||||||
_ERRORS = "local_obj_get_fails"
|
_ERRORS = "local_obj_get_fails"
|
||||||
|
|
||||||
|
|
||||||
class LocalDeleteOperationMetric(DeleteOperationMetric):
|
class LocalDeleteOperationMetric(DeleteOperationMetric):
|
||||||
_SUCCESS = "local_obj_delete_success"
|
_SUCCESS = "local_obj_delete_total"
|
||||||
_ERRORS = "local_obj_delete_fails"
|
_ERRORS = "local_obj_delete_fails"
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -120,11 +120,6 @@ class LoadReport:
|
||||||
throughput, unit = calc_unit(stats.throughput)
|
throughput, unit = calc_unit(stats.throughput)
|
||||||
throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")
|
throughput_html = self._row("Throughput", f"{throughput:.2f} {unit}/sec")
|
||||||
|
|
||||||
bytes_html = ""
|
|
||||||
if stats.total_bytes > 0:
|
|
||||||
total_bytes, total_bytes_unit = calc_unit(stats.total_bytes)
|
|
||||||
bytes_html = self._row("Total transferred", f"{total_bytes:.2f} {total_bytes_unit}")
|
|
||||||
|
|
||||||
per_node_errors_html = ""
|
per_node_errors_html = ""
|
||||||
for node_key, errors in stats.errors.by_node.items():
|
for node_key, errors in stats.errors.by_node.items():
|
||||||
if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT:
|
if self.load_params.k6_process_allocation_strategy == K6ProcessAllocationStrategy.PER_ENDPOINT:
|
||||||
|
@ -153,7 +148,6 @@ class LoadReport:
|
||||||
<tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
|
<tr><th colspan="2" bgcolor="gainsboro">Metrics</th></tr>
|
||||||
{self._row("Total operations", stats.operations)}
|
{self._row("Total operations", stats.operations)}
|
||||||
{self._row("OP/sec", f"{stats.rate:.2f}")}
|
{self._row("OP/sec", f"{stats.rate:.2f}")}
|
||||||
{bytes_html}
|
|
||||||
{throughput_html}
|
{throughput_html}
|
||||||
{latency_html}
|
{latency_html}
|
||||||
<tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
|
<tr><th colspan="2" bgcolor="gainsboro">Errors</th></tr>
|
||||||
|
|
|
@ -1,20 +1,23 @@
|
||||||
import copy
|
import copy
|
||||||
import itertools
|
import itertools
|
||||||
import math
|
import math
|
||||||
|
import re
|
||||||
import time
|
import time
|
||||||
from dataclasses import fields
|
from dataclasses import fields
|
||||||
from threading import Event
|
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
from urllib.parse import urlparse
|
from urllib.parse import urlparse
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.credentials.interfaces import S3Credentials, User
|
from frostfs_testlib.cli.frostfs_authmate.authmate import FrostfsAuthmate
|
||||||
from frostfs_testlib.load.interfaces.loader import Loader
|
from frostfs_testlib.load.interfaces.loader import Loader
|
||||||
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
|
from frostfs_testlib.load.interfaces.scenario_runner import ScenarioRunner
|
||||||
from frostfs_testlib.load.k6 import K6
|
from frostfs_testlib.load.k6 import K6
|
||||||
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
|
from frostfs_testlib.load.load_config import K6ProcessAllocationStrategy, LoadParams, LoadType
|
||||||
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
|
from frostfs_testlib.load.loaders import NodeLoader, RemoteLoader
|
||||||
from frostfs_testlib.resources import optionals
|
from frostfs_testlib.resources import optionals
|
||||||
|
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
|
||||||
from frostfs_testlib.resources.common import STORAGE_USER_NAME
|
from frostfs_testlib.resources.common import STORAGE_USER_NAME
|
||||||
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES
|
from frostfs_testlib.resources.load_params import BACKGROUND_LOAD_VUS_COUNT_DIVISOR, LOAD_NODE_SSH_USER, LOAD_NODES
|
||||||
from frostfs_testlib.shell.command_inspectors import SuInspector
|
from frostfs_testlib.shell.command_inspectors import SuInspector
|
||||||
|
@ -22,10 +25,12 @@ from frostfs_testlib.shell.interfaces import CommandOptions, InteractiveInput
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode
|
from frostfs_testlib.storage.cluster import ClusterNode
|
||||||
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
from frostfs_testlib.storage.controllers.cluster_state_controller import ClusterStateController
|
||||||
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
|
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing import parallel, run_optionally
|
from frostfs_testlib.testing import parallel, run_optionally
|
||||||
from frostfs_testlib.testing.test_control import retry
|
from frostfs_testlib.testing.test_control import retry
|
||||||
from frostfs_testlib.utils import datetime_utils
|
from frostfs_testlib.utils import datetime_utils
|
||||||
from frostfs_testlib.utils.file_keeper import FileKeeper
|
from frostfs_testlib.utils.file_keeper import FileKeeper
|
||||||
|
from threading import Event
|
||||||
|
|
||||||
|
|
||||||
class RunnerBase(ScenarioRunner):
|
class RunnerBase(ScenarioRunner):
|
||||||
|
@ -52,17 +57,17 @@ class RunnerBase(ScenarioRunner):
|
||||||
|
|
||||||
class DefaultRunner(RunnerBase):
|
class DefaultRunner(RunnerBase):
|
||||||
loaders: list[Loader]
|
loaders: list[Loader]
|
||||||
user: User
|
loaders_wallet: WalletInfo
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
user: User,
|
loaders_wallet: WalletInfo,
|
||||||
load_ip_list: Optional[list[str]] = None,
|
load_ip_list: Optional[list[str]] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
if load_ip_list is None:
|
if load_ip_list is None:
|
||||||
load_ip_list = LOAD_NODES
|
load_ip_list = LOAD_NODES
|
||||||
self.loaders = RemoteLoader.from_ip_list(load_ip_list)
|
self.loaders = RemoteLoader.from_ip_list(load_ip_list)
|
||||||
self.user = user
|
self.loaders_wallet = loaders_wallet
|
||||||
|
|
||||||
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
|
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
|
||||||
@reporter.step("Preparation steps")
|
@reporter.step("Preparation steps")
|
||||||
|
@ -73,35 +78,54 @@ class DefaultRunner(RunnerBase):
|
||||||
nodes_under_load: list[ClusterNode],
|
nodes_under_load: list[ClusterNode],
|
||||||
k6_dir: str,
|
k6_dir: str,
|
||||||
):
|
):
|
||||||
if load_params.force_fresh_registry and load_params.custom_registry:
|
|
||||||
with reporter.step("Forcing fresh registry files"):
|
|
||||||
parallel(self._force_fresh_registry, self.loaders, load_params)
|
|
||||||
|
|
||||||
if load_params.load_type != LoadType.S3:
|
if load_params.load_type != LoadType.S3:
|
||||||
return
|
return
|
||||||
|
|
||||||
with reporter.step("Init s3 client on loaders"):
|
with reporter.step("Init s3 client on loaders"):
|
||||||
s3_credentials = self.user.s3_credentials
|
storage_node = nodes_under_load[0].service(StorageNode)
|
||||||
parallel(self._aws_configure_on_loader, self.loaders, s3_credentials)
|
s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
|
||||||
|
grpc_peer = storage_node.get_rpc_endpoint()
|
||||||
|
|
||||||
def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
|
parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)
|
||||||
with reporter.step(f"Forcing fresh registry on {loader.ip}"):
|
|
||||||
shell = loader.get_shell()
|
|
||||||
shell.exec(f"rm -f {load_params.registry_file}")
|
|
||||||
|
|
||||||
def _aws_configure_on_loader(
|
def _prepare_loader(
|
||||||
self,
|
self,
|
||||||
loader: Loader,
|
loader: Loader,
|
||||||
s3_credentials: S3Credentials,
|
load_params: LoadParams,
|
||||||
|
grpc_peer: str,
|
||||||
|
s3_public_keys: list[str],
|
||||||
|
k6_dir: str,
|
||||||
):
|
):
|
||||||
with reporter.step(f"Aws configure on {loader.ip}"):
|
with reporter.step(f"Init s3 client on {loader.ip}"):
|
||||||
|
shell = loader.get_shell()
|
||||||
|
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
|
||||||
|
issue_secret_output = frostfs_authmate_exec.secret.issue(
|
||||||
|
wallet=self.loaders_wallet.path,
|
||||||
|
peer=grpc_peer,
|
||||||
|
gate_public_key=s3_public_keys,
|
||||||
|
container_placement_policy=load_params.preset.container_placement_policy,
|
||||||
|
container_policy=f"{k6_dir}/scenarios/files/policy.json",
|
||||||
|
wallet_password=self.loaders_wallet.password,
|
||||||
|
).stdout
|
||||||
|
aws_access_key_id = str(
|
||||||
|
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
|
||||||
|
"aws_access_key_id"
|
||||||
|
)
|
||||||
|
)
|
||||||
|
aws_secret_access_key = str(
|
||||||
|
re.search(
|
||||||
|
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
|
||||||
|
issue_secret_output,
|
||||||
|
).group("aws_secret_access_key")
|
||||||
|
)
|
||||||
|
|
||||||
configure_input = [
|
configure_input = [
|
||||||
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=s3_credentials.access_key),
|
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
|
||||||
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=s3_credentials.secret_key),
|
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
|
||||||
InteractiveInput(prompt_pattern=r".*", input=""),
|
InteractiveInput(prompt_pattern=r".*", input=""),
|
||||||
InteractiveInput(prompt_pattern=r".*", input=""),
|
InteractiveInput(prompt_pattern=r".*", input=""),
|
||||||
]
|
]
|
||||||
loader.get_shell().exec("aws configure", CommandOptions(interactive_inputs=configure_input))
|
shell.exec("aws configure", CommandOptions(interactive_inputs=configure_input))
|
||||||
|
|
||||||
@reporter.step("Init k6 instances")
|
@reporter.step("Init k6 instances")
|
||||||
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
|
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
|
||||||
|
@ -143,10 +167,12 @@ class DefaultRunner(RunnerBase):
|
||||||
k6_dir,
|
k6_dir,
|
||||||
shell,
|
shell,
|
||||||
loader,
|
loader,
|
||||||
self.user,
|
self.loaders_wallet,
|
||||||
)
|
)
|
||||||
|
|
||||||
def _get_distributed_load_params_list(self, original_load_params: LoadParams, workers_count: int) -> list[LoadParams]:
|
def _get_distributed_load_params_list(
|
||||||
|
self, original_load_params: LoadParams, workers_count: int
|
||||||
|
) -> list[LoadParams]:
|
||||||
divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
|
divisor = int(BACKGROUND_LOAD_VUS_COUNT_DIVISOR)
|
||||||
distributed_load_params: list[LoadParams] = []
|
distributed_load_params: list[LoadParams] = []
|
||||||
|
|
||||||
|
@ -231,20 +257,18 @@ class LocalRunner(RunnerBase):
|
||||||
loaders: list[Loader]
|
loaders: list[Loader]
|
||||||
cluster_state_controller: ClusterStateController
|
cluster_state_controller: ClusterStateController
|
||||||
file_keeper: FileKeeper
|
file_keeper: FileKeeper
|
||||||
user: User
|
wallet: WalletInfo
|
||||||
|
|
||||||
def __init__(
|
def __init__(
|
||||||
self,
|
self,
|
||||||
cluster_state_controller: ClusterStateController,
|
cluster_state_controller: ClusterStateController,
|
||||||
file_keeper: FileKeeper,
|
file_keeper: FileKeeper,
|
||||||
nodes_under_load: list[ClusterNode],
|
nodes_under_load: list[ClusterNode],
|
||||||
user: User,
|
|
||||||
) -> None:
|
) -> None:
|
||||||
self.cluster_state_controller = cluster_state_controller
|
self.cluster_state_controller = cluster_state_controller
|
||||||
self.file_keeper = file_keeper
|
self.file_keeper = file_keeper
|
||||||
self.loaders = [NodeLoader(node) for node in nodes_under_load]
|
self.loaders = [NodeLoader(node) for node in nodes_under_load]
|
||||||
self.nodes_under_load = nodes_under_load
|
self.nodes_under_load = nodes_under_load
|
||||||
self.user = user
|
|
||||||
|
|
||||||
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
|
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
|
||||||
@reporter.step("Preparation steps")
|
@reporter.step("Preparation steps")
|
||||||
|
@ -290,12 +314,14 @@ class LocalRunner(RunnerBase):
|
||||||
with reporter.step("Download K6"):
|
with reporter.step("Download K6"):
|
||||||
shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
|
shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
|
||||||
shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
|
shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
|
||||||
shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
|
shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
|
||||||
shell.exec(f"sudo chmod -R 777 {k6_dir}")
|
shell.exec(f"sudo chmod -R 777 {k6_dir}")
|
||||||
|
|
||||||
with reporter.step("chmod 777 wallet related files on loader"):
|
with reporter.step("Create empty_passwd"):
|
||||||
shell.exec(f"sudo chmod -R 777 {self.user.wallet.config_path}")
|
self.wallet = WalletInfo(f"{k6_dir}/scenarios/files/wallet.json", "", "/tmp/empty_passwd.yml")
|
||||||
shell.exec(f"sudo chmod -R 777 {self.user.wallet.path}")
|
content = yaml.dump({"password": ""})
|
||||||
|
shell.exec(f'echo "{content}" | sudo tee {self.wallet.config_path}')
|
||||||
|
shell.exec(f"sudo chmod -R 777 {self.wallet.config_path}")
|
||||||
|
|
||||||
@reporter.step("Init k6 instances")
|
@reporter.step("Init k6 instances")
|
||||||
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
|
def init_k6_instances(self, load_params: LoadParams, endpoints: list[str], k6_dir: str):
|
||||||
|
@ -328,7 +354,7 @@ class LocalRunner(RunnerBase):
|
||||||
k6_dir,
|
k6_dir,
|
||||||
shell,
|
shell,
|
||||||
loader,
|
loader,
|
||||||
self.user,
|
self.wallet,
|
||||||
)
|
)
|
||||||
|
|
||||||
def start(self):
|
def start(self):
|
||||||
|
@ -418,7 +444,7 @@ class S3LocalRunner(LocalRunner):
|
||||||
k6_dir,
|
k6_dir,
|
||||||
shell,
|
shell,
|
||||||
loader,
|
loader,
|
||||||
self.user,
|
self.wallet,
|
||||||
)
|
)
|
||||||
|
|
||||||
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
|
@run_optionally(optionals.OPTIONAL_BACKGROUND_LOAD_ENABLED)
|
||||||
|
@ -431,10 +457,17 @@ class S3LocalRunner(LocalRunner):
|
||||||
k6_dir: str,
|
k6_dir: str,
|
||||||
):
|
):
|
||||||
self.k6_dir = k6_dir
|
self.k6_dir = k6_dir
|
||||||
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, cluster_nodes)
|
with reporter.step("Init s3 client on loaders"):
|
||||||
|
storage_node = nodes_under_load[0].service(StorageNode)
|
||||||
|
s3_public_keys = [node.service(S3Gate).get_wallet_public_key() for node in cluster_nodes]
|
||||||
|
grpc_peer = storage_node.get_rpc_endpoint()
|
||||||
|
|
||||||
|
parallel(self.prepare_node, nodes_under_load, k6_dir, load_params, s3_public_keys, grpc_peer)
|
||||||
|
|
||||||
@reporter.step("Prepare node {cluster_node}")
|
@reporter.step("Prepare node {cluster_node}")
|
||||||
def prepare_node(self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, cluster_nodes: list[ClusterNode]):
|
def prepare_node(
|
||||||
|
self, cluster_node: ClusterNode, k6_dir: str, load_params: LoadParams, s3_public_keys: list[str], grpc_peer: str
|
||||||
|
):
|
||||||
LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
|
LocalRunner.prepare_node(self, cluster_node, k6_dir, load_params)
|
||||||
self.endpoints = cluster_node.s3_gate.get_all_endpoints()
|
self.endpoints = cluster_node.s3_gate.get_all_endpoints()
|
||||||
shell = cluster_node.host.get_shell()
|
shell = cluster_node.host.get_shell()
|
||||||
|
@ -455,9 +488,29 @@ class S3LocalRunner(LocalRunner):
|
||||||
shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")
|
shell.exec(f"sudo python3 -m pip install -I {k6_dir}/requests.tar.gz")
|
||||||
|
|
||||||
with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
|
with reporter.step(f"Init s3 client on {cluster_node.host_ip}"):
|
||||||
|
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
|
||||||
|
issue_secret_output = frostfs_authmate_exec.secret.issue(
|
||||||
|
wallet=self.wallet.path,
|
||||||
|
peer=grpc_peer,
|
||||||
|
gate_public_key=s3_public_keys,
|
||||||
|
container_placement_policy=load_params.preset.container_placement_policy,
|
||||||
|
container_policy=f"{k6_dir}/scenarios/files/policy.json",
|
||||||
|
wallet_password=self.wallet.password,
|
||||||
|
).stdout
|
||||||
|
aws_access_key_id = str(
|
||||||
|
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group(
|
||||||
|
"aws_access_key_id"
|
||||||
|
)
|
||||||
|
)
|
||||||
|
aws_secret_access_key = str(
|
||||||
|
re.search(
|
||||||
|
r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)",
|
||||||
|
issue_secret_output,
|
||||||
|
).group("aws_secret_access_key")
|
||||||
|
)
|
||||||
configure_input = [
|
configure_input = [
|
||||||
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=self.user.s3_credentials.access_key),
|
InteractiveInput(prompt_pattern=r"AWS Access Key ID.*", input=aws_access_key_id),
|
||||||
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=self.user.s3_credentials.secret_key),
|
InteractiveInput(prompt_pattern=r"AWS Secret Access Key.*", input=aws_secret_access_key),
|
||||||
InteractiveInput(prompt_pattern=r".*", input=""),
|
InteractiveInput(prompt_pattern=r".*", input=""),
|
||||||
InteractiveInput(prompt_pattern=r".*", input=""),
|
InteractiveInput(prompt_pattern=r".*", input=""),
|
||||||
]
|
]
|
||||||
|
|
|
@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")
|
||||||
# Config for frostfs-adm utility. Optional if tests are running against devenv
|
# Config for frostfs-adm utility. Optional if tests are running against devenv
|
||||||
FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")
|
FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")
|
||||||
|
|
||||||
CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s")
|
CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)
|
||||||
|
|
|
@ -29,17 +29,13 @@ class AwsCliClient(S3ClientWrapper):
|
||||||
|
|
||||||
@reporter.step("Configure S3 client (aws cli)")
|
@reporter.step("Configure S3 client (aws cli)")
|
||||||
def __init__(
|
def __init__(
|
||||||
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
|
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default"
|
||||||
) -> None:
|
) -> None:
|
||||||
self.s3gate_endpoint = s3gate_endpoint
|
self.s3gate_endpoint = s3gate_endpoint
|
||||||
self.access_key_id: str = access_key_id
|
|
||||||
self.secret_access_key: str = secret_access_key
|
|
||||||
self.profile = profile
|
self.profile = profile
|
||||||
self.local_shell = LocalShell()
|
self.local_shell = LocalShell()
|
||||||
self.region = region
|
|
||||||
self.iam_endpoint = None
|
|
||||||
try:
|
try:
|
||||||
_configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key, region)
|
_configure_aws_cli(f"aws configure --profile {profile}", access_key_id, secret_access_key)
|
||||||
self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}")
|
self.local_shell.exec(f"aws configure set max_attempts {MAX_REQUEST_ATTEMPTS} --profile {profile}")
|
||||||
self.local_shell.exec(
|
self.local_shell.exec(
|
||||||
f"aws configure set retry_mode {RETRY_MODE} --profile {profile}",
|
f"aws configure set retry_mode {RETRY_MODE} --profile {profile}",
|
||||||
|
@ -47,14 +43,10 @@ class AwsCliClient(S3ClientWrapper):
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
raise RuntimeError("Error while configuring AwsCliClient") from err
|
raise RuntimeError("Error while configuring AwsCliClient") from err
|
||||||
|
|
||||||
@reporter.step("Set S3 endpoint to {s3gate_endpoint}")
|
@reporter.step("Set endpoint S3 to {s3gate_endpoint}")
|
||||||
def set_endpoint(self, s3gate_endpoint: str):
|
def set_endpoint(self, s3gate_endpoint: str):
|
||||||
self.s3gate_endpoint = s3gate_endpoint
|
self.s3gate_endpoint = s3gate_endpoint
|
||||||
|
|
||||||
@reporter.step("Set IAM endpoint to {iam_endpoint}")
|
|
||||||
def set_iam_endpoint(self, iam_endpoint: str):
|
|
||||||
self.iam_endpoint = iam_endpoint
|
|
||||||
|
|
||||||
@reporter.step("Create bucket S3")
|
@reporter.step("Create bucket S3")
|
||||||
def create_bucket(
|
def create_bucket(
|
||||||
self,
|
self,
|
||||||
|
@ -573,13 +565,12 @@ class AwsCliClient(S3ClientWrapper):
|
||||||
self.local_shell.exec(cmd)
|
self.local_shell.exec(cmd)
|
||||||
|
|
||||||
@reporter.step("Put object tagging")
|
@reporter.step("Put object tagging")
|
||||||
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None:
|
def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
|
||||||
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
|
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
|
||||||
tagging = {"TagSet": tags}
|
tagging = {"TagSet": tags}
|
||||||
version = f" --version-id {version_id}" if version_id else ""
|
|
||||||
cmd = (
|
cmd = (
|
||||||
f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} "
|
f"aws {self.common_flags} s3api put-object-tagging --bucket {bucket} --key {key} "
|
||||||
f"{version} --tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}"
|
f"--tagging '{json.dumps(tagging)}' --endpoint {self.s3gate_endpoint} --profile {self.profile}"
|
||||||
)
|
)
|
||||||
self.local_shell.exec(cmd)
|
self.local_shell.exec(cmd)
|
||||||
|
|
||||||
|
@ -595,11 +586,10 @@ class AwsCliClient(S3ClientWrapper):
|
||||||
return response.get("TagSet")
|
return response.get("TagSet")
|
||||||
|
|
||||||
@reporter.step("Delete object tagging")
|
@reporter.step("Delete object tagging")
|
||||||
def delete_object_tagging(self, bucket: str, key: str, version_id: Optional[str] = None) -> None:
|
def delete_object_tagging(self, bucket: str, key: str) -> None:
|
||||||
version = f" --version-id {version_id}" if version_id else ""
|
|
||||||
cmd = (
|
cmd = (
|
||||||
f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} "
|
f"aws {self.common_flags} s3api delete-object-tagging --bucket {bucket} "
|
||||||
f"--key {key} {version} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
|
f"--key {key} --endpoint {self.s3gate_endpoint} --profile {self.profile}"
|
||||||
)
|
)
|
||||||
self.local_shell.exec(cmd)
|
self.local_shell.exec(cmd)
|
||||||
|
|
||||||
|
@ -760,563 +750,3 @@ class AwsCliClient(S3ClientWrapper):
|
||||||
json_output = json.loads(output[output.index("{") :])
|
json_output = json.loads(output[output.index("{") :])
|
||||||
|
|
||||||
return json_output
|
return json_output
|
||||||
|
|
||||||
# IAM METHODS #
|
|
||||||
# Some methods don't have checks because AWS is silent in some cases (delete, attach, etc.)
|
|
||||||
|
|
||||||
@reporter.step("Adds the specified user to the specified group")
|
|
||||||
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam add-user-to-group --user-name {user_name} --group-name {group_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Attaches the specified managed policy to the specified IAM group")
|
|
||||||
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam attach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Attaches the specified managed policy to the specified user")
|
|
||||||
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam attach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
|
|
||||||
def iam_create_access_key(self, user_name: Optional[str] = None) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam create-access-key --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
if user_name:
|
|
||||||
cmd += f" --user-name {user_name}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
access_key_id = response["AccessKey"].get("AccessKeyId")
|
|
||||||
secret_access_key = response["AccessKey"].get("SecretAccessKey")
|
|
||||||
assert access_key_id, f"Expected AccessKeyId in response:\n{response}"
|
|
||||||
assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"
|
|
||||||
|
|
||||||
return access_key_id, secret_access_key
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Creates a new group")
|
|
||||||
def iam_create_group(self, group_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam create-group --group-name {group_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("Group"), f"Expected Group in response:\n{response}"
|
|
||||||
assert response["Group"].get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Creates a new managed policy for your AWS account")
|
|
||||||
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam create-policy --endpoint {self.iam_endpoint}"
|
|
||||||
f" --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
|
|
||||||
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Creates a new IAM user for your AWS account")
def iam_create_user(self, user_name: str) -> dict:
    """Create IAM user *user_name* via the AWS CLI and validate the parsed response."""
    cmd = f"aws {self.common_flags} iam create-user --user-name {user_name} --endpoint {self.iam_endpoint}"
    if self.profile:
        cmd += f" --profile {self.profile}"

    response = self._to_json(self.local_shell.exec(cmd).stdout)

    assert response.get("User"), f"Expected User in response:\n{response}"
    assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the access key pair associated with the specified IAM user")
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
    """Delete access key *access_key_id* belonging to IAM user *user_name*.

    Returns:
        Parsed JSON response of `aws iam delete-access-key`.
    """
    cmd = f"aws {self.common_flags} iam delete-access-key --access-key-id {access_key_id} --user-name {user_name} --endpoint {self.iam_endpoint}"
    if self.profile:
        cmd += f" --profile {self.profile}"

    return self._to_json(self.local_shell.exec(cmd).stdout)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified IAM group")
|
|
||||||
def iam_delete_group(self, group_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam delete-group --group-name {group_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
|
|
||||||
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam delete-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified managed policy")
|
|
||||||
def iam_delete_policy(self, policy_arn: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam delete-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified IAM user")
|
|
||||||
def iam_delete_user(self, user_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam delete-user --user-name {user_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
|
|
||||||
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam delete-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Removes the specified managed policy from the specified IAM group")
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
    """Detach managed policy *policy_arn* from group *group_name*.

    Sleeps after the call to let the change propagate, mirroring the other
    policy-mutation helpers in this client.
    """
    cmd = f"aws {self.common_flags} iam detach-group-policy --group-name {group_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
    if self.profile:
        cmd += f" --profile {self.profile}"

    response = self._to_json(self.local_shell.exec(cmd).stdout)
    # Propagation delay before callers act on the detached policy.
    sleep(S3_SYNC_WAIT_TIME * 10)

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Removes the specified managed policy from the specified user")
|
|
||||||
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam detach-user-policy --user-name {user_name} --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
def iam_get_group(self, group_name: str) -> dict:
    """Fetch group *group_name* together with its member users and validate the response."""
    cmd = f"aws {self.common_flags} iam get-group --group-name {group_name} --endpoint {self.iam_endpoint}"
    if self.profile:
        cmd += f" --profile {self.profile}"

    response = self._to_json(self.local_shell.exec(cmd).stdout)

    assert "Users" in response.keys(), f"Expected Users in response:\n{response}"
    assert response.get("Group").get("GroupName") == group_name, f"GroupName should be equal to {group_name}"

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
|
|
||||||
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam get-group-policy --group-name {group_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves information about the specified managed policy")
def iam_get_policy(self, policy_arn: str) -> dict:
    """Fetch a managed policy by ARN via `aws iam get-policy` and validate the response.

    Args:
        policy_arn: ARN of the managed policy to look up.

    Returns:
        Parsed JSON response containing the "Policy" object.
    """
    cmd = f"aws {self.common_flags} iam get-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
    if self.profile:
        cmd += f" --profile {self.profile}"
    output = self.local_shell.exec(cmd).stdout
    response = self._to_json(output)

    assert response.get("Policy"), f"Expected Policy in response:\n{response}"
    # Bug fix: the original compared against `policy_name`, a name that does not
    # exist in this method (NameError at runtime). The only identifier available
    # here is the ARN, so validate the returned Arn instead.
    assert response["Policy"].get("Arn") == policy_arn, f"Arn should be equal to {policy_arn}"

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves information about the specified version of the specified managed policy")
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
    """Fetch version *version_id* of managed policy *policy_arn* and validate the response."""
    cmd = f"aws {self.common_flags} iam get-policy-version --policy-arn {policy_arn} --version-id {version_id} --endpoint {self.iam_endpoint}"
    if self.profile:
        cmd += f" --profile {self.profile}"

    response = self._to_json(self.local_shell.exec(cmd).stdout)

    assert response.get("PolicyVersion"), f"Expected PolicyVersion in response:\n{response}"
    assert response["PolicyVersion"].get("VersionId") == version_id, f"VersionId should be equal to {version_id}"

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves information about the specified IAM user")
def iam_get_user(self, user_name: str) -> dict:
    """Fetch IAM user *user_name* and validate the returned user name."""
    cmd = f"aws {self.common_flags} iam get-user --user-name {user_name} --endpoint {self.iam_endpoint}"
    if self.profile:
        cmd += f" --profile {self.profile}"

    response = self._to_json(self.local_shell.exec(cmd).stdout)

    assert response.get("User"), f"Expected User in response:\n{response}"
    assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
    """Fetch the inline policy *policy_name* embedded in IAM user *user_name*.

    Args:
        user_name: Name of the user owning the inline policy.
        policy_name: Name of the inline policy to fetch.

    Returns:
        Parsed JSON response of `aws iam get-user-policy`.
    """
    cmd = f"aws {self.common_flags} iam get-user-policy --user-name {user_name} --policy-name {policy_name} --endpoint {self.iam_endpoint}"
    if self.profile:
        cmd += f" --profile {self.profile}"
    output = self.local_shell.exec(cmd).stdout
    response = self._to_json(output)

    # Bug fix: the failure message said "Expected User" while the check is for
    # the top-level "UserName" key returned by get-user-policy.
    assert response.get("UserName"), f"Expected UserName in response:\n{response}"

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Returns information about the access key IDs associated with the specified IAM user")
|
|
||||||
def iam_list_access_keys(self, user_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-access-keys --user-name {user_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists all managed policies that are attached to the specified IAM group")
|
|
||||||
def iam_list_attached_group_policies(self, group_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-attached-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
|
|
||||||
def iam_list_attached_user_policies(self, user_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-attached-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("AttachedPolicies"), f"Expected AttachedPolicies in response:\n{response}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
|
|
||||||
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-entities-for-policy --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}"
|
|
||||||
assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
|
|
||||||
def iam_list_group_policies(self, group_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-group-policies --group-name {group_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the IAM groups")
|
|
||||||
def iam_list_groups(self) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-groups --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
|
|
||||||
def iam_list_groups_for_user(self, user_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-groups-for-user --user-name {user_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("Groups"), f"Expected Groups in response:\n{response}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists all the managed policies that are available in your AWS account")
|
|
||||||
def iam_list_policies(self) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-policies --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert 'Policies' in response.keys(), f"Expected Policies in response:\n{response}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists information about the versions of the specified managed policy")
|
|
||||||
def iam_list_policy_versions(self, policy_arn: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-policy-versions --policy-arn {policy_arn} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("Versions"), f"Expected Versions in response:\n{response}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
|
|
||||||
def iam_list_user_policies(self, user_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam list-user-policies --user-name {user_name} --endpoint {self.iam_endpoint}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
assert response.get("PolicyNames"), f"Expected PolicyNames in response:\n{response}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the IAM users")
def iam_list_users(self) -> dict:
    """List all IAM users visible at the configured IAM endpoint."""
    cmd = f"aws {self.common_flags} iam list-users --endpoint {self.iam_endpoint}"
    if self.profile:
        cmd += f" --profile {self.profile}"

    response = self._to_json(self.local_shell.exec(cmd).stdout)

    assert "Users" in response.keys(), f"Expected Users in response:\n{response}"

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
|
|
||||||
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam put-group-policy --endpoint {self.iam_endpoint}"
|
|
||||||
f" --group-name {group_name} --policy-name {policy_name} --policy-document \'{json.dumps(policy_document)}\'"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
    """Embed (create or replace) inline policy *policy_name* in user *user_name*.

    Sleeps after the call to let the change propagate before callers rely on it.
    """
    profile_flag = f" --profile {self.profile}" if self.profile else ""
    cmd = (
        f"aws {self.common_flags} iam put-user-policy --endpoint {self.iam_endpoint}"
        f" --user-name {user_name} --policy-name {policy_name} --policy-document '{json.dumps(policy_document)}'"
        f"{profile_flag}"
    )

    response = self._to_json(self.local_shell.exec(cmd).stdout)
    # Propagation delay, consistent with the other policy-mutation helpers.
    sleep(S3_SYNC_WAIT_TIME * 10)

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Removes the specified user from the specified group")
|
|
||||||
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
|
|
||||||
cmd = (
|
|
||||||
f"aws {self.common_flags} iam remove-user-from-group --endpoint {self.iam_endpoint}"
|
|
||||||
f" --group-name {group_name} --user-name {user_name}"
|
|
||||||
)
|
|
||||||
if self.profile:
|
|
||||||
cmd += f" --profile {self.profile}"
|
|
||||||
output = self.local_shell.exec(cmd).stdout
|
|
||||||
response = self._to_json(output)
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Updates the name and/or the path of the specified IAM group")
def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
    """Rename and/or move IAM group *group_name*.

    Unset options are simply omitted from the CLI invocation.
    """
    cmd = f"aws {self.common_flags} iam update-group --group-name {group_name} --endpoint {self.iam_endpoint}"
    # Append only the flags whose values were actually provided.
    for flag, value in (("--new-group-name", new_name), ("--new-path", new_path), ("--profile", self.profile)):
        if value:
            cmd += f" {flag} {value}"

    return self._to_json(self.local_shell.exec(cmd).stdout)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Updates the name and/or the path of the specified IAM user")
def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
    """Rename and/or move IAM user *user_name*.

    Unset options are simply omitted from the CLI invocation.
    """
    cmd = f"aws {self.common_flags} iam update-user --user-name {user_name} --endpoint {self.iam_endpoint}"
    # Append only the flags whose values were actually provided.
    for flag, value in (("--new-user-name", new_name), ("--new-path", new_path), ("--profile", self.profile)):
        if value:
            cmd += f" {flag} {value}"

    return self._to_json(self.local_shell.exec(cmd).stdout)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -18,9 +18,6 @@ from frostfs_testlib.resources.common import ASSETS_DIR, MAX_REQUEST_ATTEMPTS, R
|
||||||
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
|
from frostfs_testlib.s3.interfaces import S3ClientWrapper, VersioningStatus, _make_objs_dict
|
||||||
from frostfs_testlib.utils.cli_utils import log_command_execution
|
from frostfs_testlib.utils.cli_utils import log_command_execution
|
||||||
|
|
||||||
# TODO: Refactor this code to use shell instead of _cmd_run
|
|
||||||
from frostfs_testlib.utils.cli_utils import _configure_aws_cli
|
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
# Disable warnings on self-signed certificate which the
|
# Disable warnings on self-signed certificate which the
|
||||||
|
@ -46,11 +43,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
|
||||||
@reporter.step("Configure S3 client (boto3)")
|
@reporter.step("Configure S3 client (boto3)")
|
||||||
@report_error
|
@report_error
|
||||||
def __init__(
|
def __init__(
|
||||||
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default", region: str = "us-east-1"
|
self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str = "default"
|
||||||
) -> None:
|
) -> None:
|
||||||
self.boto3_client: S3Client = None
|
self.boto3_client: S3Client = None
|
||||||
self.session = boto3.Session()
|
self.session = boto3.Session(profile_name=profile)
|
||||||
self.region = region
|
|
||||||
self.config = Config(
|
self.config = Config(
|
||||||
retries={
|
retries={
|
||||||
"max_attempts": MAX_REQUEST_ATTEMPTS,
|
"max_attempts": MAX_REQUEST_ATTEMPTS,
|
||||||
|
@ -60,7 +56,6 @@ class Boto3ClientWrapper(S3ClientWrapper):
|
||||||
self.access_key_id: str = access_key_id
|
self.access_key_id: str = access_key_id
|
||||||
self.secret_access_key: str = secret_access_key
|
self.secret_access_key: str = secret_access_key
|
||||||
self.s3gate_endpoint: str = ""
|
self.s3gate_endpoint: str = ""
|
||||||
self.boto3_iam_client: S3Client = None
|
|
||||||
self.set_endpoint(s3gate_endpoint)
|
self.set_endpoint(s3gate_endpoint)
|
||||||
|
|
||||||
@reporter.step("Set endpoint S3 to {s3gate_endpoint}")
|
@reporter.step("Set endpoint S3 to {s3gate_endpoint}")
|
||||||
|
@ -74,23 +69,11 @@ class Boto3ClientWrapper(S3ClientWrapper):
|
||||||
service_name="s3",
|
service_name="s3",
|
||||||
aws_access_key_id=self.access_key_id,
|
aws_access_key_id=self.access_key_id,
|
||||||
aws_secret_access_key=self.secret_access_key,
|
aws_secret_access_key=self.secret_access_key,
|
||||||
region_name=self.region,
|
|
||||||
config=self.config,
|
config=self.config,
|
||||||
endpoint_url=s3gate_endpoint,
|
endpoint_url=s3gate_endpoint,
|
||||||
verify=False,
|
verify=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Set endpoint IAM to {iam_endpoint}")
|
|
||||||
def set_iam_endpoint(self, iam_endpoint: str):
|
|
||||||
self.boto3_iam_client = self.session.client(
|
|
||||||
service_name="iam",
|
|
||||||
aws_access_key_id=self.access_key_id,
|
|
||||||
aws_secret_access_key=self.secret_access_key,
|
|
||||||
endpoint_url=iam_endpoint,
|
|
||||||
verify=False,)
|
|
||||||
|
|
||||||
|
|
||||||
def _to_s3_param(self, param: str):
|
def _to_s3_param(self, param: str):
|
||||||
replacement_map = {
|
replacement_map = {
|
||||||
"Acl": "ACL",
|
"Acl": "ACL",
|
||||||
|
@ -135,7 +118,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
|
||||||
|
|
||||||
s3_bucket = self.boto3_client.create_bucket(**params)
|
s3_bucket = self.boto3_client.create_bucket(**params)
|
||||||
log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
|
log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
sleep(S3_SYNC_WAIT_TIME)
|
||||||
return bucket
|
return bucket
|
||||||
|
|
||||||
@reporter.step("List buckets S3")
|
@reporter.step("List buckets S3")
|
||||||
|
@ -156,7 +139,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
|
||||||
def delete_bucket(self, bucket: str) -> None:
|
def delete_bucket(self, bucket: str) -> None:
|
||||||
response = self.boto3_client.delete_bucket(Bucket=bucket)
|
response = self.boto3_client.delete_bucket(Bucket=bucket)
|
||||||
log_command_execution("S3 Delete bucket result", response)
|
log_command_execution("S3 Delete bucket result", response)
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
sleep(S3_SYNC_WAIT_TIME)
|
||||||
|
|
||||||
@reporter.step("Head bucket S3")
|
@reporter.step("Head bucket S3")
|
||||||
@report_error
|
@report_error
|
||||||
|
@ -372,7 +355,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
|
||||||
}
|
}
|
||||||
response = self.boto3_client.delete_object(**params)
|
response = self.boto3_client.delete_object(**params)
|
||||||
log_command_execution("S3 Delete object result", response)
|
log_command_execution("S3 Delete object result", response)
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
sleep(S3_SYNC_WAIT_TIME)
|
||||||
return response
|
return response
|
||||||
|
|
||||||
@reporter.step("Delete objects S3")
|
@reporter.step("Delete objects S3")
|
||||||
|
@ -383,7 +366,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
|
||||||
assert (
|
assert (
|
||||||
"Errors" not in response
|
"Errors" not in response
|
||||||
), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}'
|
), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}'
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
sleep(S3_SYNC_WAIT_TIME)
|
||||||
return response
|
return response
|
||||||
|
|
||||||
@reporter.step("Delete object versions S3")
|
@reporter.step("Delete object versions S3")
|
||||||
|
@ -609,10 +592,10 @@ class Boto3ClientWrapper(S3ClientWrapper):
|
||||||
|
|
||||||
@reporter.step("Put object tagging")
|
@reporter.step("Put object tagging")
|
||||||
@report_error
|
@report_error
|
||||||
def put_object_tagging(self, bucket: str, key: str, tags: list, version_id: Optional[str] = '') -> None:
|
def put_object_tagging(self, bucket: str, key: str, tags: list) -> None:
|
||||||
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
|
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
|
||||||
tagging = {"TagSet": tags}
|
tagging = {"TagSet": tags}
|
||||||
response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging, VersionId=version_id)
|
response = self.boto3_client.put_object_tagging(Bucket=bucket, Key=key, Tagging=tagging)
|
||||||
log_command_execution("S3 Put object tagging", response)
|
log_command_execution("S3 Put object tagging", response)
|
||||||
|
|
||||||
@reporter.step("Get object tagging")
|
@reporter.step("Get object tagging")
|
||||||
|
@ -671,287 +654,3 @@ class Boto3ClientWrapper(S3ClientWrapper):
|
||||||
raise NotImplementedError("Cp is not supported for boto3 client")
|
raise NotImplementedError("Cp is not supported for boto3 client")
|
||||||
|
|
||||||
# END OBJECT METHODS #
|
# END OBJECT METHODS #
|
||||||
|
|
||||||
|
|
||||||
# IAM METHODS #
|
|
||||||
# Some methods don't have checks because boto3 is silent in some cases (delete, attach, etc.)
|
|
||||||
|
|
||||||
@reporter.step("Adds the specified user to the specified group")
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
    """Add IAM user *user_name* to group *group_name* via boto3."""
    return self.boto3_iam_client.add_user_to_group(UserName=user_name, GroupName=group_name)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Attaches the specified managed policy to the specified IAM group")
|
|
||||||
def iam_attach_group_policy(self, group_name: str, policy_arn: str) -> dict:
|
|
||||||
response = self.boto3_iam_client.attach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Attaches the specified managed policy to the specified user")
|
|
||||||
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
|
|
||||||
response = self.boto3_iam_client.attach_user_policy(UserName=user_name, PolicyArn=policy_arn)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME * 10)
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Creates a new AWS secret access key and access key ID for the specified user")
def iam_create_access_key(self, user_name: str) -> tuple:
    """Create an access key pair for *user_name* via boto3 IAM.

    Bug fix: the return annotation said ``dict`` although the method returns a
    2-tuple; annotation corrected (no behavior change).

    Args:
        user_name: IAM user to create the key pair for.

    Returns:
        Tuple of ``(access_key_id, secret_access_key)``.
    """
    response = self.boto3_iam_client.create_access_key(UserName=user_name)

    access_key = response["AccessKey"]
    access_key_id = access_key.get("AccessKeyId")
    secret_access_key = access_key.get("SecretAccessKey")
    assert access_key_id, f"Expected AccessKeyId in response:\n{response}"
    assert secret_access_key, f"Expected SecretAccessKey in response:\n{response}"

    return access_key_id, secret_access_key
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Creates a new group")
def iam_create_group(self, group_name: str) -> dict:
    """Create IAM group *group_name* via boto3 and validate the response."""
    response = self.boto3_iam_client.create_group(GroupName=group_name)

    group = response.get("Group")
    assert group, f"Expected Group in response:\n{response}"
    assert group.get("GroupName") == group_name, f"GroupName should be equal to {group_name}"

    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Creates a new managed policy for your AWS account")
|
|
||||||
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
|
|
||||||
response = self.boto3_iam_client.create_policy(PolicyName=policy_name, PolicyDocument=json.dumps(policy_document))
|
|
||||||
assert response.get("Policy"), f"Expected Policy in response:\n{response}"
|
|
||||||
assert response["Policy"].get("PolicyName") == policy_name, f"PolicyName should be equal to {policy_name}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Creates a new IAM user for your AWS account")
|
|
||||||
def iam_create_user(self, user_name: str) -> dict:
|
|
||||||
response = self.boto3_iam_client.create_user(UserName=user_name)
|
|
||||||
assert response.get("User"), f"Expected User in response:\n{response}"
|
|
||||||
assert response["User"].get("UserName") == user_name, f"UserName should be equal to {user_name}"
|
|
||||||
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the access key pair associated with the specified IAM user")
|
|
||||||
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
|
|
||||||
response = self.boto3_iam_client.delete_access_key(AccessKeyId=access_key_id, UserName=user_name)
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified IAM group")
|
|
||||||
def iam_delete_group(self, group_name: str) -> dict:
|
|
||||||
response = self.boto3_iam_client.delete_group(GroupName=group_name)
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM group")
|
|
||||||
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
|
|
||||||
response = self.boto3_iam_client.delete_group_policy(GroupName=group_name, PolicyName=policy_name)
|
|
||||||
return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified managed policy")
def iam_delete_policy(self, policy_arn: str) -> dict:
    """Delete a managed policy by ARN; returns the raw boto3 response."""
    return self.boto3_iam_client.delete_policy(PolicyArn=policy_arn)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified IAM user")
def iam_delete_user(self, user_name: str) -> dict:
    """Delete the specified IAM user; returns the raw boto3 response."""
    return self.boto3_iam_client.delete_user(UserName=user_name)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Deletes the specified inline policy that is embedded in the specified IAM user")
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
    """Delete an inline policy embedded in a user; returns the raw boto3 response."""
    return self.boto3_iam_client.delete_user_policy(UserName=user_name, PolicyName=policy_name)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Removes the specified managed policy from the specified IAM group")
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
    """Detach a managed policy from a group, then wait for the change to propagate."""
    response = self.boto3_iam_client.detach_group_policy(GroupName=group_name, PolicyArn=policy_arn)
    # Policy changes are eventually consistent; give the gate time to sync.
    propagation_delay = S3_SYNC_WAIT_TIME * 10
    sleep(propagation_delay)
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Removes the specified managed policy from the specified user")
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
    """Detach a managed policy from a user, then wait for the change to propagate."""
    response = self.boto3_iam_client.detach_user_policy(UserName=user_name, PolicyArn=policy_arn)
    # Policy changes are eventually consistent; give the gate time to sync.
    propagation_delay = S3_SYNC_WAIT_TIME * 10
    sleep(propagation_delay)
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Returns a list of IAM users that are in the specified IAM group")
def iam_get_group(self, group_name: str) -> dict:
    """Fetch group membership and verify the response is for the requested group."""
    response = self.boto3_iam_client.get_group(GroupName=group_name)
    group = response.get("Group")
    assert group.get("GroupName") == group_name, f"GroupName should be equal to {group_name}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM group")
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
    """Fetch an inline group policy; returns the raw boto3 response."""
    return self.boto3_iam_client.get_group_policy(GroupName=group_name, PolicyName=policy_name)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves information about the specified managed policy")
def iam_get_policy(self, policy_arn: str) -> dict:
    """Fetch a managed policy by ARN and validate the returned payload.

    Bug fix: the original asserted ``PolicyName == policy_name`` although
    this method has no ``policy_name`` parameter, so every call raised
    NameError. The only identifier available here is the ARN, so the
    returned ``Policy.Arn`` is validated against ``policy_arn`` instead.
    """
    response = self.boto3_iam_client.get_policy(PolicyArn=policy_arn)
    assert response.get("Policy"), f"Expected Policy in response:\n{response}"
    assert response["Policy"].get("Arn") == policy_arn, f"Arn should be equal to {policy_arn}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves information about the specified version of the specified managed policy")
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
    """Fetch a specific policy version and verify the response echoes its id."""
    response = self.boto3_iam_client.get_policy_version(PolicyArn=policy_arn, VersionId=version_id)
    version = response.get("PolicyVersion")
    assert version, f"Expected PolicyVersion in response:\n{response}"
    assert version.get("VersionId") == version_id, f"VersionId should be equal to {version_id}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves information about the specified IAM user")
def iam_get_user(self, user_name: str) -> dict:
    """Fetch an IAM user and verify the response echoes the name."""
    response = self.boto3_iam_client.get_user(UserName=user_name)
    user = response.get("User")
    assert user, f"Expected User in response:\n{response}"
    assert user.get("UserName") == user_name, f"UserName should be equal to {user_name}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Retrieves the specified inline policy document that is embedded in the specified IAM user")
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
    """Fetch an inline user policy; the response must carry a UserName field."""
    response = self.boto3_iam_client.get_user_policy(UserName=user_name, PolicyName=policy_name)
    owner = response.get("UserName")
    assert owner, f"Expected UserName in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Returns information about the access key IDs associated with the specified IAM user")
def iam_list_access_keys(self, user_name: str) -> dict:
    """List access keys of a user; returns the raw boto3 response."""
    return self.boto3_iam_client.list_access_keys(UserName=user_name)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists all managed policies that are attached to the specified IAM group")
def iam_list_attached_group_policies(self, group_name: str) -> dict:
    """List managed policies attached to a group; the list must be non-empty."""
    response = self.boto3_iam_client.list_attached_group_policies(GroupName=group_name)
    attached = response.get("AttachedPolicies")
    assert attached, f"Expected AttachedPolicies in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists all managed policies that are attached to the specified IAM user")
def iam_list_attached_user_policies(self, user_name: str) -> dict:
    """List managed policies attached to a user; the list must be non-empty."""
    response = self.boto3_iam_client.list_attached_user_policies(UserName=user_name)
    attached = response.get("AttachedPolicies")
    assert attached, f"Expected AttachedPolicies in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists all IAM users, groups, and roles that the specified managed policy is attached to")
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
    """List entities a policy is attached to; PolicyGroups and PolicyUsers must both be non-empty."""
    response = self.boto3_iam_client.list_entities_for_policy(PolicyArn=policy_arn)
    assert response.get("PolicyGroups"), f"Expected PolicyGroups in response:\n{response}"
    assert response.get("PolicyUsers"), f"Expected PolicyUsers in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the names of the inline policies that are embedded in the specified IAM group")
def iam_list_group_policies(self, group_name: str) -> dict:
    """List inline policy names of a group; the list must be non-empty."""
    response = self.boto3_iam_client.list_group_policies(GroupName=group_name)
    names = response.get("PolicyNames")
    assert names, f"Expected PolicyNames in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the IAM groups")
def iam_list_groups(self) -> dict:
    """List IAM groups; the response must contain a non-empty Groups list."""
    response = self.boto3_iam_client.list_groups()
    groups = response.get("Groups")
    assert groups, f"Expected Groups in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the IAM groups that the specified IAM user belongs to")
def iam_list_groups_for_user(self, user_name: str) -> dict:
    """List groups a user belongs to; the response must contain a non-empty Groups list."""
    response = self.boto3_iam_client.list_groups_for_user(UserName=user_name)
    groups = response.get("Groups")
    assert groups, f"Expected Groups in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists all the managed policies that are available in your AWS account")
def iam_list_policies(self) -> dict:
    """List managed policies; the response must contain a non-empty Policies list."""
    response = self.boto3_iam_client.list_policies()
    policies = response.get("Policies")
    assert policies, f"Expected Policies in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists information about the versions of the specified managed policy")
def iam_list_policy_versions(self, policy_arn: str) -> dict:
    """List versions of a managed policy; the list must be non-empty."""
    response = self.boto3_iam_client.list_policy_versions(PolicyArn=policy_arn)
    versions = response.get("Versions")
    assert versions, f"Expected Versions in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the names of the inline policies embedded in the specified IAM user")
def iam_list_user_policies(self, user_name: str) -> dict:
    """List inline policy names of a user; the list must be non-empty."""
    response = self.boto3_iam_client.list_user_policies(UserName=user_name)
    names = response.get("PolicyNames")
    assert names, f"Expected PolicyNames in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Lists the IAM users")
def iam_list_users(self) -> dict:
    """List IAM users; the response must contain a non-empty Users list."""
    response = self.boto3_iam_client.list_users()
    users = response.get("Users")
    assert users, f"Expected Users in response:\n{response}"
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM group")
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
    """Embed/replace an inline group policy, then wait for the change to propagate."""
    serialized_policy = json.dumps(policy_document)
    response = self.boto3_iam_client.put_group_policy(GroupName=group_name, PolicyName=policy_name, PolicyDocument=serialized_policy)
    # Policy changes are eventually consistent; give the gate time to sync.
    sleep(S3_SYNC_WAIT_TIME * 10)
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Adds or updates an inline policy document that is embedded in the specified IAM user")
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
    """Embed/replace an inline user policy, then wait for the change to propagate."""
    serialized_policy = json.dumps(policy_document)
    response = self.boto3_iam_client.put_user_policy(UserName=user_name, PolicyName=policy_name, PolicyDocument=serialized_policy)
    # Policy changes are eventually consistent; give the gate time to sync.
    sleep(S3_SYNC_WAIT_TIME * 10)
    return response
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Removes the specified user from the specified group")
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
    """Remove a user from a group; returns the raw boto3 response."""
    return self.boto3_iam_client.remove_user_from_group(GroupName=group_name, UserName=user_name)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Updates the name and/or the path of the specified IAM group")
def iam_update_group(self, group_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
    """Rename and/or move the specified IAM group.

    Bug fix: the original accepted ``new_path`` but always sent the
    hard-coded path '/'. The argument is now honored; '/' remains the
    default when ``new_path`` is None, preserving existing behavior.
    """
    return self.boto3_iam_client.update_group(GroupName=group_name, NewGroupName=new_name, NewPath=new_path or "/")
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Updates the name and/or the path of the specified IAM user")
def iam_update_user(self, user_name: str, new_name: str, new_path: Optional[str] = None) -> dict:
    """Rename and/or move the specified IAM user.

    Bug fix: the original accepted ``new_path`` but always sent the
    hard-coded path '/'. The argument is now honored; '/' remains the
    default when ``new_path`` is None, preserving existing behavior.
    """
    return self.boto3_iam_client.update_user(UserName=user_name, NewUserName=new_name, NewPath=new_path or "/")
|
|
|
@ -1,16 +0,0 @@
|
||||||
import re
|
|
||||||
|
|
||||||
from frostfs_testlib.cli.generic_cli import GenericCli
|
|
||||||
from frostfs_testlib.s3.interfaces import BucketContainerResolver
|
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode
|
|
||||||
|
|
||||||
|
|
||||||
class CurlBucketContainerResolver(BucketContainerResolver):
    """Resolve a bucket's container ID by probing the node's local S3 gate with curl."""

    def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
        # HEAD the bucket on the node-local gate and read the id from the header.
        curl = GenericCli("curl", node.host)
        output = curl(f"-I http://127.0.0.1:8084/{bucket_name}")
        match = re.search(r"X-Container-Id: (\S+)", output.stdout)
        return match.group(1) if match else None
|
|
|
@ -1,8 +1,7 @@
|
||||||
from abc import ABC, abstractmethod
|
from abc import abstractmethod
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
from typing import Literal, Optional, Union
|
from typing import Literal, Optional, Union
|
||||||
|
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode
|
|
||||||
from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
|
from frostfs_testlib.testing.readable import HumanReadableABC, HumanReadableEnum
|
||||||
|
|
||||||
|
|
||||||
|
@ -32,25 +31,9 @@ ACL_COPY = [
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
class BucketContainerResolver(ABC):
|
|
||||||
@abstractmethod
|
|
||||||
def resolve(self, node: ClusterNode, bucket_name: str, **kwargs: dict) -> str:
|
|
||||||
"""
|
|
||||||
Resolve Container ID from bucket name
|
|
||||||
|
|
||||||
Args:
|
|
||||||
node: node from where we want to resolve
|
|
||||||
bucket_name: name of the bucket
|
|
||||||
**kwargs: any other required params
|
|
||||||
|
|
||||||
Returns: Container ID
|
|
||||||
"""
|
|
||||||
raise NotImplementedError("Call from abstract class")
|
|
||||||
|
|
||||||
|
|
||||||
class S3ClientWrapper(HumanReadableABC):
|
class S3ClientWrapper(HumanReadableABC):
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str, region: str) -> None:
|
def __init__(self, access_key_id: str, secret_access_key: str, s3gate_endpoint: str, profile: str) -> None:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
|
@ -313,11 +296,15 @@ class S3ClientWrapper(HumanReadableABC):
|
||||||
abort a given multipart upload multiple times in order to completely free all storage consumed by all parts."""
|
abort a given multipart upload multiple times in order to completely free all storage consumed by all parts."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def upload_part(self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str) -> str:
|
def upload_part(
|
||||||
|
self, bucket: str, key: str, upload_id: str, part_num: int, filepath: str
|
||||||
|
) -> str:
|
||||||
"""Uploads a part in a multipart upload."""
|
"""Uploads a part in a multipart upload."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
def upload_part_copy(self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str) -> str:
|
def upload_part_copy(
|
||||||
|
self, bucket: str, key: str, upload_id: str, part_num: int, copy_source: str
|
||||||
|
) -> str:
|
||||||
"""Uploads a part by copying data from an existing object as data source."""
|
"""Uploads a part by copying data from an existing object as data source."""
|
||||||
|
|
||||||
@abstractmethod
|
@abstractmethod
|
||||||
|
@ -395,154 +382,3 @@ class S3ClientWrapper(HumanReadableABC):
|
||||||
"""cp directory TODO: Add proper description"""
|
"""cp directory TODO: Add proper description"""
|
||||||
|
|
||||||
# END OF OBJECT METHODS #
|
# END OF OBJECT METHODS #
|
||||||
|
|
||||||
|
|
||||||
# IAM METHODS #
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_add_user_to_group(self, user_name: str, group_name: str) -> dict:
|
|
||||||
'''Adds the specified user to the specified group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_attach_group_policy(self, group: str, policy_arn: str) -> dict:
|
|
||||||
'''Attaches the specified managed policy to the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_attach_user_policy(self, user_name: str, policy_arn: str) -> dict:
|
|
||||||
'''Attaches the specified managed policy to the specified user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_create_access_key(self, user_name: str) -> dict:
|
|
||||||
'''Creates a new AWS secret access key and access key ID for the specified user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_create_group(self, group_name: str) -> dict:
|
|
||||||
'''Creates a new group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_create_policy(self, policy_name: str, policy_document: dict) -> dict:
|
|
||||||
'''Creates a new managed policy for your AWS account'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_create_user(self, user_name: str) -> dict:
|
|
||||||
'''Creates a new IAM user for your AWS account'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_delete_access_key(self, access_key_id: str, user_name: str) -> dict:
|
|
||||||
'''Deletes the access key pair associated with the specified IAM user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_delete_group(self, group_name: str) -> dict:
|
|
||||||
'''Deletes the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_delete_group_policy(self, group_name: str, policy_name: str) -> dict:
|
|
||||||
'''Deletes the specified inline policy that is embedded in the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_delete_policy(self, policy_arn: str) -> dict:
|
|
||||||
'''Deletes the specified managed policy'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_delete_user(self, user_name: str) -> dict:
|
|
||||||
'''Deletes the specified IAM user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_delete_user_policy(self, user_name: str, policy_name: str) -> dict:
|
|
||||||
'''Deletes the specified inline policy that is embedded in the specified IAM user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_detach_group_policy(self, group_name: str, policy_arn: str) -> dict:
|
|
||||||
'''Removes the specified managed policy from the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_detach_user_policy(self, user_name: str, policy_arn: str) -> dict:
|
|
||||||
'''Removes the specified managed policy from the specified user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_get_group(self, group_name: str) -> dict:
|
|
||||||
'''Returns a list of IAM users that are in the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_get_group_policy(self, group_name: str, policy_name: str) -> dict:
|
|
||||||
'''Retrieves the specified inline policy document that is embedded in the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_get_policy(self, policy_arn: str) -> dict:
|
|
||||||
'''Retrieves information about the specified managed policy'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_get_policy_version(self, policy_arn: str, version_id: str) -> dict:
|
|
||||||
'''Retrieves information about the specified version of the specified managed policy'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_get_user(self, user_name: str) -> dict:
|
|
||||||
'''Retrieves information about the specified IAM user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_get_user_policy(self, user_name: str, policy_name: str) -> dict:
|
|
||||||
'''Retrieves the specified inline policy document that is embedded in the specified IAM user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_access_keys(self, user_name: str) -> dict:
|
|
||||||
'''Returns information about the access key IDs associated with the specified IAM user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_attached_group_policies(self, group_name: str) -> dict:
|
|
||||||
'''Lists all managed policies that are attached to the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_attached_user_policies(self, user_name: str) -> dict:
|
|
||||||
'''Lists all managed policies that are attached to the specified IAM user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_entities_for_policy(self, policy_arn: str) -> dict:
|
|
||||||
'''Lists all IAM users, groups, and roles that the specified managed policy is attached to'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_group_policies(self, group_name: str) -> dict:
|
|
||||||
'''Lists the names of the inline policies that are embedded in the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_groups(self) -> dict:
|
|
||||||
'''Lists the IAM groups'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_groups_for_user(self, user_name: str) -> dict:
|
|
||||||
'''Lists the IAM groups that the specified IAM user belongs to'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_policies(self) -> dict:
|
|
||||||
'''Lists all the managed policies that are available in your AWS account'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_policy_versions(self, policy_arn: str) -> dict:
|
|
||||||
'''Lists information about the versions of the specified managed policy'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_user_policies(self, user_name: str) -> dict:
|
|
||||||
'''Lists the names of the inline policies embedded in the specified IAM user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_list_users(self) -> dict:
|
|
||||||
'''Lists the IAM users'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_put_group_policy(self, group_name: str, policy_name: str, policy_document: dict) -> dict:
|
|
||||||
'''Adds or updates an inline policy document that is embedded in the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_put_user_policy(self, user_name: str, policy_name: str, policy_document: dict) -> dict:
|
|
||||||
'''Adds or updates an inline policy document that is embedded in the specified IAM user'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_remove_user_from_group(self, group_name: str, user_name: str) -> dict:
|
|
||||||
'''Removes the specified user from the specified group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_update_group(self, group_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
|
|
||||||
'''Updates the name and/or the path of the specified IAM group'''
|
|
||||||
|
|
||||||
@abstractmethod
|
|
||||||
def iam_update_user(self, user_name: str, new_name: Optional[str] = None, new_path: Optional[str] = None) -> dict:
|
|
||||||
'''Updates the name and/or the path of the specified IAM user'''
|
|
||||||
|
|
|
@ -11,20 +11,25 @@ import base58
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import FrostfsCli
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
|
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
|
||||||
from frostfs_testlib.resources.common import ASSETS_DIR
|
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.storage.dataclasses.acl import EACL_LIFETIME, FROSTFS_CONTRACT_CACHE_TIMEOUT, EACLPubKey, EACLRole, EACLRule
|
from frostfs_testlib.storage.dataclasses.acl import (
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
EACL_LIFETIME,
|
||||||
|
FROSTFS_CONTRACT_CACHE_TIMEOUT,
|
||||||
|
EACLPubKey,
|
||||||
|
EACLRole,
|
||||||
|
EACLRule,
|
||||||
|
)
|
||||||
from frostfs_testlib.utils import wallet_utils
|
from frostfs_testlib.utils import wallet_utils
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get extended ACL")
|
@reporter.step("Get extended ACL")
|
||||||
def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
|
def get_eacl(wallet_path: str, cid: str, shell: Shell, endpoint: str) -> Optional[str]:
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
try:
|
try:
|
||||||
result = cli.container.get_eacl(rpc_endpoint=endpoint, cid=cid)
|
result = cli.container.get_eacl(wallet=wallet_path, rpc_endpoint=endpoint, cid=cid)
|
||||||
except RuntimeError as exc:
|
except RuntimeError as exc:
|
||||||
logger.info("Extended ACL table is not set for this container")
|
logger.info("Extended ACL table is not set for this container")
|
||||||
logger.info(f"Got exception while getting eacl: {exc}")
|
logger.info(f"Got exception while getting eacl: {exc}")
|
||||||
|
@ -36,15 +41,16 @@ def get_eacl(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str) -> Optio
|
||||||
|
|
||||||
@reporter.step("Set extended ACL")
|
@reporter.step("Set extended ACL")
|
||||||
def set_eacl(
|
def set_eacl(
|
||||||
wallet: WalletInfo,
|
wallet_path: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
eacl_table_path: str,
|
eacl_table_path: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
session_token: Optional[str] = None,
|
session_token: Optional[str] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
cli.container.set_eacl(
|
cli.container.set_eacl(
|
||||||
|
wallet=wallet_path,
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
table=eacl_table_path,
|
table=eacl_table_path,
|
||||||
|
@ -60,7 +66,7 @@ def _encode_cid_for_eacl(cid: str) -> str:
|
||||||
|
|
||||||
def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
|
def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
|
||||||
table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json")
|
table_file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"eacl_table_{str(uuid.uuid4())}.json")
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list)
|
cli.acl.extended_create(cid=cid, out=table_file_path, rule=rules_list)
|
||||||
|
|
||||||
with open(table_file_path, "r") as file:
|
with open(table_file_path, "r") as file:
|
||||||
|
@ -71,7 +77,7 @@ def create_eacl(cid: str, rules_list: List[EACLRule], shell: Shell) -> str:
|
||||||
|
|
||||||
|
|
||||||
def form_bearertoken_file(
|
def form_bearertoken_file(
|
||||||
wallet: WalletInfo,
|
wif: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
|
eacl_rule_list: List[Union[EACLRule, EACLPubKey]],
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
@ -86,7 +92,7 @@ def form_bearertoken_file(
|
||||||
enc_cid = _encode_cid_for_eacl(cid) if cid else None
|
enc_cid = _encode_cid_for_eacl(cid) if cid else None
|
||||||
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
|
file_path = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
|
||||||
|
|
||||||
eacl = get_eacl(wallet, cid, shell, endpoint)
|
eacl = get_eacl(wif, cid, shell, endpoint)
|
||||||
json_eacl = dict()
|
json_eacl = dict()
|
||||||
if eacl:
|
if eacl:
|
||||||
eacl = eacl.replace("eACL: ", "").split("Signature")[0]
|
eacl = eacl.replace("eACL: ", "").split("Signature")[0]
|
||||||
|
@ -127,7 +133,7 @@ def form_bearertoken_file(
|
||||||
if sign:
|
if sign:
|
||||||
sign_bearer(
|
sign_bearer(
|
||||||
shell=shell,
|
shell=shell,
|
||||||
wallet=wallet,
|
wallet_path=wif,
|
||||||
eacl_rules_file_from=file_path,
|
eacl_rules_file_from=file_path,
|
||||||
eacl_rules_file_to=file_path,
|
eacl_rules_file_to=file_path,
|
||||||
json=True,
|
json=True,
|
||||||
|
@ -158,9 +164,11 @@ def eacl_rules(access: str, verbs: list, user: str) -> list[str]:
|
||||||
return rules
|
return rules
|
||||||
|
|
||||||
|
|
||||||
def sign_bearer(shell: Shell, wallet: WalletInfo, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None:
|
def sign_bearer(shell: Shell, wallet_path: str, eacl_rules_file_from: str, eacl_rules_file_to: str, json: bool) -> None:
|
||||||
frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG)
|
||||||
frostfscli.util.sign_bearer_token(eacl_rules_file_from, eacl_rules_file_to, json=json)
|
frostfscli.util.sign_bearer_token(
|
||||||
|
wallet=wallet_path, from_file=eacl_rules_file_from, to_file=eacl_rules_file_to, json=json
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Wait for eACL cache expired")
|
@reporter.step("Wait for eACL cache expired")
|
||||||
|
@ -170,7 +178,9 @@ def wait_for_cache_expired():
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Return bearer token in base64 to caller")
|
@reporter.step("Return bearer token in base64 to caller")
|
||||||
def bearer_token_base64_from_file(bearer_path: str) -> str:
|
def bearer_token_base64_from_file(
|
||||||
|
bearer_path: str,
|
||||||
|
) -> str:
|
||||||
with open(bearer_path, "rb") as file:
|
with open(bearer_path, "rb") as file:
|
||||||
signed = file.read()
|
signed = file.read()
|
||||||
return base64.b64encode(signed).decode("utf-8")
|
return base64.b64encode(signed).decode("utf-8")
|
||||||
|
|
|
@ -1,15 +1,15 @@
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import re
|
import re
|
||||||
|
import requests
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from time import sleep
|
from time import sleep
|
||||||
from typing import Optional, Union
|
from typing import Optional, Union
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import FrostfsCli
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
from frostfs_testlib.plugins import load_plugin
|
|
||||||
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
|
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
|
||||||
from frostfs_testlib.s3.interfaces import BucketContainerResolver
|
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
|
from frostfs_testlib.steps.cli.object import put_object, put_object_to_random_node
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
|
@ -24,7 +24,7 @@ logger = logging.getLogger("NeoLogger")
|
||||||
@dataclass
|
@dataclass
|
||||||
class StorageContainerInfo:
|
class StorageContainerInfo:
|
||||||
id: str
|
id: str
|
||||||
wallet: WalletInfo
|
wallet_file: WalletInfo
|
||||||
|
|
||||||
|
|
||||||
class StorageContainer:
|
class StorageContainer:
|
||||||
|
@ -41,8 +41,11 @@ class StorageContainer:
|
||||||
def get_id(self) -> str:
|
def get_id(self) -> str:
|
||||||
return self.storage_container_info.id
|
return self.storage_container_info.id
|
||||||
|
|
||||||
def get_wallet(self) -> str:
|
def get_wallet_path(self) -> str:
|
||||||
return self.storage_container_info.wallet
|
return self.storage_container_info.wallet_file.path
|
||||||
|
|
||||||
|
def get_wallet_config_path(self) -> str:
|
||||||
|
return self.storage_container_info.wallet_file.config_path
|
||||||
|
|
||||||
@reporter.step("Generate new object and put in container")
|
@reporter.step("Generate new object and put in container")
|
||||||
def generate_object(
|
def generate_object(
|
||||||
|
@ -57,34 +60,37 @@ class StorageContainer:
|
||||||
file_hash = get_file_hash(file_path)
|
file_hash = get_file_hash(file_path)
|
||||||
|
|
||||||
container_id = self.get_id()
|
container_id = self.get_id()
|
||||||
wallet = self.get_wallet()
|
wallet_path = self.get_wallet_path()
|
||||||
|
wallet_config = self.get_wallet_config_path()
|
||||||
with reporter.step(f"Put object with size {size} to container {container_id}"):
|
with reporter.step(f"Put object with size {size} to container {container_id}"):
|
||||||
if endpoint:
|
if endpoint:
|
||||||
object_id = put_object(
|
object_id = put_object(
|
||||||
wallet=wallet,
|
wallet=wallet_path,
|
||||||
path=file_path,
|
path=file_path,
|
||||||
cid=container_id,
|
cid=container_id,
|
||||||
expire_at=expire_at,
|
expire_at=expire_at,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
endpoint=endpoint,
|
endpoint=endpoint,
|
||||||
bearer=bearer_token,
|
bearer=bearer_token,
|
||||||
|
wallet_config=wallet_config,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
object_id = put_object_to_random_node(
|
object_id = put_object_to_random_node(
|
||||||
wallet=wallet,
|
wallet=wallet_path,
|
||||||
path=file_path,
|
path=file_path,
|
||||||
cid=container_id,
|
cid=container_id,
|
||||||
expire_at=expire_at,
|
expire_at=expire_at,
|
||||||
shell=self.shell,
|
shell=self.shell,
|
||||||
cluster=self.cluster,
|
cluster=self.cluster,
|
||||||
bearer=bearer_token,
|
bearer=bearer_token,
|
||||||
|
wallet_config=wallet_config,
|
||||||
)
|
)
|
||||||
|
|
||||||
storage_object = StorageObjectInfo(
|
storage_object = StorageObjectInfo(
|
||||||
container_id,
|
container_id,
|
||||||
object_id,
|
object_id,
|
||||||
size=size,
|
size=size,
|
||||||
wallet=wallet,
|
wallet_file_path=wallet_path,
|
||||||
file_path=file_path,
|
file_path=file_path,
|
||||||
file_hash=file_hash,
|
file_hash=file_hash,
|
||||||
)
|
)
|
||||||
|
@ -95,18 +101,18 @@ class StorageContainer:
|
||||||
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
|
DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
|
||||||
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
|
SINGLE_PLACEMENT_RULE = "REP 1 IN X CBF 1 SELECT 4 FROM * AS X"
|
||||||
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
|
REP_2_FOR_3_NODES_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 3 FROM * AS X"
|
||||||
DEFAULT_EC_PLACEMENT_RULE = "EC 3.1"
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Create Container")
|
@reporter.step("Create Container")
|
||||||
def create_container(
|
def create_container(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
rule: str = DEFAULT_PLACEMENT_RULE,
|
rule: str = DEFAULT_PLACEMENT_RULE,
|
||||||
basic_acl: str = "",
|
basic_acl: str = "",
|
||||||
attributes: Optional[dict] = None,
|
attributes: Optional[dict] = None,
|
||||||
session_token: str = "",
|
session_token: str = "",
|
||||||
|
session_wallet: str = "",
|
||||||
name: Optional[str] = None,
|
name: Optional[str] = None,
|
||||||
options: Optional[dict] = None,
|
options: Optional[dict] = None,
|
||||||
await_mode: bool = True,
|
await_mode: bool = True,
|
||||||
|
@ -117,7 +123,7 @@ def create_container(
|
||||||
A wrapper for `frostfs-cli container create` call.
|
A wrapper for `frostfs-cli container create` call.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
wallet (WalletInfo): a wallet on whose behalf a container is created
|
wallet (str): a wallet on whose behalf a container is created
|
||||||
rule (optional, str): placement rule for container
|
rule (optional, str): placement rule for container
|
||||||
basic_acl (optional, str): an ACL for container, will be
|
basic_acl (optional, str): an ACL for container, will be
|
||||||
appended to `--basic-acl` key
|
appended to `--basic-acl` key
|
||||||
|
@ -139,9 +145,10 @@ def create_container(
|
||||||
(str): CID of the created container
|
(str): CID of the created container
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.container.create(
|
result = cli.container.create(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=session_wallet if session_wallet else wallet,
|
||||||
policy=rule,
|
policy=rule,
|
||||||
basic_acl=basic_acl,
|
basic_acl=basic_acl,
|
||||||
attributes=attributes,
|
attributes=attributes,
|
||||||
|
@ -162,7 +169,9 @@ def create_container(
|
||||||
return cid
|
return cid
|
||||||
|
|
||||||
|
|
||||||
def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1):
|
def wait_for_container_creation(
|
||||||
|
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 15, sleep_interval: int = 1
|
||||||
|
):
|
||||||
for _ in range(attempts):
|
for _ in range(attempts):
|
||||||
containers = list_containers(wallet, shell, endpoint)
|
containers = list_containers(wallet, shell, endpoint)
|
||||||
if cid in containers:
|
if cid in containers:
|
||||||
|
@ -172,7 +181,9 @@ def wait_for_container_creation(wallet: WalletInfo, cid: str, shell: Shell, endp
|
||||||
raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting")
|
raise RuntimeError(f"After {attempts * sleep_interval} seconds container {cid} hasn't been persisted; exiting")
|
||||||
|
|
||||||
|
|
||||||
def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1):
|
def wait_for_container_deletion(
|
||||||
|
wallet: str, cid: str, shell: Shell, endpoint: str, attempts: int = 30, sleep_interval: int = 1
|
||||||
|
):
|
||||||
for _ in range(attempts):
|
for _ in range(attempts):
|
||||||
try:
|
try:
|
||||||
get_container(wallet, cid, shell=shell, endpoint=endpoint)
|
get_container(wallet, cid, shell=shell, endpoint=endpoint)
|
||||||
|
@ -186,27 +197,29 @@ def wait_for_container_deletion(wallet: WalletInfo, cid: str, shell: Shell, endp
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("List Containers")
|
@reporter.step("List Containers")
|
||||||
def list_containers(wallet: WalletInfo, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT) -> list[str]:
|
def list_containers(
|
||||||
|
wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT
|
||||||
|
) -> list[str]:
|
||||||
"""
|
"""
|
||||||
A wrapper for `frostfs-cli container list` call. It returns all the
|
A wrapper for `frostfs-cli container list` call. It returns all the
|
||||||
available containers for the given wallet.
|
available containers for the given wallet.
|
||||||
Args:
|
Args:
|
||||||
wallet (WalletInfo): a wallet on whose behalf we list the containers
|
wallet (str): a wallet on whose behalf we list the containers
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
Returns:
|
Returns:
|
||||||
(list): list of containers
|
(list): list of containers
|
||||||
"""
|
"""
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.container.list(rpc_endpoint=endpoint, timeout=timeout)
|
result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout)
|
||||||
logger.info(f"Containers: \n{result}")
|
logger.info(f"Containers: \n{result}")
|
||||||
return result.stdout.split()
|
return result.stdout.split()
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("List Objects in container")
|
@reporter.step("List Objects in container")
|
||||||
def list_objects(
|
def list_objects(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
container_id: str,
|
container_id: str,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
|
@ -216,7 +229,7 @@ def list_objects(
|
||||||
A wrapper for `frostfs-cli container list-objects` call. It returns all the
|
A wrapper for `frostfs-cli container list-objects` call. It returns all the
|
||||||
available objects in container.
|
available objects in container.
|
||||||
Args:
|
Args:
|
||||||
wallet (WalletInfo): a wallet on whose behalf we list the containers objects
|
wallet (str): a wallet on whose behalf we list the containers objects
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
container_id: cid of container
|
container_id: cid of container
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
|
@ -224,15 +237,15 @@ def list_objects(
|
||||||
Returns:
|
Returns:
|
||||||
(list): list of containers
|
(list): list of containers
|
||||||
"""
|
"""
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.container.list_objects(rpc_endpoint=endpoint, cid=container_id, timeout=timeout)
|
result = cli.container.list_objects(rpc_endpoint=endpoint, wallet=wallet, cid=container_id, timeout=timeout)
|
||||||
logger.info(f"Container objects: \n{result}")
|
logger.info(f"Container objects: \n{result}")
|
||||||
return result.stdout.split()
|
return result.stdout.split()
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get Container")
|
@reporter.step("Get Container")
|
||||||
def get_container(
|
def get_container(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
|
@ -243,7 +256,7 @@ def get_container(
|
||||||
A wrapper for `frostfs-cli container get` call. It extracts container's
|
A wrapper for `frostfs-cli container get` call. It extracts container's
|
||||||
attributes and rearranges them into a more compact view.
|
attributes and rearranges them into a more compact view.
|
||||||
Args:
|
Args:
|
||||||
wallet (WalletInfo): path to a wallet on whose behalf we get the container
|
wallet (str): path to a wallet on whose behalf we get the container
|
||||||
cid (str): ID of the container to get
|
cid (str): ID of the container to get
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
|
@ -253,8 +266,8 @@ def get_container(
|
||||||
(dict, str): dict of container attributes
|
(dict, str): dict of container attributes
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.container.get(rpc_endpoint=endpoint, cid=cid, json_mode=json_mode, timeout=timeout)
|
result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout)
|
||||||
|
|
||||||
if not json_mode:
|
if not json_mode:
|
||||||
return result.stdout
|
return result.stdout
|
||||||
|
@ -271,34 +284,37 @@ def get_container(
|
||||||
@reporter.step("Delete Container")
|
@reporter.step("Delete Container")
|
||||||
# TODO: make the error message about a non-found container more user-friendly
|
# TODO: make the error message about a non-found container more user-friendly
|
||||||
def delete_container(
|
def delete_container(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
force: bool = False,
|
force: bool = False,
|
||||||
session_token: Optional[str] = None,
|
session_token: Optional[str] = None,
|
||||||
await_mode: bool = False,
|
await_mode: bool = False,
|
||||||
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
) -> None:
|
) -> None:
|
||||||
"""
|
"""
|
||||||
A wrapper for `frostfs-cli container delete` call.
|
A wrapper for `frostfs-cli container delete` call.
|
||||||
Args:
|
Args:
|
||||||
await_mode: Block execution until container is removed.
|
wallet (str): path to a wallet on whose behalf we delete the container
|
||||||
wallet (WalletInfo): path to a wallet on whose behalf we delete the container
|
|
||||||
cid (str): ID of the container to delete
|
cid (str): ID of the container to delete
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
force (bool): do not check whether container contains locks and remove immediately
|
force (bool): do not check whether container contains locks and remove immediately
|
||||||
session_token: a path to session token file
|
session_token: a path to session token file
|
||||||
|
timeout: Timeout for the operation.
|
||||||
This function doesn't return anything.
|
This function doesn't return anything.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
cli.container.delete(
|
cli.container.delete(
|
||||||
|
wallet=wallet,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
force=force,
|
force=force,
|
||||||
session=session_token,
|
session=session_token,
|
||||||
await_mode=await_mode,
|
await_mode=await_mode,
|
||||||
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -330,22 +346,26 @@ def _parse_cid(output: str) -> str:
|
||||||
|
|
||||||
@reporter.step("Search container by name")
|
@reporter.step("Search container by name")
|
||||||
def search_container_by_name(name: str, node: ClusterNode):
|
def search_container_by_name(name: str, node: ClusterNode):
|
||||||
resolver_cls = load_plugin("frostfs.testlib.bucket_cid_resolver", node.host.config.product)
|
node_shell = node.host.get_shell()
|
||||||
resolver: BucketContainerResolver = resolver_cls()
|
output = node_shell.exec(f"curl -I HEAD http://127.0.0.1:8084/{name}")
|
||||||
return resolver.resolve(node, name)
|
pattern = r"X-Container-Id: (\S+)"
|
||||||
|
cid = re.findall(pattern, output.stdout)
|
||||||
|
if cid:
|
||||||
|
return cid[0]
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Search for nodes with a container")
|
@reporter.step("Search for nodes with a container")
|
||||||
def search_nodes_with_container(
|
def search_nodes_with_container(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
cluster: Cluster,
|
cluster: Cluster,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
) -> list[ClusterNode]:
|
) -> list[ClusterNode]:
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.container.search_node(rpc_endpoint=endpoint, cid=cid, timeout=timeout)
|
result = cli.container.search_node(rpc_endpoint=endpoint, wallet=wallet, cid=cid, timeout=timeout)
|
||||||
|
|
||||||
pattern = r"[0-9]+(?:\.[0-9]+){3}"
|
pattern = r"[0-9]+(?:\.[0-9]+){3}"
|
||||||
nodes_ip = list(set(re.findall(pattern, result.stdout)))
|
nodes_ip = list(set(re.findall(pattern, result.stdout)))
|
||||||
|
|
|
@ -9,10 +9,9 @@ from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import FrostfsCli
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
from frostfs_testlib.cli.neogo import NeoGo
|
from frostfs_testlib.cli.neogo import NeoGo
|
||||||
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
|
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
|
||||||
from frostfs_testlib.resources.common import ASSETS_DIR
|
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
|
||||||
from frostfs_testlib.utils import json_utils
|
from frostfs_testlib.utils import json_utils
|
||||||
from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
|
from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
|
||||||
|
|
||||||
|
@ -21,7 +20,7 @@ logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
@reporter.step("Get object from random node")
|
@reporter.step("Get object from random node")
|
||||||
def get_object_from_random_node(
|
def get_object_from_random_node(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
@ -29,6 +28,7 @@ def get_object_from_random_node(
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
write_object: Optional[str] = None,
|
write_object: Optional[str] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
no_progress: bool = True,
|
no_progress: bool = True,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -44,6 +44,7 @@ def get_object_from_random_node(
|
||||||
cluster: cluster object
|
cluster: cluster object
|
||||||
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
|
bearer (optional, str): path to Bearer Token file, appends to `--bearer` key
|
||||||
write_object (optional, str): path to downloaded file, appends to `--file` key
|
write_object (optional, str): path to downloaded file, appends to `--file` key
|
||||||
|
wallet_config(optional, str): path to the wallet config
|
||||||
no_progress(optional, bool): do not show progress bar
|
no_progress(optional, bool): do not show progress bar
|
||||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||||
session (optional, dict): path to a JSON-encoded container session token
|
session (optional, dict): path to a JSON-encoded container session token
|
||||||
|
@ -61,6 +62,7 @@ def get_object_from_random_node(
|
||||||
bearer,
|
bearer,
|
||||||
write_object,
|
write_object,
|
||||||
xhdr,
|
xhdr,
|
||||||
|
wallet_config,
|
||||||
no_progress,
|
no_progress,
|
||||||
session,
|
session,
|
||||||
timeout,
|
timeout,
|
||||||
|
@ -69,7 +71,7 @@ def get_object_from_random_node(
|
||||||
|
|
||||||
@reporter.step("Get object from {endpoint}")
|
@reporter.step("Get object from {endpoint}")
|
||||||
def get_object(
|
def get_object(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
@ -77,6 +79,7 @@ def get_object(
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
write_object: Optional[str] = None,
|
write_object: Optional[str] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
no_progress: bool = True,
|
no_progress: bool = True,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -85,13 +88,14 @@ def get_object(
|
||||||
GET from FrostFS.
|
GET from FrostFS.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
wallet (WalletInfo): wallet on whose behalf GET is done
|
wallet (str): wallet on whose behalf GET is done
|
||||||
cid (str): ID of Container where we get the Object from
|
cid (str): ID of Container where we get the Object from
|
||||||
oid (str): Object ID
|
oid (str): Object ID
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||||
write_object: path to downloaded file, appends to `--file` key
|
write_object: path to downloaded file, appends to `--file` key
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
|
wallet_config(optional, str): path to the wallet config
|
||||||
no_progress(optional, bool): do not show progress bar
|
no_progress(optional, bool): do not show progress bar
|
||||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||||
session (optional, dict): path to a JSON-encoded container session token
|
session (optional, dict): path to a JSON-encoded container session token
|
||||||
|
@ -104,9 +108,10 @@ def get_object(
|
||||||
write_object = str(uuid.uuid4())
|
write_object = str(uuid.uuid4())
|
||||||
file_path = os.path.join(ASSETS_DIR, write_object)
|
file_path = os.path.join(ASSETS_DIR, write_object)
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
cli.object.get(
|
cli.object.get(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=wallet,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
file=file_path,
|
file=file_path,
|
||||||
|
@ -122,13 +127,14 @@ def get_object(
|
||||||
|
|
||||||
@reporter.step("Get Range Hash from {endpoint}")
|
@reporter.step("Get Range Hash from {endpoint}")
|
||||||
def get_range_hash(
|
def get_range_hash(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
range_cut: str,
|
range_cut: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -145,15 +151,17 @@ def get_range_hash(
|
||||||
range_cut: Range to take hash from in the form offset1:length1,...,
|
range_cut: Range to take hash from in the form offset1:length1,...,
|
||||||
value to pass to the `--range` parameter
|
value to pass to the `--range` parameter
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
|
wallet_config: path to the wallet config
|
||||||
xhdr: Request X-Headers in form of Key=Values
|
xhdr: Request X-Headers in form of Key=Values
|
||||||
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
|
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
Returns:
|
Returns:
|
||||||
None
|
None
|
||||||
"""
|
"""
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.object.hash(
|
result = cli.object.hash(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=wallet,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
range=range_cut,
|
range=range_cut,
|
||||||
|
@ -169,7 +177,7 @@ def get_range_hash(
|
||||||
|
|
||||||
@reporter.step("Put object to random node")
|
@reporter.step("Put object to random node")
|
||||||
def put_object_to_random_node(
|
def put_object_to_random_node(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
path: str,
|
path: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
@ -178,6 +186,7 @@ def put_object_to_random_node(
|
||||||
copies_number: Optional[int] = None,
|
copies_number: Optional[int] = None,
|
||||||
attributes: Optional[dict] = None,
|
attributes: Optional[dict] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
expire_at: Optional[int] = None,
|
expire_at: Optional[int] = None,
|
||||||
no_progress: bool = True,
|
no_progress: bool = True,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
|
@ -196,6 +205,7 @@ def put_object_to_random_node(
|
||||||
copies_number: Number of copies of the object to store within the RPC call
|
copies_number: Number of copies of the object to store within the RPC call
|
||||||
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
||||||
cluster: cluster under test
|
cluster: cluster under test
|
||||||
|
wallet_config: path to the wallet config
|
||||||
no_progress: do not show progress bar
|
no_progress: do not show progress bar
|
||||||
expire_at: Last epoch in the life of the object
|
expire_at: Last epoch in the life of the object
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
|
@ -216,6 +226,7 @@ def put_object_to_random_node(
|
||||||
copies_number,
|
copies_number,
|
||||||
attributes,
|
attributes,
|
||||||
xhdr,
|
xhdr,
|
||||||
|
wallet_config,
|
||||||
expire_at,
|
expire_at,
|
||||||
no_progress,
|
no_progress,
|
||||||
session,
|
session,
|
||||||
|
@ -225,7 +236,7 @@ def put_object_to_random_node(
|
||||||
|
|
||||||
@reporter.step("Put object at {endpoint} in container {cid}")
|
@reporter.step("Put object at {endpoint} in container {cid}")
|
||||||
def put_object(
|
def put_object(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
path: str,
|
path: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
@ -234,6 +245,7 @@ def put_object(
|
||||||
copies_number: Optional[int] = None,
|
copies_number: Optional[int] = None,
|
||||||
attributes: Optional[dict] = None,
|
attributes: Optional[dict] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
expire_at: Optional[int] = None,
|
expire_at: Optional[int] = None,
|
||||||
no_progress: bool = True,
|
no_progress: bool = True,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
|
@ -251,6 +263,7 @@ def put_object(
|
||||||
copies_number: Number of copies of the object to store within the RPC call
|
copies_number: Number of copies of the object to store within the RPC call
|
||||||
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
attributes: User attributes in form of Key1=Value1,Key2=Value2
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
|
wallet_config: path to the wallet config
|
||||||
no_progress: do not show progress bar
|
no_progress: do not show progress bar
|
||||||
expire_at: Last epoch in the life of the object
|
expire_at: Last epoch in the life of the object
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
|
@ -260,9 +273,10 @@ def put_object(
|
||||||
(str): ID of uploaded Object
|
(str): ID of uploaded Object
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.object.put(
|
result = cli.object.put(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=wallet,
|
||||||
file=path,
|
file=path,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
attributes=attributes,
|
attributes=attributes,
|
||||||
|
@ -283,12 +297,13 @@ def put_object(
|
||||||
|
|
||||||
@reporter.step("Delete object {cid}/{oid} from {endpoint}")
|
@reporter.step("Delete object {cid}/{oid} from {endpoint}")
|
||||||
def delete_object(
|
def delete_object(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
bearer: str = "",
|
bearer: str = "",
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -303,6 +318,7 @@ def delete_object(
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
|
wallet_config: path to the wallet config
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
session: path to a JSON-encoded container session token
|
session: path to a JSON-encoded container session token
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
|
@ -310,9 +326,10 @@ def delete_object(
|
||||||
(str): Tombstone ID
|
(str): Tombstone ID
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.object.delete(
|
result = cli.object.delete(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=wallet,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
|
@ -328,12 +345,13 @@ def delete_object(
|
||||||
|
|
||||||
@reporter.step("Get Range")
|
@reporter.step("Get Range")
|
||||||
def get_range(
|
def get_range(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
range_cut: str,
|
range_cut: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
bearer: str = "",
|
bearer: str = "",
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
|
@ -350,6 +368,7 @@ def get_range(
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
bearer: path to Bearer Token file, appends to `--bearer` key
|
bearer: path to Bearer Token file, appends to `--bearer` key
|
||||||
|
wallet_config: path to the wallet config
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
session: path to a JSON-encoded container session token
|
session: path to a JSON-encoded container session token
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
|
@ -358,9 +377,10 @@ def get_range(
|
||||||
"""
|
"""
|
||||||
range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
|
range_file_path = os.path.join(ASSETS_DIR, str(uuid.uuid4()))
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
cli.object.range(
|
cli.object.range(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=wallet,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
range=range_cut,
|
range=range_cut,
|
||||||
|
@ -378,7 +398,7 @@ def get_range(
|
||||||
|
|
||||||
@reporter.step("Lock Object")
|
@reporter.step("Lock Object")
|
||||||
def lock_object(
|
def lock_object(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
@ -388,6 +408,7 @@ def lock_object(
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
bearer: Optional[str] = None,
|
bearer: Optional[str] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
|
@ -414,12 +435,13 @@ def lock_object(
|
||||||
Lock object ID
|
Lock object ID
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.object.lock(
|
result = cli.object.lock(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
lifetime=lifetime,
|
lifetime=lifetime,
|
||||||
expire_at=expire_at,
|
expire_at=expire_at,
|
||||||
address=address,
|
address=address,
|
||||||
|
wallet=wallet,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
|
@ -437,13 +459,14 @@ def lock_object(
|
||||||
|
|
||||||
@reporter.step("Search object")
|
@reporter.step("Search object")
|
||||||
def search_object(
|
def search_object(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
bearer: str = "",
|
bearer: str = "",
|
||||||
filters: Optional[dict] = None,
|
filters: Optional[dict] = None,
|
||||||
expected_objects_list: Optional[list] = None,
|
expected_objects_list: Optional[list] = None,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
phy: bool = False,
|
phy: bool = False,
|
||||||
|
@ -461,6 +484,7 @@ def search_object(
|
||||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
filters: key=value pairs to filter Objects
|
filters: key=value pairs to filter Objects
|
||||||
expected_objects_list: a list of ObjectIDs to compare found Objects with
|
expected_objects_list: a list of ObjectIDs to compare found Objects with
|
||||||
|
wallet_config: path to the wallet config
|
||||||
xhdr: Request X-Headers in form of Key=Value
|
xhdr: Request X-Headers in form of Key=Value
|
||||||
session: path to a JSON-encoded container session token
|
session: path to a JSON-encoded container session token
|
||||||
phy: Search physically stored objects.
|
phy: Search physically stored objects.
|
||||||
|
@ -471,9 +495,10 @@ def search_object(
|
||||||
list of found ObjectIDs
|
list of found ObjectIDs
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.object.search(
|
result = cli.object.search(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=wallet,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
xhdr=xhdr,
|
xhdr=xhdr,
|
||||||
|
@ -488,18 +513,23 @@ def search_object(
|
||||||
|
|
||||||
if expected_objects_list:
|
if expected_objects_list:
|
||||||
if sorted(found_objects) == sorted(expected_objects_list):
|
if sorted(found_objects) == sorted(expected_objects_list):
|
||||||
logger.info(f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'")
|
logger.info(
|
||||||
|
f"Found objects list '{found_objects}' " f"is equal for expected list '{expected_objects_list}'"
|
||||||
|
)
|
||||||
else:
|
else:
|
||||||
logger.warning(f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'")
|
logger.warning(
|
||||||
|
f"Found object list {found_objects} " f"is not equal to expected list '{expected_objects_list}'"
|
||||||
|
)
|
||||||
|
|
||||||
return found_objects
|
return found_objects
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get netmap netinfo")
|
@reporter.step("Get netmap netinfo")
|
||||||
def get_netmap_netinfo(
|
def get_netmap_netinfo(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
address: Optional[str] = None,
|
address: Optional[str] = None,
|
||||||
ttl: Optional[int] = None,
|
ttl: Optional[int] = None,
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
|
@ -509,7 +539,7 @@ def get_netmap_netinfo(
|
||||||
Get netmap netinfo output from node
|
Get netmap netinfo output from node
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
wallet (WalletInfo): wallet on whose behalf request is done
|
wallet (str): wallet on whose behalf request is done
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
endpoint (optional, str): FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||||
address: Address of wallet account
|
address: Address of wallet account
|
||||||
|
@ -522,8 +552,9 @@ def get_netmap_netinfo(
|
||||||
(dict): dict of parsed command output
|
(dict): dict of parsed command output
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
output = cli.netmap.netinfo(
|
output = cli.netmap.netinfo(
|
||||||
|
wallet=wallet,
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
address=address,
|
address=address,
|
||||||
ttl=ttl,
|
ttl=ttl,
|
||||||
|
@ -547,7 +578,7 @@ def get_netmap_netinfo(
|
||||||
|
|
||||||
@reporter.step("Head object")
|
@reporter.step("Head object")
|
||||||
def head_object(
|
def head_object(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
@ -557,6 +588,7 @@ def head_object(
|
||||||
json_output: bool = True,
|
json_output: bool = True,
|
||||||
is_raw: bool = False,
|
is_raw: bool = False,
|
||||||
is_direct: bool = False,
|
is_direct: bool = False,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
session: Optional[str] = None,
|
session: Optional[str] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
):
|
):
|
||||||
|
@ -564,7 +596,7 @@ def head_object(
|
||||||
HEAD an Object.
|
HEAD an Object.
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
wallet (WalletInfo): wallet on whose behalf HEAD is done
|
wallet (str): wallet on whose behalf HEAD is done
|
||||||
cid (str): ID of Container where we get the Object from
|
cid (str): ID of Container where we get the Object from
|
||||||
oid (str): ObjectID to HEAD
|
oid (str): ObjectID to HEAD
|
||||||
shell: executor for cli command
|
shell: executor for cli command
|
||||||
|
@ -576,6 +608,7 @@ def head_object(
|
||||||
turns into `--raw` key
|
turns into `--raw` key
|
||||||
is_direct(optional, bool): send request directly to the node or not; this flag
|
is_direct(optional, bool): send request directly to the node or not; this flag
|
||||||
turns into `--ttl 1` key
|
turns into `--ttl 1` key
|
||||||
|
wallet_config(optional, str): path to the wallet config
|
||||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||||
session (optional, dict): path to a JSON-encoded container session token
|
session (optional, dict): path to a JSON-encoded container session token
|
||||||
timeout: Timeout for the operation.
|
timeout: Timeout for the operation.
|
||||||
|
@ -586,9 +619,10 @@ def head_object(
|
||||||
(str): HEAD response as a plain text
|
(str): HEAD response as a plain text
|
||||||
"""
|
"""
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
result = cli.object.head(
|
result = cli.object.head(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=wallet,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
|
@ -639,7 +673,7 @@ def head_object(
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Run neo-go dump-keys")
|
@reporter.step("Run neo-go dump-keys")
|
||||||
def neo_go_dump_keys(shell: Shell, wallet: WalletInfo) -> dict:
|
def neo_go_dump_keys(shell: Shell, wallet: str) -> dict:
|
||||||
"""
|
"""
|
||||||
Run neo-go dump keys command
|
Run neo-go dump keys command
|
||||||
|
|
||||||
|
@ -698,24 +732,23 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
|
||||||
@reporter.step("Search object nodes")
|
@reporter.step("Search object nodes")
|
||||||
def get_object_nodes(
|
def get_object_nodes(
|
||||||
cluster: Cluster,
|
cluster: Cluster,
|
||||||
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
alive_node: ClusterNode,
|
shell: Shell,
|
||||||
|
endpoint: str,
|
||||||
bearer: str = "",
|
bearer: str = "",
|
||||||
xhdr: Optional[dict] = None,
|
xhdr: Optional[dict] = None,
|
||||||
is_direct: bool = False,
|
is_direct: bool = False,
|
||||||
verify_presence_all: bool = False,
|
verify_presence_all: bool = False,
|
||||||
|
wallet_config: Optional[str] = None,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
) -> list[ClusterNode]:
|
) -> list[ClusterNode]:
|
||||||
shell = alive_node.host.get_shell()
|
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
|
||||||
endpoint = alive_node.storage_node.get_rpc_endpoint()
|
|
||||||
wallet = alive_node.storage_node.get_remote_wallet_path()
|
|
||||||
wallet_config = alive_node.storage_node.get_remote_wallet_config_path()
|
|
||||||
|
|
||||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)
|
|
||||||
|
|
||||||
result_object_nodes = cli.object.nodes(
|
result_object_nodes = cli.object.nodes(
|
||||||
rpc_endpoint=endpoint,
|
rpc_endpoint=endpoint,
|
||||||
|
wallet=wallet,
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
|
@ -727,7 +760,9 @@ def get_object_nodes(
|
||||||
|
|
||||||
parsing_output = parse_cmd_table(result_object_nodes.stdout, "|")
|
parsing_output = parse_cmd_table(result_object_nodes.stdout, "|")
|
||||||
list_object_nodes = [
|
list_object_nodes = [
|
||||||
node for node in parsing_output if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true"
|
node
|
||||||
|
for node in parsing_output
|
||||||
|
if node["should_contain_object"] == "true" and node["actually_contains_object"] == "true"
|
||||||
]
|
]
|
||||||
|
|
||||||
netmap_nodes_list = parse_netmap_output(
|
netmap_nodes_list = parse_netmap_output(
|
||||||
|
@ -744,7 +779,10 @@ def get_object_nodes(
|
||||||
]
|
]
|
||||||
|
|
||||||
result = [
|
result = [
|
||||||
cluster_node for netmap_node in netmap_nodes for cluster_node in cluster.cluster_nodes if netmap_node.node == cluster_node.host_ip
|
cluster_node
|
||||||
|
for netmap_node in netmap_nodes
|
||||||
|
for cluster_node in cluster.cluster_nodes
|
||||||
|
if netmap_node.node == cluster_node.host_ip
|
||||||
]
|
]
|
||||||
|
|
||||||
return result
|
return result
|
||||||
|
|
|
@ -14,11 +14,11 @@ from typing import Optional, Tuple
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
|
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
|
||||||
|
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.object import head_object
|
from frostfs_testlib.steps.cli.object import head_object
|
||||||
from frostfs_testlib.storage.cluster import Cluster, StorageNode
|
from frostfs_testlib.storage.cluster import Cluster, StorageNode
|
||||||
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
@ -44,7 +44,7 @@ def get_storage_object_chunks(
|
||||||
|
|
||||||
with reporter.step(f"Get complex object chunks (f{storage_object.oid})"):
|
with reporter.step(f"Get complex object chunks (f{storage_object.oid})"):
|
||||||
split_object_id = get_link_object(
|
split_object_id = get_link_object(
|
||||||
storage_object.wallet,
|
storage_object.wallet_file_path,
|
||||||
storage_object.cid,
|
storage_object.cid,
|
||||||
storage_object.oid,
|
storage_object.oid,
|
||||||
shell,
|
shell,
|
||||||
|
@ -53,7 +53,7 @@ def get_storage_object_chunks(
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
head = head_object(
|
head = head_object(
|
||||||
storage_object.wallet,
|
storage_object.wallet_file_path,
|
||||||
storage_object.cid,
|
storage_object.cid,
|
||||||
split_object_id,
|
split_object_id,
|
||||||
shell,
|
shell,
|
||||||
|
@ -96,7 +96,7 @@ def get_complex_object_split_ranges(
|
||||||
chunks_ids = get_storage_object_chunks(storage_object, shell, cluster)
|
chunks_ids = get_storage_object_chunks(storage_object, shell, cluster)
|
||||||
for chunk_id in chunks_ids:
|
for chunk_id in chunks_ids:
|
||||||
head = head_object(
|
head = head_object(
|
||||||
storage_object.wallet,
|
storage_object.wallet_file_path,
|
||||||
storage_object.cid,
|
storage_object.cid,
|
||||||
chunk_id,
|
chunk_id,
|
||||||
shell,
|
shell,
|
||||||
|
@ -114,12 +114,13 @@ def get_complex_object_split_ranges(
|
||||||
|
|
||||||
@reporter.step("Get Link Object")
|
@reporter.step("Get Link Object")
|
||||||
def get_link_object(
|
def get_link_object(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
nodes: list[StorageNode],
|
nodes: list[StorageNode],
|
||||||
bearer: str = "",
|
bearer: str = "",
|
||||||
|
wallet_config: str = DEFAULT_WALLET_CONFIG,
|
||||||
is_direct: bool = True,
|
is_direct: bool = True,
|
||||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||||
):
|
):
|
||||||
|
@ -153,6 +154,7 @@ def get_link_object(
|
||||||
is_raw=True,
|
is_raw=True,
|
||||||
is_direct=is_direct,
|
is_direct=is_direct,
|
||||||
bearer=bearer,
|
bearer=bearer,
|
||||||
|
wallet_config=wallet_config,
|
||||||
timeout=timeout,
|
timeout=timeout,
|
||||||
)
|
)
|
||||||
if resp["link"]:
|
if resp["link"]:
|
||||||
|
@ -165,7 +167,7 @@ def get_link_object(
|
||||||
|
|
||||||
@reporter.step("Get Last Object")
|
@reporter.step("Get Last Object")
|
||||||
def get_last_object(
|
def get_last_object(
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
|
|
|
@ -4,7 +4,13 @@ from typing import Optional
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
|
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
|
||||||
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
|
from frostfs_testlib.resources.cli import (
|
||||||
|
CLI_DEFAULT_TIMEOUT,
|
||||||
|
FROSTFS_ADM_CONFIG_PATH,
|
||||||
|
FROSTFS_ADM_EXEC,
|
||||||
|
FROSTFS_CLI_EXEC,
|
||||||
|
NEOGO_EXECUTABLE,
|
||||||
|
)
|
||||||
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.payment_neogo import get_contract_hash
|
from frostfs_testlib.steps.payment_neogo import get_contract_hash
|
||||||
|
@ -81,7 +87,7 @@ def tick_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]
|
||||||
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
|
alive_node = alive_node if alive_node else cluster.services(StorageNode)[0]
|
||||||
remote_shell = alive_node.host.get_shell()
|
remote_shell = alive_node.host.get_shell()
|
||||||
|
|
||||||
if "force_transactions" not in alive_node.host.config.attributes:
|
if FROSTFS_ADM_EXEC and FROSTFS_ADM_CONFIG_PATH:
|
||||||
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
|
# If frostfs-adm is available, then we tick epoch with it (to be consistent with UAT tests)
|
||||||
frostfs_adm = FrostfsAdm(
|
frostfs_adm = FrostfsAdm(
|
||||||
shell=remote_shell,
|
shell=remote_shell,
|
||||||
|
|
|
@ -11,14 +11,13 @@ from urllib.parse import quote_plus
|
||||||
import requests
|
import requests
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import GenericCli
|
|
||||||
from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
|
from frostfs_testlib.resources.common import SIMPLE_OBJECT_SIZE
|
||||||
from frostfs_testlib.s3.aws_cli_client import command_options
|
from frostfs_testlib.s3.aws_cli_client import command_options
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.shell.local_shell import LocalShell
|
from frostfs_testlib.shell.local_shell import LocalShell
|
||||||
from frostfs_testlib.steps.cli.object import get_object
|
from frostfs_testlib.steps.cli.object import get_object
|
||||||
from frostfs_testlib.steps.storage_policy import get_nodes_without_object
|
from frostfs_testlib.steps.storage_policy import get_nodes_without_object
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode, StorageNode
|
from frostfs_testlib.storage.cluster import StorageNode
|
||||||
from frostfs_testlib.testing.test_control import retry
|
from frostfs_testlib.testing.test_control import retry
|
||||||
from frostfs_testlib.utils.file_utils import get_file_hash
|
from frostfs_testlib.utils.file_utils import get_file_hash
|
||||||
|
|
||||||
|
@ -32,7 +31,8 @@ local_shell = LocalShell()
|
||||||
def get_via_http_gate(
|
def get_via_http_gate(
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
node: ClusterNode,
|
endpoint: str,
|
||||||
|
http_hostname: str,
|
||||||
request_path: Optional[str] = None,
|
request_path: Optional[str] = None,
|
||||||
timeout: Optional[int] = 300,
|
timeout: Optional[int] = 300,
|
||||||
):
|
):
|
||||||
|
@ -40,19 +40,18 @@ def get_via_http_gate(
|
||||||
This function gets given object from HTTP gate
|
This function gets given object from HTTP gate
|
||||||
cid: container id to get object from
|
cid: container id to get object from
|
||||||
oid: object ID
|
oid: object ID
|
||||||
node: node to make request
|
endpoint: http gate endpoint
|
||||||
|
http_hostname: http host name on the node
|
||||||
request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}]
|
request_path: (optional) http request, if ommited - use default [{endpoint}/get/{cid}/{oid}]
|
||||||
"""
|
"""
|
||||||
|
|
||||||
# if `request_path` parameter omitted, use default
|
# if `request_path` parameter omitted, use default
|
||||||
if request_path is None:
|
if request_path is None:
|
||||||
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
|
request = f"{endpoint}/get/{cid}/{oid}"
|
||||||
else:
|
else:
|
||||||
request = f"{node.http_gate.get_endpoint()}{request_path}"
|
request = f"{endpoint}{request_path}"
|
||||||
|
|
||||||
resp = requests.get(
|
resp = requests.get(request, headers={"Host": http_hostname}, stream=True, timeout=timeout, verify=False)
|
||||||
request, headers={"Host": node.storage_node.get_http_hostname()[0]}, stream=True, timeout=timeout, verify=False
|
|
||||||
)
|
|
||||||
|
|
||||||
if not resp.ok:
|
if not resp.ok:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
|
@ -73,14 +72,15 @@ def get_via_http_gate(
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get via Zip HTTP Gate")
|
@reporter.step("Get via Zip HTTP Gate")
|
||||||
def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Optional[int] = 300):
|
def get_via_zip_http_gate(cid: str, prefix: str, endpoint: str, http_hostname: str, timeout: Optional[int] = 300):
|
||||||
"""
|
"""
|
||||||
This function gets given object from HTTP gate
|
This function gets given object from HTTP gate
|
||||||
cid: container id to get object from
|
cid: container id to get object from
|
||||||
prefix: common prefix
|
prefix: common prefix
|
||||||
node: node to make request
|
endpoint: http gate endpoint
|
||||||
|
http_hostname: http host name on the node
|
||||||
"""
|
"""
|
||||||
request = f"{node.http_gate.get_endpoint()}/zip/{cid}/{prefix}"
|
request = f"{endpoint}/zip/{cid}/{prefix}"
|
||||||
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
|
resp = requests.get(request, stream=True, timeout=timeout, verify=False)
|
||||||
|
|
||||||
if not resp.ok:
|
if not resp.ok:
|
||||||
|
@ -109,7 +109,8 @@ def get_via_zip_http_gate(cid: str, prefix: str, node: ClusterNode, timeout: Opt
|
||||||
def get_via_http_gate_by_attribute(
|
def get_via_http_gate_by_attribute(
|
||||||
cid: str,
|
cid: str,
|
||||||
attribute: dict,
|
attribute: dict,
|
||||||
node: ClusterNode,
|
endpoint: str,
|
||||||
|
http_hostname: str,
|
||||||
request_path: Optional[str] = None,
|
request_path: Optional[str] = None,
|
||||||
timeout: Optional[int] = 300,
|
timeout: Optional[int] = 300,
|
||||||
):
|
):
|
||||||
|
@ -125,13 +126,11 @@ def get_via_http_gate_by_attribute(
|
||||||
attr_value = quote_plus(str(attribute.get(attr_name)))
|
attr_value = quote_plus(str(attribute.get(attr_name)))
|
||||||
# if `request_path` parameter ommited, use default
|
# if `request_path` parameter ommited, use default
|
||||||
if request_path is None:
|
if request_path is None:
|
||||||
request = f"{node.http_gate.get_endpoint()}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
|
request = f"{endpoint}/get_by_attribute/{cid}/{quote_plus(str(attr_name))}/{attr_value}"
|
||||||
else:
|
else:
|
||||||
request = f"{node.http_gate.get_endpoint()}{request_path}"
|
request = f"{endpoint}{request_path}"
|
||||||
|
|
||||||
resp = requests.get(
|
resp = requests.get(request, stream=True, timeout=timeout, verify=False, headers={"Host": http_hostname})
|
||||||
request, stream=True, timeout=timeout, verify=False, headers={"Host": node.storage_node.get_http_hostname()[0]}
|
|
||||||
)
|
|
||||||
|
|
||||||
if not resp.ok:
|
if not resp.ok:
|
||||||
raise Exception(
|
raise Exception(
|
||||||
|
@ -248,18 +247,19 @@ def upload_via_http_gate_curl(
|
||||||
|
|
||||||
@retry(max_attempts=3, sleep_interval=1)
|
@retry(max_attempts=3, sleep_interval=1)
|
||||||
@reporter.step("Get via HTTP Gate using Curl")
|
@reporter.step("Get via HTTP Gate using Curl")
|
||||||
def get_via_http_curl(cid: str, oid: str, node: ClusterNode) -> str:
|
def get_via_http_curl(cid: str, oid: str, endpoint: str, http_hostname: str) -> str:
|
||||||
"""
|
"""
|
||||||
This function gets given object from HTTP gate using curl utility.
|
This function gets given object from HTTP gate using curl utility.
|
||||||
cid: CID to get object from
|
cid: CID to get object from
|
||||||
oid: object OID
|
oid: object OID
|
||||||
node: node for request
|
endpoint: http gate endpoint
|
||||||
|
http_hostname: http host name of the node
|
||||||
"""
|
"""
|
||||||
request = f"{node.http_gate.get_endpoint()}/get/{cid}/{oid}"
|
request = f"{endpoint}/get/{cid}/{oid}"
|
||||||
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
|
file_path = os.path.join(os.getcwd(), ASSETS_DIR, f"{cid}_{oid}_{str(uuid.uuid4())}")
|
||||||
|
|
||||||
curl = GenericCli("curl", node.host)
|
cmd = f'curl -k -H "Host: {http_hostname}" {request} > {file_path}'
|
||||||
curl(f'-k -H "Host: {node.storage_node.get_http_hostname()[0]}"', f"{request} > {file_path}", shell=local_shell)
|
local_shell.exec(cmd)
|
||||||
|
|
||||||
return file_path
|
return file_path
|
||||||
|
|
||||||
|
@ -274,11 +274,12 @@ def _attach_allure_step(request: str, status_code: int, req_type="GET"):
|
||||||
def try_to_get_object_and_expect_error(
|
def try_to_get_object_and_expect_error(
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
node: ClusterNode,
|
|
||||||
error_pattern: str,
|
error_pattern: str,
|
||||||
|
endpoint: str,
|
||||||
|
http_hostname: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
try:
|
try:
|
||||||
get_via_http_gate(cid=cid, oid=oid, node=node)
|
get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
|
||||||
raise AssertionError(f"Expected error on getting object with cid: {cid}")
|
raise AssertionError(f"Expected error on getting object with cid: {cid}")
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
match = error_pattern.casefold() in str(err).casefold()
|
match = error_pattern.casefold() in str(err).casefold()
|
||||||
|
@ -291,10 +292,13 @@ def get_object_by_attr_and_verify_hashes(
|
||||||
file_name: str,
|
file_name: str,
|
||||||
cid: str,
|
cid: str,
|
||||||
attrs: dict,
|
attrs: dict,
|
||||||
node: ClusterNode,
|
endpoint: str,
|
||||||
|
http_hostname: str,
|
||||||
) -> None:
|
) -> None:
|
||||||
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, node=node)
|
got_file_path_http = get_via_http_gate(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
|
||||||
got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs, node=node)
|
got_file_path_http_attr = get_via_http_gate_by_attribute(
|
||||||
|
cid=cid, attribute=attrs, endpoint=endpoint, http_hostname=http_hostname
|
||||||
|
)
|
||||||
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
|
assert_hashes_are_equal(file_name, got_file_path_http, got_file_path_http_attr)
|
||||||
|
|
||||||
|
|
||||||
|
@ -305,7 +309,8 @@ def verify_object_hash(
|
||||||
cid: str,
|
cid: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
nodes: list[StorageNode],
|
nodes: list[StorageNode],
|
||||||
request_node: ClusterNode,
|
endpoint: str,
|
||||||
|
http_hostname: str,
|
||||||
object_getter=None,
|
object_getter=None,
|
||||||
) -> None:
|
) -> None:
|
||||||
|
|
||||||
|
@ -331,7 +336,7 @@ def verify_object_hash(
|
||||||
shell=shell,
|
shell=shell,
|
||||||
endpoint=random_node.get_rpc_endpoint(),
|
endpoint=random_node.get_rpc_endpoint(),
|
||||||
)
|
)
|
||||||
got_file_path_http = object_getter(cid=cid, oid=oid, node=request_node)
|
got_file_path_http = object_getter(cid=cid, oid=oid, endpoint=endpoint, http_hostname=http_hostname)
|
||||||
|
|
||||||
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
|
assert_hashes_are_equal(file_name, got_file_path, got_file_path_http)
|
||||||
|
|
||||||
|
@ -360,9 +365,10 @@ def attr_into_str_header_curl(attrs: dict) -> list:
|
||||||
def try_to_get_object_via_passed_request_and_expect_error(
|
def try_to_get_object_via_passed_request_and_expect_error(
|
||||||
cid: str,
|
cid: str,
|
||||||
oid: str,
|
oid: str,
|
||||||
node: ClusterNode,
|
|
||||||
error_pattern: str,
|
error_pattern: str,
|
||||||
|
endpoint: str,
|
||||||
http_request_path: str,
|
http_request_path: str,
|
||||||
|
http_hostname: str,
|
||||||
attrs: Optional[dict] = None,
|
attrs: Optional[dict] = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
try:
|
try:
|
||||||
|
@ -370,15 +376,17 @@ def try_to_get_object_via_passed_request_and_expect_error(
|
||||||
get_via_http_gate(
|
get_via_http_gate(
|
||||||
cid=cid,
|
cid=cid,
|
||||||
oid=oid,
|
oid=oid,
|
||||||
node=node,
|
endpoint=endpoint,
|
||||||
request_path=http_request_path,
|
request_path=http_request_path,
|
||||||
|
http_hostname=http_hostname,
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
get_via_http_gate_by_attribute(
|
get_via_http_gate_by_attribute(
|
||||||
cid=cid,
|
cid=cid,
|
||||||
attribute=attrs,
|
attribute=attrs,
|
||||||
node=node,
|
endpoint=endpoint,
|
||||||
request_path=http_request_path,
|
request_path=http_request_path,
|
||||||
|
http_hostname=http_hostname,
|
||||||
)
|
)
|
||||||
raise AssertionError(f"Expected error on getting object with cid: {cid}")
|
raise AssertionError(f"Expected error on getting object with cid: {cid}")
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
|
|
|
@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
|
from frostfs_testlib.steps.epoch import tick_epoch, wait_for_epochs_align
|
||||||
from frostfs_testlib.storage.cluster import Cluster, StorageNode
|
from frostfs_testlib.storage.cluster import Cluster, StorageNode
|
||||||
|
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
|
||||||
from frostfs_testlib.utils import datetime_utils
|
from frostfs_testlib.utils import datetime_utils
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
@ -51,24 +52,9 @@ def storage_node_healthcheck(node: StorageNode) -> HealthStatus:
|
||||||
Returns:
|
Returns:
|
||||||
health status as HealthStatus object.
|
health status as HealthStatus object.
|
||||||
"""
|
"""
|
||||||
|
command = "control healthcheck"
|
||||||
host = node.host
|
output = _run_control_command_with_retries(node, command)
|
||||||
service_config = host.get_service_config(node.name)
|
return HealthStatus.from_stdout(output)
|
||||||
wallet_path = service_config.attributes["wallet_path"]
|
|
||||||
wallet_password = service_config.attributes["wallet_password"]
|
|
||||||
control_endpoint = service_config.attributes["control_endpoint"]
|
|
||||||
|
|
||||||
shell = host.get_shell()
|
|
||||||
wallet_config_path = f"/tmp/{node.name}-config.yaml"
|
|
||||||
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
|
|
||||||
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
|
|
||||||
|
|
||||||
cli_config = host.get_cli_config("frostfs-cli")
|
|
||||||
|
|
||||||
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
|
|
||||||
result = cli.control.healthcheck(control_endpoint)
|
|
||||||
|
|
||||||
return HealthStatus.from_stdout(result.stdout)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Set status for {node}")
|
@reporter.step("Set status for {node}")
|
||||||
|
@ -80,21 +66,8 @@ def storage_node_set_status(node: StorageNode, status: str, retries: int = 0) ->
|
||||||
status: online or offline.
|
status: online or offline.
|
||||||
retries (optional, int): number of retry attempts if it didn't work from the first time
|
retries (optional, int): number of retry attempts if it didn't work from the first time
|
||||||
"""
|
"""
|
||||||
host = node.host
|
command = f"control set-status --status {status}"
|
||||||
service_config = host.get_service_config(node.name)
|
_run_control_command_with_retries(node, command, retries)
|
||||||
wallet_path = service_config.attributes["wallet_path"]
|
|
||||||
wallet_password = service_config.attributes["wallet_password"]
|
|
||||||
control_endpoint = service_config.attributes["control_endpoint"]
|
|
||||||
|
|
||||||
shell = host.get_shell()
|
|
||||||
wallet_config_path = f"/tmp/{node.name}-config.yaml"
|
|
||||||
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
|
|
||||||
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
|
|
||||||
|
|
||||||
cli_config = host.get_cli_config("frostfs-cli")
|
|
||||||
|
|
||||||
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
|
|
||||||
cli.control.set_status(control_endpoint, status)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get netmap snapshot")
|
@reporter.step("Get netmap snapshot")
|
||||||
|
@ -118,7 +91,7 @@ def get_netmap_snapshot(node: StorageNode, shell: Shell) -> str:
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get shard list for {node}")
|
@reporter.step("Get shard list for {node}")
|
||||||
def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]:
|
def node_shard_list(node: StorageNode) -> list[str]:
|
||||||
"""
|
"""
|
||||||
The function returns list of shards for specified storage node.
|
The function returns list of shards for specified storage node.
|
||||||
Args:
|
Args:
|
||||||
|
@ -126,72 +99,31 @@ def node_shard_list(node: StorageNode, json: Optional[bool] = None) -> list[str]
|
||||||
Returns:
|
Returns:
|
||||||
list of shards.
|
list of shards.
|
||||||
"""
|
"""
|
||||||
host = node.host
|
command = "control shards list"
|
||||||
service_config = host.get_service_config(node.name)
|
output = _run_control_command_with_retries(node, command)
|
||||||
wallet_path = service_config.attributes["wallet_path"]
|
return re.findall(r"Shard (.*):", output)
|
||||||
wallet_password = service_config.attributes["wallet_password"]
|
|
||||||
control_endpoint = service_config.attributes["control_endpoint"]
|
|
||||||
|
|
||||||
shell = host.get_shell()
|
|
||||||
wallet_config_path = f"/tmp/{node.name}-config.yaml"
|
|
||||||
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
|
|
||||||
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
|
|
||||||
|
|
||||||
cli_config = host.get_cli_config("frostfs-cli")
|
|
||||||
|
|
||||||
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
|
|
||||||
result = cli.shards.list(endpoint=control_endpoint, json_mode=json)
|
|
||||||
|
|
||||||
return re.findall(r"Shard (.*):", result.stdout)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Shard set for {node}")
|
@reporter.step("Shard set for {node}")
|
||||||
def node_shard_set_mode(node: StorageNode, shard: list[str], mode: str) -> None:
|
def node_shard_set_mode(node: StorageNode, shard: str, mode: str) -> str:
|
||||||
"""
|
"""
|
||||||
The function sets mode for specified shard.
|
The function sets mode for specified shard.
|
||||||
Args:
|
Args:
|
||||||
node: node on which shard mode should be set.
|
node: node on which shard mode should be set.
|
||||||
"""
|
"""
|
||||||
host = node.host
|
command = f"control shards set-mode --id {shard} --mode {mode}"
|
||||||
service_config = host.get_service_config(node.name)
|
return _run_control_command_with_retries(node, command)
|
||||||
wallet_path = service_config.attributes["wallet_path"]
|
|
||||||
wallet_password = service_config.attributes["wallet_password"]
|
|
||||||
control_endpoint = service_config.attributes["control_endpoint"]
|
|
||||||
|
|
||||||
shell = host.get_shell()
|
|
||||||
wallet_config_path = f"/tmp/{node.name}-config.yaml"
|
|
||||||
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
|
|
||||||
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
|
|
||||||
|
|
||||||
cli_config = host.get_cli_config("frostfs-cli")
|
|
||||||
|
|
||||||
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
|
|
||||||
cli.shards.set_mode(endpoint=control_endpoint, mode=mode, id=shard)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Drop object from {node}")
|
@reporter.step("Drop object from {node}")
|
||||||
def drop_object(node: StorageNode, cid: str, oid: str) -> None:
|
def drop_object(node: StorageNode, cid: str, oid: str) -> str:
|
||||||
"""
|
"""
|
||||||
The function drops object from specified node.
|
The function drops object from specified node.
|
||||||
Args:
|
Args:
|
||||||
node: node from which object should be dropped.
|
node_id str: node from which object should be dropped.
|
||||||
"""
|
"""
|
||||||
host = node.host
|
command = f"control drop-objects -o {cid}/{oid}"
|
||||||
service_config = host.get_service_config(node.name)
|
return _run_control_command_with_retries(node, command)
|
||||||
wallet_path = service_config.attributes["wallet_path"]
|
|
||||||
wallet_password = service_config.attributes["wallet_password"]
|
|
||||||
control_endpoint = service_config.attributes["control_endpoint"]
|
|
||||||
|
|
||||||
shell = host.get_shell()
|
|
||||||
wallet_config_path = f"/tmp/{node.name}-config.yaml"
|
|
||||||
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
|
|
||||||
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
|
|
||||||
|
|
||||||
cli_config = host.get_cli_config("frostfs-cli")
|
|
||||||
|
|
||||||
cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
|
|
||||||
objects = f"{cid}/{oid}"
|
|
||||||
cli.control.drop_objects(control_endpoint, objects)
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Delete data from host for node {node}")
|
@reporter.step("Delete data from host for node {node}")
|
||||||
|
@ -263,7 +195,7 @@ def check_node_not_in_map(node: StorageNode, shell: Shell, alive_node: Optional[
|
||||||
|
|
||||||
@reporter.step("Wait for node {node} is ready")
|
@reporter.step("Wait for node {node} is ready")
|
||||||
def wait_for_node_to_be_ready(node: StorageNode) -> None:
|
def wait_for_node_to_be_ready(node: StorageNode) -> None:
|
||||||
timeout, attempts = 60, 15
|
timeout, attempts = 30, 6
|
||||||
for _ in range(attempts):
|
for _ in range(attempts):
|
||||||
try:
|
try:
|
||||||
health_check = storage_node_healthcheck(node)
|
health_check = storage_node_healthcheck(node)
|
||||||
|
@ -306,3 +238,38 @@ def remove_nodes_from_map_morph(
|
||||||
config_file=FROSTFS_ADM_CONFIG_PATH,
|
config_file=FROSTFS_ADM_CONFIG_PATH,
|
||||||
)
|
)
|
||||||
frostfsadm.morph.remove_nodes(node_netmap_keys)
|
frostfsadm.morph.remove_nodes(node_netmap_keys)
|
||||||
|
|
||||||
|
|
||||||
|
def _run_control_command_with_retries(node: StorageNode, command: str, retries: int = 0) -> str:
|
||||||
|
for attempt in range(1 + retries): # original attempt + specified retries
|
||||||
|
try:
|
||||||
|
return _run_control_command(node, command)
|
||||||
|
except AssertionError as err:
|
||||||
|
if attempt < retries:
|
||||||
|
logger.warning(f"Command {command} failed with error {err} and will be retried")
|
||||||
|
continue
|
||||||
|
raise AssertionError(f"Command {command} failed with error {err}") from err
|
||||||
|
|
||||||
|
|
||||||
|
def _run_control_command(node: StorageNode, command: str) -> None:
|
||||||
|
host = node.host
|
||||||
|
|
||||||
|
service_config = host.get_service_config(node.name)
|
||||||
|
wallet_path = service_config.attributes["wallet_path"]
|
||||||
|
wallet_password = service_config.attributes["wallet_password"]
|
||||||
|
control_endpoint = service_config.attributes["control_endpoint"]
|
||||||
|
|
||||||
|
shell = host.get_shell()
|
||||||
|
wallet_config_path = f"/tmp/{node.name}-config.yaml"
|
||||||
|
wallet_config = f'password: "{wallet_password}"'
|
||||||
|
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
|
||||||
|
|
||||||
|
cli_config = host.get_cli_config("frostfs-cli")
|
||||||
|
|
||||||
|
# TODO: implement cli.control
|
||||||
|
# cli = FrostfsCli(shell, cli_config.exec_path, wallet_config_path)
|
||||||
|
result = shell.exec(
|
||||||
|
f"{cli_config.exec_path} {command} --endpoint {control_endpoint} "
|
||||||
|
f"--wallet {wallet_path} --config {wallet_config_path}"
|
||||||
|
)
|
||||||
|
return result.stdout
|
||||||
|
|
|
@ -1,16 +1,25 @@
|
||||||
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
|
import uuid
|
||||||
from datetime import datetime, timedelta
|
from datetime import datetime, timedelta
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
from dateutil.parser import parse
|
from dateutil.parser import parse
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
|
from frostfs_testlib.cli import FrostfsAuthmate
|
||||||
|
from frostfs_testlib.resources.cli import FROSTFS_AUTHMATE_EXEC
|
||||||
|
from frostfs_testlib.resources.common import CREDENTIALS_CREATE_TIMEOUT
|
||||||
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import CommandOptions, InteractiveInput, Shell
|
||||||
|
from frostfs_testlib.shell.interfaces import SshCredentials
|
||||||
from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container
|
from frostfs_testlib.steps.cli.container import search_container_by_name, search_nodes_with_container
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
|
||||||
|
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
from frostfs_testlib.utils.cli_utils import _run_with_passwd
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
@ -29,7 +38,9 @@ def check_objects_in_bucket(
|
||||||
assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}"
|
assert bucket_object in bucket_objects, f"Expected object {bucket_object} in objects list {bucket_objects}"
|
||||||
|
|
||||||
for bucket_object in unexpected_objects:
|
for bucket_object in unexpected_objects:
|
||||||
assert bucket_object not in bucket_objects, f"Expected object {bucket_object} not in objects list {bucket_objects}"
|
assert (
|
||||||
|
bucket_object not in bucket_objects
|
||||||
|
), f"Expected object {bucket_object} not in objects list {bucket_objects}"
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Try to get object and got error")
|
@reporter.step("Try to get object and got error")
|
||||||
|
@ -57,7 +68,9 @@ def object_key_from_file_path(full_path: str) -> str:
|
||||||
return os.path.basename(full_path)
|
return os.path.basename(full_path)
|
||||||
|
|
||||||
|
|
||||||
def assert_tags(actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None) -> None:
|
def assert_tags(
|
||||||
|
actual_tags: list, expected_tags: Optional[list] = None, unexpected_tags: Optional[list] = None
|
||||||
|
) -> None:
|
||||||
expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
|
expected_tags = [{"Key": key, "Value": value} for key, value in expected_tags] if expected_tags else []
|
||||||
unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
|
unexpected_tags = [{"Key": key, "Value": value} for key, value in unexpected_tags] if unexpected_tags else []
|
||||||
if expected_tags == []:
|
if expected_tags == []:
|
||||||
|
@ -148,6 +161,43 @@ def assert_s3_acl(acl_grants: list, permitted_users: str):
|
||||||
logger.error("FULL_CONTROL is given to All Users")
|
logger.error("FULL_CONTROL is given to All Users")
|
||||||
|
|
||||||
|
|
||||||
|
@reporter.step("Init S3 Credentials")
|
||||||
|
def init_s3_credentials(
|
||||||
|
wallet: WalletInfo,
|
||||||
|
shell: Shell,
|
||||||
|
cluster: Cluster,
|
||||||
|
policy: Optional[dict] = None,
|
||||||
|
s3gates: Optional[list[S3Gate]] = None,
|
||||||
|
container_placement_policy: Optional[str] = None,
|
||||||
|
):
|
||||||
|
gate_public_keys = []
|
||||||
|
bucket = str(uuid.uuid4())
|
||||||
|
if not s3gates:
|
||||||
|
s3gates = [cluster.s3_gates[0]]
|
||||||
|
for s3gate in s3gates:
|
||||||
|
gate_public_keys.append(s3gate.get_wallet_public_key())
|
||||||
|
frostfs_authmate_exec: FrostfsAuthmate = FrostfsAuthmate(shell, FROSTFS_AUTHMATE_EXEC)
|
||||||
|
issue_secret_output = frostfs_authmate_exec.secret.issue(
|
||||||
|
wallet=wallet.path,
|
||||||
|
peer=cluster.default_rpc_endpoint,
|
||||||
|
gate_public_key=gate_public_keys,
|
||||||
|
wallet_password=wallet.password,
|
||||||
|
container_policy=policy,
|
||||||
|
container_friendly_name=bucket,
|
||||||
|
container_placement_policy=container_placement_policy,
|
||||||
|
).stdout
|
||||||
|
aws_access_key_id = str(
|
||||||
|
re.search(r"access_key_id.*:\s.(?P<aws_access_key_id>\w*)", issue_secret_output).group("aws_access_key_id")
|
||||||
|
)
|
||||||
|
aws_secret_access_key = str(
|
||||||
|
re.search(r"secret_access_key.*:\s.(?P<aws_secret_access_key>\w*)", issue_secret_output).group(
|
||||||
|
"aws_secret_access_key"
|
||||||
|
)
|
||||||
|
)
|
||||||
|
cid = str(re.search(r"container_id.*:\s.(?P<container_id>\w*)", issue_secret_output).group("container_id"))
|
||||||
|
return cid, aws_access_key_id, aws_secret_access_key
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Delete bucket with all objects")
|
@reporter.step("Delete bucket with all objects")
|
||||||
def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
|
def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
|
||||||
versioning_status = s3_client.get_bucket_versioning_status(bucket)
|
versioning_status = s3_client.get_bucket_versioning_status(bucket)
|
||||||
|
@ -177,7 +227,7 @@ def delete_bucket_with_objects(s3_client: S3ClientWrapper, bucket: str):
|
||||||
def search_nodes_with_bucket(
|
def search_nodes_with_bucket(
|
||||||
cluster: Cluster,
|
cluster: Cluster,
|
||||||
bucket_name: str,
|
bucket_name: str,
|
||||||
wallet: WalletInfo,
|
wallet: str,
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
endpoint: str,
|
endpoint: str,
|
||||||
) -> list[ClusterNode]:
|
) -> list[ClusterNode]:
|
||||||
|
|
|
@ -4,12 +4,13 @@ import logging
|
||||||
import os
|
import os
|
||||||
import uuid
|
import uuid
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
|
from enum import Enum
|
||||||
from typing import Any, Optional
|
from typing import Any, Optional
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.cli import FrostfsCli
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
|
from frostfs_testlib.resources.cli import FROSTFS_CLI_EXEC
|
||||||
from frostfs_testlib.resources.common import ASSETS_DIR
|
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
from frostfs_testlib.storage.dataclasses.storage_object_info import StorageObjectInfo
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
|
@ -230,7 +231,8 @@ def get_object_signed_token(
|
||||||
def create_session_token(
|
def create_session_token(
|
||||||
shell: Shell,
|
shell: Shell,
|
||||||
owner: str,
|
owner: str,
|
||||||
wallet: WalletInfo,
|
wallet_path: str,
|
||||||
|
wallet_password: str,
|
||||||
rpc_endpoint: str,
|
rpc_endpoint: str,
|
||||||
) -> str:
|
) -> str:
|
||||||
"""
|
"""
|
||||||
|
@ -245,18 +247,19 @@ def create_session_token(
|
||||||
The path to the generated session token file.
|
The path to the generated session token file.
|
||||||
"""
|
"""
|
||||||
session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
|
session_token = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
|
||||||
frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC)
|
||||||
frostfscli.session.create(
|
frostfscli.session.create(
|
||||||
rpc_endpoint=rpc_endpoint,
|
rpc_endpoint=rpc_endpoint,
|
||||||
address=owner,
|
address=owner,
|
||||||
|
wallet=wallet_path,
|
||||||
|
wallet_password=wallet_password,
|
||||||
out=session_token,
|
out=session_token,
|
||||||
wallet=wallet.path,
|
|
||||||
)
|
)
|
||||||
return session_token
|
return session_token
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Sign Session Token")
|
@reporter.step("Sign Session Token")
|
||||||
def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo) -> str:
|
def sign_session_token(shell: Shell, session_token_file: str, wlt: WalletInfo) -> str:
|
||||||
"""
|
"""
|
||||||
This function signs the session token by the given wallet.
|
This function signs the session token by the given wallet.
|
||||||
|
|
||||||
|
@ -269,6 +272,6 @@ def sign_session_token(shell: Shell, session_token_file: str, wallet: WalletInfo
|
||||||
The path to the signed token.
|
The path to the signed token.
|
||||||
"""
|
"""
|
||||||
signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
|
signed_token_file = os.path.join(os.getcwd(), ASSETS_DIR, str(uuid.uuid4()))
|
||||||
frostfscli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
frostfscli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG)
|
||||||
frostfscli.util.sign_session_token(session_token_file, signed_token_file)
|
frostfscli.util.sign_session_token(wallet=wlt.path, from_file=session_token_file, to_file=signed_token_file)
|
||||||
return signed_token_file
|
return signed_token_file
|
||||||
|
|
|
@ -30,14 +30,14 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust
|
||||||
with reporter.step("Delete objects"):
|
with reporter.step("Delete objects"):
|
||||||
for storage_object in storage_objects:
|
for storage_object in storage_objects:
|
||||||
storage_object.tombstone = delete_object(
|
storage_object.tombstone = delete_object(
|
||||||
storage_object.wallet,
|
storage_object.wallet_file_path,
|
||||||
storage_object.cid,
|
storage_object.cid,
|
||||||
storage_object.oid,
|
storage_object.oid,
|
||||||
shell=shell,
|
shell=shell,
|
||||||
endpoint=cluster.default_rpc_endpoint,
|
endpoint=cluster.default_rpc_endpoint,
|
||||||
)
|
)
|
||||||
verify_head_tombstone(
|
verify_head_tombstone(
|
||||||
wallet=storage_object.wallet,
|
wallet_path=storage_object.wallet_file_path,
|
||||||
cid=storage_object.cid,
|
cid=storage_object.cid,
|
||||||
oid_ts=storage_object.tombstone,
|
oid_ts=storage_object.tombstone,
|
||||||
oid=storage_object.oid,
|
oid=storage_object.oid,
|
||||||
|
@ -52,7 +52,7 @@ def delete_objects(storage_objects: list[StorageObjectInfo], shell: Shell, clust
|
||||||
for storage_object in storage_objects:
|
for storage_object in storage_objects:
|
||||||
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
|
with pytest.raises(Exception, match=OBJECT_ALREADY_REMOVED):
|
||||||
get_object(
|
get_object(
|
||||||
storage_object.wallet,
|
storage_object.wallet_file_path,
|
||||||
storage_object.cid,
|
storage_object.cid,
|
||||||
storage_object.oid,
|
storage_object.oid,
|
||||||
shell=shell,
|
shell=shell,
|
||||||
|
|
|
@ -12,15 +12,13 @@ from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.object import head_object
|
from frostfs_testlib.steps.cli.object import head_object
|
||||||
from frostfs_testlib.steps.complex_object_actions import get_last_object
|
from frostfs_testlib.steps.complex_object_actions import get_last_object
|
||||||
from frostfs_testlib.storage.cluster import StorageNode
|
from frostfs_testlib.storage.cluster import StorageNode
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
|
||||||
from frostfs_testlib.utils import string_utils
|
from frostfs_testlib.utils import string_utils
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
# TODO: Unused, remove or make use of
|
|
||||||
@reporter.step("Get Object Copies")
|
@reporter.step("Get Object Copies")
|
||||||
def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
|
def get_object_copies(complexity: str, wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
|
||||||
"""
|
"""
|
||||||
The function performs requests to all nodes of the container and
|
The function performs requests to all nodes of the container and
|
||||||
finds out if they store a copy of the object. The procedure is
|
finds out if they store a copy of the object. The procedure is
|
||||||
|
@ -45,7 +43,7 @@ def get_object_copies(complexity: str, wallet: WalletInfo, cid: str, oid: str, s
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get Simple Object Copies")
|
@reporter.step("Get Simple Object Copies")
|
||||||
def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
|
def get_simple_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
|
||||||
"""
|
"""
|
||||||
To figure out the number of a simple object copies, only direct
|
To figure out the number of a simple object copies, only direct
|
||||||
HEAD requests should be made to the every node of the container.
|
HEAD requests should be made to the every node of the container.
|
||||||
|
@ -74,7 +72,7 @@ def get_simple_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shel
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get Complex Object Copies")
|
@reporter.step("Get Complex Object Copies")
|
||||||
def get_complex_object_copies(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
|
def get_complex_object_copies(wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> int:
|
||||||
"""
|
"""
|
||||||
To figure out the number of a complex object copies, we firstly
|
To figure out the number of a complex object copies, we firstly
|
||||||
need to retrieve its Last object. We consider that the number of
|
need to retrieve its Last object. We consider that the number of
|
||||||
|
@ -111,7 +109,8 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN
|
||||||
|
|
||||||
nodes_list = []
|
nodes_list = []
|
||||||
for node in nodes:
|
for node in nodes:
|
||||||
wallet = WalletInfo.from_node(node)
|
wallet = node.get_wallet_path()
|
||||||
|
wallet_config = node.get_wallet_config_path()
|
||||||
try:
|
try:
|
||||||
res = head_object(
|
res = head_object(
|
||||||
wallet,
|
wallet,
|
||||||
|
@ -120,6 +119,7 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN
|
||||||
shell=shell,
|
shell=shell,
|
||||||
endpoint=node.get_rpc_endpoint(),
|
endpoint=node.get_rpc_endpoint(),
|
||||||
is_direct=True,
|
is_direct=True,
|
||||||
|
wallet_config=wallet_config,
|
||||||
)
|
)
|
||||||
if res is not None:
|
if res is not None:
|
||||||
logger.info(f"Found object {oid} on node {node}")
|
logger.info(f"Found object {oid} on node {node}")
|
||||||
|
@ -131,7 +131,9 @@ def get_nodes_with_object(cid: str, oid: str, shell: Shell, nodes: list[StorageN
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get Nodes Without Object")
|
@reporter.step("Get Nodes Without Object")
|
||||||
def get_nodes_without_object(wallet: WalletInfo, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]) -> list[StorageNode]:
|
def get_nodes_without_object(
|
||||||
|
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
|
||||||
|
) -> list[StorageNode]:
|
||||||
"""
|
"""
|
||||||
The function returns list of nodes which do not store
|
The function returns list of nodes which do not store
|
||||||
the given object.
|
the given object.
|
||||||
|
|
|
@ -1,23 +1,31 @@
|
||||||
|
import json
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
|
from neo3.wallet import wallet
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.steps.cli.object import head_object
|
from frostfs_testlib.steps.cli.object import head_object
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Verify Head Tombstone")
|
@reporter.step("Verify Head Tombstone")
|
||||||
def verify_head_tombstone(wallet: WalletInfo, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str):
|
def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str, shell: Shell, endpoint: str):
|
||||||
header = head_object(wallet, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
|
header = head_object(wallet_path, cid, oid_ts, shell=shell, endpoint=endpoint)["header"]
|
||||||
|
|
||||||
s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
|
s_oid = header["sessionToken"]["body"]["object"]["target"]["objects"]
|
||||||
logger.info(f"Header Session OIDs is {s_oid}")
|
logger.info(f"Header Session OIDs is {s_oid}")
|
||||||
logger.info(f"OID is {oid}")
|
logger.info(f"OID is {oid}")
|
||||||
|
|
||||||
assert header["containerID"] == cid, "Tombstone Header CID is wrong"
|
assert header["containerID"] == cid, "Tombstone Header CID is wrong"
|
||||||
assert header["ownerID"] == wallet.get_address_from_json(0), "Tombstone Owner ID is wrong"
|
|
||||||
|
with open(wallet_path, "r") as file:
|
||||||
|
wlt_data = json.loads(file.read())
|
||||||
|
wlt = wallet.Wallet.from_json(wlt_data, password="")
|
||||||
|
addr = wlt.accounts[0].address
|
||||||
|
|
||||||
|
assert header["ownerID"] == addr, "Tombstone Owner ID is wrong"
|
||||||
assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
|
assert header["objectType"] == "TOMBSTONE", "Header Type isn't Tombstone"
|
||||||
assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE"
|
assert header["sessionToken"]["body"]["object"]["verb"] == "DELETE", "Header Session Type isn't DELETE"
|
||||||
assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong"
|
assert header["sessionToken"]["body"]["object"]["target"]["container"] == cid, "Header Session ID is wrong"
|
||||||
|
|
|
@ -9,6 +9,7 @@ from frostfs_testlib.hosting import Host, Hosting
|
||||||
from frostfs_testlib.hosting.config import ServiceConfig
|
from frostfs_testlib.hosting.config import ServiceConfig
|
||||||
from frostfs_testlib.storage import get_service_registry
|
from frostfs_testlib.storage import get_service_registry
|
||||||
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
|
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
|
||||||
|
from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration
|
||||||
from frostfs_testlib.storage.constants import ConfigAttributes
|
from frostfs_testlib.storage.constants import ConfigAttributes
|
||||||
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
|
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
|
||||||
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
|
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
|
||||||
|
@ -71,7 +72,6 @@ class ClusterNode:
|
||||||
def s3_gate(self) -> S3Gate:
|
def s3_gate(self) -> S3Gate:
|
||||||
return self.service(S3Gate)
|
return self.service(S3Gate)
|
||||||
|
|
||||||
# TODO: Deprecated. Use config with ServiceConfigurationYml interface
|
|
||||||
def get_config(self, config_file_path: str) -> dict:
|
def get_config(self, config_file_path: str) -> dict:
|
||||||
shell = self.host.get_shell()
|
shell = self.host.get_shell()
|
||||||
|
|
||||||
|
@ -81,7 +81,6 @@ class ClusterNode:
|
||||||
config = yaml.safe_load(config_text)
|
config = yaml.safe_load(config_text)
|
||||||
return config
|
return config
|
||||||
|
|
||||||
# TODO: Deprecated. Use config with ServiceConfigurationYml interface
|
|
||||||
def save_config(self, new_config: dict, config_file_path: str) -> None:
|
def save_config(self, new_config: dict, config_file_path: str) -> None:
|
||||||
shell = self.host.get_shell()
|
shell = self.host.get_shell()
|
||||||
|
|
||||||
|
@ -89,7 +88,7 @@ class ClusterNode:
|
||||||
shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}")
|
shell.exec(f"echo '{config_str}' | sudo tee {config_file_path}")
|
||||||
|
|
||||||
def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml:
|
def config(self, service_type: type[ServiceClass]) -> ServiceConfigurationYml:
|
||||||
return self.service(service_type).config
|
return ServiceConfiguration(self.service(service_type))
|
||||||
|
|
||||||
def service(self, service_type: type[ServiceClass]) -> ServiceClass:
|
def service(self, service_type: type[ServiceClass]) -> ServiceClass:
|
||||||
"""
|
"""
|
||||||
|
@ -106,7 +105,7 @@ class ClusterNode:
|
||||||
service_entry = self.class_registry.get_entry(service_type)
|
service_entry = self.class_registry.get_entry(service_type)
|
||||||
service_name = service_entry["hosting_service_name"]
|
service_name = service_entry["hosting_service_name"]
|
||||||
|
|
||||||
pattern = f"{service_name}_{self.id:02}"
|
pattern = f"{service_name}{self.id:02}"
|
||||||
config = self.host.get_service_config(pattern)
|
config = self.host.get_service_config(pattern)
|
||||||
|
|
||||||
return service_type(
|
return service_type(
|
||||||
|
@ -121,7 +120,7 @@ class ClusterNode:
|
||||||
svcs_names_on_node = [svc.name for svc in self.host.config.services]
|
svcs_names_on_node = [svc.name for svc in self.host.config.services]
|
||||||
for entry in self.class_registry._class_mapping.values():
|
for entry in self.class_registry._class_mapping.values():
|
||||||
hosting_svc_name = entry["hosting_service_name"]
|
hosting_svc_name = entry["hosting_service_name"]
|
||||||
pattern = f"{hosting_svc_name}_{self.id:02}"
|
pattern = f"{hosting_svc_name}{self.id:02}"
|
||||||
if pattern in svcs_names_on_node:
|
if pattern in svcs_names_on_node:
|
||||||
config = self.host.get_service_config(pattern)
|
config = self.host.get_service_config(pattern)
|
||||||
svcs.append(
|
svcs.append(
|
||||||
|
@ -268,13 +267,13 @@ class Cluster:
|
||||||
service_name = service["hosting_service_name"]
|
service_name = service["hosting_service_name"]
|
||||||
cls: type[NodeBase] = service["cls"]
|
cls: type[NodeBase] = service["cls"]
|
||||||
|
|
||||||
pattern = f"{service_name}_\d*$"
|
pattern = f"{service_name}\d*$"
|
||||||
configs = self.hosting.find_service_configs(pattern)
|
configs = self.hosting.find_service_configs(pattern)
|
||||||
|
|
||||||
found_nodes = []
|
found_nodes = []
|
||||||
for config in configs:
|
for config in configs:
|
||||||
# config.name is something like s3-gate01. Cut last digits to know service type
|
# config.name is something like s3-gate01. Cut last digits to know service type
|
||||||
service_type = re.findall("(.*)_\d+", config.name)[0]
|
service_type = re.findall(".*\D", config.name)[0]
|
||||||
# exclude unsupported services
|
# exclude unsupported services
|
||||||
if service_type != service_name:
|
if service_type != service_name:
|
||||||
continue
|
continue
|
||||||
|
|
|
@ -5,74 +5,51 @@ from typing import Any
|
||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.shell.interfaces import CommandOptions, Shell
|
from frostfs_testlib.shell.interfaces import CommandOptions
|
||||||
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
|
from frostfs_testlib.storage.configuration.interfaces import ServiceConfigurationYml
|
||||||
|
from frostfs_testlib.storage.dataclasses.node_base import ServiceClass
|
||||||
|
|
||||||
def extend_dict(extend_me: dict, extend_by: dict):
|
|
||||||
if isinstance(extend_by, dict):
|
|
||||||
for k, v in extend_by.items():
|
|
||||||
if k in extend_me:
|
|
||||||
extend_dict(extend_me.get(k), v)
|
|
||||||
else:
|
|
||||||
extend_me[k] = v
|
|
||||||
else:
|
|
||||||
extend_me += extend_by
|
|
||||||
|
|
||||||
|
|
||||||
class ServiceConfiguration(ServiceConfigurationYml):
|
class ServiceConfiguration(ServiceConfigurationYml):
|
||||||
def __init__(self, service_name: str, shell: Shell, config_dir: str, main_config_path: str) -> None:
|
def __init__(self, service: "ServiceClass") -> None:
|
||||||
self.service_name = service_name
|
self.service = service
|
||||||
self.shell = shell
|
self.shell = self.service.host.get_shell()
|
||||||
self.main_config_path = main_config_path
|
self.confd_path = os.path.join(self.service.config_dir, "conf.d")
|
||||||
self.confd_path = os.path.join(config_dir, "conf.d")
|
|
||||||
self.custom_file = os.path.join(self.confd_path, "99_changes.yml")
|
self.custom_file = os.path.join(self.confd_path, "99_changes.yml")
|
||||||
|
|
||||||
def _path_exists(self, path: str) -> bool:
|
def _path_exists(self, path: str) -> bool:
|
||||||
return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code
|
return not self.shell.exec(f"test -e {path}", options=CommandOptions(check=False)).return_code
|
||||||
|
|
||||||
def _get_config_files(self):
|
def _get_data_from_file(self, path: str) -> dict:
|
||||||
config_files = [self.main_config_path]
|
content = self.shell.exec(f"cat {path}").stdout
|
||||||
|
data = yaml.safe_load(content)
|
||||||
|
return data
|
||||||
|
|
||||||
if self._path_exists(self.confd_path):
|
def get(self, key: str) -> str:
|
||||||
files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split()
|
with reporter.step(f"Get {key} configuration value for {self.service}"):
|
||||||
# Sorting files in backwards order from latest to first one
|
config_files = [self.service.main_config_path]
|
||||||
config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0])))
|
|
||||||
|
|
||||||
return config_files
|
if self._path_exists(self.confd_path):
|
||||||
|
files = self.shell.exec(f"find {self.confd_path} -type f").stdout.strip().split()
|
||||||
|
# Sorting files in backwards order from latest to first one
|
||||||
|
config_files.extend(sorted(files, key=lambda x: -int(re.findall("^\d+", os.path.basename(x))[0])))
|
||||||
|
|
||||||
def _get_configuration(self, config_files: list[str]) -> dict:
|
result = None
|
||||||
if not config_files:
|
for file in files:
|
||||||
return [{}]
|
data = self._get_data_from_file(file)
|
||||||
|
result = self._find_option(key, data)
|
||||||
|
if result is not None:
|
||||||
|
break
|
||||||
|
|
||||||
splitter = "+++++"
|
|
||||||
files_str = " ".join(config_files)
|
|
||||||
all_content = self.shell.exec(
|
|
||||||
f"echo Getting config files; for file in {files_str}; do (echo {splitter}; sudo cat ${{file}}); done"
|
|
||||||
).stdout
|
|
||||||
files_content = all_content.split("+++++")[1:]
|
|
||||||
files_data = [yaml.safe_load(file_content) for file_content in files_content]
|
|
||||||
|
|
||||||
mergedData = {}
|
|
||||||
for data in files_data:
|
|
||||||
extend_dict(mergedData, data)
|
|
||||||
|
|
||||||
return mergedData
|
|
||||||
|
|
||||||
def get(self, key: str) -> str | Any:
|
|
||||||
with reporter.step(f"Get {key} configuration value for {self.service_name}"):
|
|
||||||
config_files = self._get_config_files()
|
|
||||||
configuration = self._get_configuration(config_files)
|
|
||||||
result = self._find_option(key, configuration)
|
|
||||||
return result
|
return result
|
||||||
|
|
||||||
def set(self, values: dict[str, Any]):
|
def set(self, values: dict[str, Any]):
|
||||||
with reporter.step(f"Change configuration for {self.service_name}"):
|
with reporter.step(f"Change configuration for {self.service}"):
|
||||||
if not self._path_exists(self.confd_path):
|
if not self._path_exists(self.confd_path):
|
||||||
self.shell.exec(f"mkdir {self.confd_path}")
|
self.shell.exec(f"mkdir {self.confd_path}")
|
||||||
|
|
||||||
if self._path_exists(self.custom_file):
|
if self._path_exists(self.custom_file):
|
||||||
data = self._get_configuration([self.custom_file])
|
data = self._get_data_from_file(self.custom_file)
|
||||||
else:
|
else:
|
||||||
data = {}
|
data = {}
|
||||||
|
|
||||||
|
@ -84,5 +61,5 @@ class ServiceConfiguration(ServiceConfigurationYml):
|
||||||
self.shell.exec(f"chmod 777 {self.custom_file}")
|
self.shell.exec(f"chmod 777 {self.custom_file}")
|
||||||
|
|
||||||
def revert(self):
|
def revert(self):
|
||||||
with reporter.step(f"Revert changed options for {self.service_name}"):
|
with reporter.step(f"Revert changed options for {self.service}"):
|
||||||
self.shell.exec(f"rm -rf {self.custom_file}")
|
self.shell.exec(f"rm -rf {self.custom_file}")
|
||||||
|
|
|
@ -8,8 +8,7 @@ class ConfigAttributes:
|
||||||
SHARD_CONFIG_PATH = "shard_config_path"
|
SHARD_CONFIG_PATH = "shard_config_path"
|
||||||
LOGGER_CONFIG_PATH = "logger_config_path"
|
LOGGER_CONFIG_PATH = "logger_config_path"
|
||||||
LOCAL_WALLET_PATH = "local_wallet_path"
|
LOCAL_WALLET_PATH = "local_wallet_path"
|
||||||
LOCAL_WALLET_CONFIG = "local_wallet_config_path"
|
LOCAL_WALLET_CONFIG = "local_config_path"
|
||||||
REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
|
|
||||||
ENDPOINT_DATA_0 = "endpoint_data0"
|
ENDPOINT_DATA_0 = "endpoint_data0"
|
||||||
ENDPOINT_DATA_1 = "endpoint_data1"
|
ENDPOINT_DATA_1 = "endpoint_data1"
|
||||||
ENDPOINT_INTERNAL = "endpoint_internal0"
|
ENDPOINT_INTERNAL = "endpoint_internal0"
|
||||||
|
@ -18,3 +17,11 @@ class ConfigAttributes:
|
||||||
UN_LOCODE = "un_locode"
|
UN_LOCODE = "un_locode"
|
||||||
HTTP_HOSTNAME = "http_hostname"
|
HTTP_HOSTNAME = "http_hostname"
|
||||||
S3_HOSTNAME = "s3_hostname"
|
S3_HOSTNAME = "s3_hostname"
|
||||||
|
|
||||||
|
|
||||||
|
class _FrostfsServicesNames:
|
||||||
|
STORAGE = "s"
|
||||||
|
S3_GATE = "s3-gate"
|
||||||
|
HTTP_GATE = "http-gate"
|
||||||
|
MORPH_CHAIN = "morph-chain"
|
||||||
|
INNER_RING = "ir"
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
import copy
|
import copy
|
||||||
from datetime import datetime
|
from datetime import datetime
|
||||||
|
from typing import Optional
|
||||||
|
|
||||||
import frostfs_testlib.resources.optionals as optionals
|
import frostfs_testlib.resources.optionals as optionals
|
||||||
from frostfs_testlib import reporter
|
from frostfs_testlib import reporter
|
||||||
|
@ -9,6 +10,7 @@ from frostfs_testlib.load.load_report import LoadReport
|
||||||
from frostfs_testlib.load.load_verifiers import LoadVerifier
|
from frostfs_testlib.load.load_verifiers import LoadVerifier
|
||||||
from frostfs_testlib.storage.cluster import ClusterNode
|
from frostfs_testlib.storage.cluster import ClusterNode
|
||||||
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
|
from frostfs_testlib.storage.dataclasses.frostfs_services import S3Gate, StorageNode
|
||||||
|
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
||||||
from frostfs_testlib.testing.parallel import parallel
|
from frostfs_testlib.testing.parallel import parallel
|
||||||
from frostfs_testlib.testing.test_control import run_optionally
|
from frostfs_testlib.testing.test_control import run_optionally
|
||||||
|
|
||||||
|
@ -21,6 +23,7 @@ class BackgroundLoadController:
|
||||||
cluster_nodes: list[ClusterNode]
|
cluster_nodes: list[ClusterNode]
|
||||||
nodes_under_load: list[ClusterNode]
|
nodes_under_load: list[ClusterNode]
|
||||||
load_counter: int
|
load_counter: int
|
||||||
|
loaders_wallet: WalletInfo
|
||||||
load_summaries: dict
|
load_summaries: dict
|
||||||
endpoints: list[str]
|
endpoints: list[str]
|
||||||
runner: ScenarioRunner
|
runner: ScenarioRunner
|
||||||
|
@ -31,6 +34,7 @@ class BackgroundLoadController:
|
||||||
self,
|
self,
|
||||||
k6_dir: str,
|
k6_dir: str,
|
||||||
load_params: LoadParams,
|
load_params: LoadParams,
|
||||||
|
loaders_wallet: WalletInfo,
|
||||||
cluster_nodes: list[ClusterNode],
|
cluster_nodes: list[ClusterNode],
|
||||||
nodes_under_load: list[ClusterNode],
|
nodes_under_load: list[ClusterNode],
|
||||||
runner: ScenarioRunner,
|
runner: ScenarioRunner,
|
||||||
|
@ -41,6 +45,7 @@ class BackgroundLoadController:
|
||||||
self.cluster_nodes = cluster_nodes
|
self.cluster_nodes = cluster_nodes
|
||||||
self.nodes_under_load = nodes_under_load
|
self.nodes_under_load = nodes_under_load
|
||||||
self.load_counter = 1
|
self.load_counter = 1
|
||||||
|
self.loaders_wallet = loaders_wallet
|
||||||
self.runner = runner
|
self.runner = runner
|
||||||
self.started = False
|
self.started = False
|
||||||
self.load_reporters = []
|
self.load_reporters = []
|
||||||
|
@ -59,7 +64,10 @@ class BackgroundLoadController:
|
||||||
)
|
)
|
||||||
),
|
),
|
||||||
EndpointSelectionStrategy.FIRST: list(
|
EndpointSelectionStrategy.FIRST: list(
|
||||||
set(node_under_load.service(StorageNode).get_rpc_endpoint() for node_under_load in self.nodes_under_load)
|
set(
|
||||||
|
node_under_load.service(StorageNode).get_rpc_endpoint()
|
||||||
|
for node_under_load in self.nodes_under_load
|
||||||
|
)
|
||||||
),
|
),
|
||||||
},
|
},
|
||||||
# for some reason xk6 appends http protocol on its own
|
# for some reason xk6 appends http protocol on its own
|
||||||
|
@ -187,19 +195,15 @@ class BackgroundLoadController:
|
||||||
read_from=self.load_params.read_from,
|
read_from=self.load_params.read_from,
|
||||||
registry_file=self.load_params.registry_file,
|
registry_file=self.load_params.registry_file,
|
||||||
verify_time=self.load_params.verify_time,
|
verify_time=self.load_params.verify_time,
|
||||||
custom_registry=self.load_params.custom_registry,
|
|
||||||
load_type=self.load_params.load_type,
|
load_type=self.load_params.load_type,
|
||||||
load_id=self.load_params.load_id,
|
load_id=self.load_params.load_id,
|
||||||
vu_init_time=0,
|
vu_init_time=0,
|
||||||
working_dir=self.load_params.working_dir,
|
working_dir=self.load_params.working_dir,
|
||||||
endpoint_selection_strategy=self.load_params.endpoint_selection_strategy,
|
endpoint_selection_strategy=self.load_params.endpoint_selection_strategy,
|
||||||
k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy,
|
k6_process_allocation_strategy=self.load_params.k6_process_allocation_strategy,
|
||||||
setup_timeout=self.load_params.setup_timeout,
|
setup_timeout="1s",
|
||||||
)
|
)
|
||||||
|
|
||||||
if self.verification_params.custom_registry:
|
|
||||||
self.verification_params.registry_file = self.load_params.custom_registry
|
|
||||||
|
|
||||||
if self.verification_params.verify_time is None:
|
if self.verification_params.verify_time is None:
|
||||||
raise RuntimeError("verify_time should not be none")
|
raise RuntimeError("verify_time should not be none")
|
||||||
|
|
||||||
|
|
|
@ -11,14 +11,12 @@ from frostfs_testlib.healthcheck.interfaces import Healthcheck
|
||||||
from frostfs_testlib.hosting.interfaces import HostStatus
|
from frostfs_testlib.hosting.interfaces import HostStatus
|
||||||
from frostfs_testlib.plugins import load_all
|
from frostfs_testlib.plugins import load_all
|
||||||
from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
|
from frostfs_testlib.resources.cli import FROSTFS_ADM_CONFIG_PATH, FROSTFS_ADM_EXEC, FROSTFS_CLI_EXEC
|
||||||
from frostfs_testlib.resources.common import MORPH_BLOCK_TIME
|
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, MORPH_BLOCK_TIME
|
||||||
from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
|
from frostfs_testlib.shell import CommandOptions, Shell, SshConnectionProvider
|
||||||
from frostfs_testlib.steps.network import IpHelper
|
from frostfs_testlib.steps.network import IpHelper
|
||||||
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
|
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
|
||||||
from frostfs_testlib.storage.controllers.disk_controller import DiskController
|
from frostfs_testlib.storage.controllers.disk_controller import DiskController
|
||||||
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
|
from frostfs_testlib.storage.dataclasses.node_base import NodeBase, ServiceClass
|
||||||
from frostfs_testlib.storage.dataclasses.storage_object_info import NodeStatus
|
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
|
||||||
from frostfs_testlib.testing import parallel
|
from frostfs_testlib.testing import parallel
|
||||||
from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success
|
from frostfs_testlib.testing.test_control import retry, run_optionally, wait_for_success
|
||||||
from frostfs_testlib.utils.datetime_utils import parse_time
|
from frostfs_testlib.utils.datetime_utils import parse_time
|
||||||
|
@ -328,8 +326,6 @@ class ClusterStateController:
|
||||||
|
|
||||||
@reporter.step("Restore blocked nodes")
|
@reporter.step("Restore blocked nodes")
|
||||||
def restore_all_traffic(self):
|
def restore_all_traffic(self):
|
||||||
if not self.dropped_traffic:
|
|
||||||
return
|
|
||||||
parallel(self._restore_traffic_to_node, self.dropped_traffic)
|
parallel(self._restore_traffic_to_node, self.dropped_traffic)
|
||||||
|
|
||||||
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
@run_optionally(optionals.OPTIONAL_FAILOVER_ENABLED)
|
||||||
|
@ -414,43 +410,45 @@ class ClusterStateController:
|
||||||
)
|
)
|
||||||
frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
|
frostfs_adm.morph.set_config(set_key_value=f"MaintenanceModeAllowed={status}")
|
||||||
|
|
||||||
@reporter.step("Set node status to {status} in CSC")
|
@reporter.step("Set mode node to {status}")
|
||||||
def set_node_status(self, cluster_node: ClusterNode, wallet: WalletInfo, status: NodeStatus, await_tick: bool = True) -> None:
|
def set_mode_node(self, cluster_node: ClusterNode, wallet: str, status: str, await_tick: bool = True) -> None:
|
||||||
rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint()
|
rpc_endpoint = cluster_node.storage_node.get_rpc_endpoint()
|
||||||
control_endpoint = cluster_node.service(StorageNode).get_control_endpoint()
|
control_endpoint = cluster_node.service(StorageNode).get_control_endpoint()
|
||||||
|
|
||||||
frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(self.shell, wallet, cluster_node)
|
frostfs_adm, frostfs_cli, frostfs_cli_remote = self._get_cli(local_shell=self.shell, cluster_node=cluster_node)
|
||||||
node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint).stdout)
|
node_netinfo = NetmapParser.netinfo(frostfs_cli.netmap.netinfo(rpc_endpoint=rpc_endpoint, wallet=wallet).stdout)
|
||||||
|
|
||||||
if node_netinfo.maintenance_mode_allowed == "false":
|
with reporter.step("If status maintenance, then check that the option is enabled"):
|
||||||
with reporter.step("Enable maintenance mode"):
|
if node_netinfo.maintenance_mode_allowed == "false":
|
||||||
frostfs_adm.morph.set_config("MaintenanceModeAllowed=true")
|
frostfs_adm.morph.set_config(set_key_value="MaintenanceModeAllowed=true")
|
||||||
|
|
||||||
with reporter.step(f"Set node status to {status} using FrostfsCli"):
|
with reporter.step(f"Change the status to {status}"):
|
||||||
frostfs_cli_remote.control.set_status(control_endpoint, status.value)
|
frostfs_cli_remote.control.set_status(endpoint=control_endpoint, status=status)
|
||||||
|
|
||||||
if not await_tick:
|
if not await_tick:
|
||||||
return
|
return
|
||||||
|
|
||||||
with reporter.step("Tick 1 epoch and await 2 block"):
|
with reporter.step("Tick 1 epoch, and await 2 block"):
|
||||||
frostfs_adm.morph.force_new_epoch()
|
frostfs_adm.morph.force_new_epoch()
|
||||||
time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
|
time.sleep(parse_time(MORPH_BLOCK_TIME) * 2)
|
||||||
|
|
||||||
self.await_node_status(status, wallet, cluster_node)
|
self.check_node_status(status=status, wallet=wallet, cluster_node=cluster_node)
|
||||||
|
|
||||||
@wait_for_success(80, 8, title="Wait for node status become {status}")
|
@wait_for_success(80, 8, title="Wait for storage status become {status}")
|
||||||
def await_node_status(self, status: NodeStatus, wallet: WalletInfo, cluster_node: ClusterNode):
|
def check_node_status(self, status: str, wallet: str, cluster_node: ClusterNode):
|
||||||
frostfs_cli = FrostfsCli(self.shell, FROSTFS_CLI_EXEC, wallet.config_path)
|
frostfs_cli = FrostfsCli(
|
||||||
netmap = NetmapParser.snapshot_all_nodes(frostfs_cli.netmap.snapshot(cluster_node.storage_node.get_rpc_endpoint()).stdout)
|
shell=self.shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
|
||||||
|
)
|
||||||
|
netmap = NetmapParser.snapshot_all_nodes(
|
||||||
|
frostfs_cli.netmap.snapshot(rpc_endpoint=cluster_node.storage_node.get_rpc_endpoint(), wallet=wallet).stdout
|
||||||
|
)
|
||||||
netmap = [node for node in netmap if cluster_node.host_ip == node.node]
|
netmap = [node for node in netmap if cluster_node.host_ip == node.node]
|
||||||
if status == NodeStatus.OFFLINE:
|
if status == "offline":
|
||||||
assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline"
|
assert cluster_node.host_ip not in netmap, f"{cluster_node.host_ip} not in Offline"
|
||||||
else:
|
else:
|
||||||
assert netmap[0].node_status == status, f"Node status should be '{status}', but was '{netmap[0].node_status}'"
|
assert netmap[0].node_status == status.upper(), f"Node state - {netmap[0].node_status} != {status} expect"
|
||||||
|
|
||||||
def _get_cli(
|
def _get_cli(self, local_shell: Shell, cluster_node: ClusterNode) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
|
||||||
self, local_shell: Shell, local_wallet: WalletInfo, cluster_node: ClusterNode
|
|
||||||
) -> tuple[FrostfsAdm, FrostfsCli, FrostfsCli]:
|
|
||||||
# TODO Move to service config
|
# TODO Move to service config
|
||||||
host = cluster_node.host
|
host = cluster_node.host
|
||||||
service_config = host.get_service_config(cluster_node.storage_node.name)
|
service_config = host.get_service_config(cluster_node.storage_node.name)
|
||||||
|
@ -462,8 +460,12 @@ class ClusterStateController:
|
||||||
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
|
wallet_config = f'wallet: {wallet_path}\npassword: "{wallet_password}"'
|
||||||
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
|
shell.exec(f"echo '{wallet_config}' > {wallet_config_path}")
|
||||||
|
|
||||||
frostfs_adm = FrostfsAdm(shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH)
|
frostfs_adm = FrostfsAdm(
|
||||||
frostfs_cli = FrostfsCli(local_shell, FROSTFS_CLI_EXEC, local_wallet.config_path)
|
shell=shell, frostfs_adm_exec_path=FROSTFS_ADM_EXEC, config_file=FROSTFS_ADM_CONFIG_PATH
|
||||||
|
)
|
||||||
|
frostfs_cli = FrostfsCli(
|
||||||
|
shell=local_shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=DEFAULT_WALLET_CONFIG
|
||||||
|
)
|
||||||
frostfs_cli_remote = FrostfsCli(
|
frostfs_cli_remote = FrostfsCli(
|
||||||
shell=shell,
|
shell=shell,
|
||||||
frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
|
frostfs_cli_exec_path=FROSTFS_CLI_EXEC,
|
||||||
|
@ -507,7 +509,9 @@ class ClusterStateController:
|
||||||
options = CommandOptions(check=False)
|
options = CommandOptions(check=False)
|
||||||
return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code
|
return self.shell.exec(f"ping {node.host.config.address} -c 1", options).return_code
|
||||||
|
|
||||||
@retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online")
|
@retry(
|
||||||
|
max_attempts=60, sleep_interval=10, expected_result=HostStatus.ONLINE, title="Waiting for {node} to go online"
|
||||||
|
)
|
||||||
def _wait_for_host_online(self, node: ClusterNode):
|
def _wait_for_host_online(self, node: ClusterNode):
|
||||||
try:
|
try:
|
||||||
ping_result = self._ping_host(node)
|
ping_result = self._ping_host(node)
|
||||||
|
@ -518,7 +522,9 @@ class ClusterStateController:
|
||||||
logger.warning(f"Host ping fails with error {err}")
|
logger.warning(f"Host ping fails with error {err}")
|
||||||
return HostStatus.OFFLINE
|
return HostStatus.OFFLINE
|
||||||
|
|
||||||
@retry(max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline")
|
@retry(
|
||||||
|
max_attempts=60, sleep_interval=10, expected_result=HostStatus.OFFLINE, title="Waiting for {node} to go offline"
|
||||||
|
)
|
||||||
def _wait_for_host_offline(self, node: ClusterNode):
|
def _wait_for_host_offline(self, node: ClusterNode):
|
||||||
try:
|
try:
|
||||||
ping_result = self._ping_host(node)
|
ping_result = self._ping_host(node)
|
||||||
|
|
|
@ -79,7 +79,9 @@ class ShardsWatcher:
|
||||||
assert self._is_shard_present(shard_id)
|
assert self._is_shard_present(shard_id)
|
||||||
shards_with_new_errors = self.get_shards_with_new_errors()
|
shards_with_new_errors = self.get_shards_with_new_errors()
|
||||||
|
|
||||||
assert shard_id in shards_with_new_errors, f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}"
|
assert (
|
||||||
|
shard_id in shards_with_new_errors
|
||||||
|
), f"Expected shard {shard_id} to have new errors, but haven't {self.shards_snapshots[-1]}"
|
||||||
|
|
||||||
@wait_for_success(300, 5)
|
@wait_for_success(300, 5)
|
||||||
def await_for_shards_have_no_new_errors(self):
|
def await_for_shards_have_no_new_errors(self):
|
||||||
|
@ -108,9 +110,9 @@ class ShardsWatcher:
|
||||||
self.storage_node.host.get_cli_config("frostfs-cli").exec_path,
|
self.storage_node.host.get_cli_config("frostfs-cli").exec_path,
|
||||||
)
|
)
|
||||||
return shards_cli.set_mode(
|
return shards_cli.set_mode(
|
||||||
endpoint=self.storage_node.get_control_endpoint(),
|
self.storage_node.get_control_endpoint(),
|
||||||
wallet=self.storage_node.get_remote_wallet_path(),
|
self.storage_node.get_remote_wallet_path(),
|
||||||
wallet_password=self.storage_node.get_wallet_password(),
|
self.storage_node.get_wallet_password(),
|
||||||
mode=mode,
|
mode=mode,
|
||||||
id=[shard_id],
|
id=[shard_id],
|
||||||
clear_errors=clear_errors,
|
clear_errors=clear_errors,
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
import logging
|
import logging
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
|
from enum import Enum
|
||||||
from typing import Any, Dict, List, Optional, Union
|
from typing import Any, Dict, List, Optional, Union
|
||||||
|
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
|
||||||
from frostfs_testlib.testing.readable import HumanReadableEnum
|
from frostfs_testlib.testing.readable import HumanReadableEnum
|
||||||
from frostfs_testlib.utils import wallet_utils
|
from frostfs_testlib.utils import wallet_utils
|
||||||
|
|
||||||
|
@ -65,7 +65,11 @@ class EACLFilters:
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
return ",".join(
|
return ",".join(
|
||||||
[f"{filter.header_type.value}:" f"{filter.key}{filter.match_type.value}{filter.value}" for filter in self.filters]
|
[
|
||||||
|
f"{filter.header_type.value}:"
|
||||||
|
f"{filter.key}{filter.match_type.value}{filter.value}"
|
||||||
|
for filter in self.filters
|
||||||
|
]
|
||||||
if self.filters
|
if self.filters
|
||||||
else []
|
else []
|
||||||
)
|
)
|
||||||
|
@ -80,7 +84,7 @@ class EACLPubKey:
|
||||||
class EACLRule:
|
class EACLRule:
|
||||||
operation: Optional[EACLOperation] = None
|
operation: Optional[EACLOperation] = None
|
||||||
access: Optional[EACLAccess] = None
|
access: Optional[EACLAccess] = None
|
||||||
role: Optional[Union[EACLRole, WalletInfo]] = None
|
role: Optional[Union[EACLRole, str]] = None
|
||||||
filters: Optional[EACLFilters] = None
|
filters: Optional[EACLFilters] = None
|
||||||
|
|
||||||
def to_dict(self) -> Dict[str, Any]:
|
def to_dict(self) -> Dict[str, Any]:
|
||||||
|
@ -92,9 +96,9 @@ class EACLRule:
|
||||||
}
|
}
|
||||||
|
|
||||||
def __str__(self):
|
def __str__(self):
|
||||||
role = ""
|
role = (
|
||||||
if isinstance(self.role, EACLRole):
|
self.role.value
|
||||||
role = self.role.value
|
if isinstance(self.role, EACLRole)
|
||||||
if isinstance(self.role, WalletInfo):
|
else f'pubkey:{wallet_utils.get_wallet_public_key(self.role, "")}'
|
||||||
role = f"pubkey:{wallet_utils.get_wallet_public_key(self.role.path, self.role.password)}"
|
)
|
||||||
return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}'
|
return f'{self.access.value} {self.operation.value} {self.filters or ""} {role}'
|
||||||
|
|
|
@ -5,7 +5,6 @@ from frostfs_testlib.storage.constants import ConfigAttributes
|
||||||
from frostfs_testlib.storage.dataclasses.node_base import NodeBase
|
from frostfs_testlib.storage.dataclasses.node_base import NodeBase
|
||||||
from frostfs_testlib.storage.dataclasses.shard import Shard
|
from frostfs_testlib.storage.dataclasses.shard import Shard
|
||||||
|
|
||||||
|
|
||||||
class InnerRing(NodeBase):
|
class InnerRing(NodeBase):
|
||||||
"""
|
"""
|
||||||
Class represents inner ring node in a cluster
|
Class represents inner ring node in a cluster
|
||||||
|
@ -18,7 +17,11 @@ class InnerRing(NodeBase):
|
||||||
|
|
||||||
def service_healthcheck(self) -> bool:
|
def service_healthcheck(self) -> bool:
|
||||||
health_metric = "frostfs_ir_ir_health"
|
health_metric = "frostfs_ir_ir_health"
|
||||||
output = self.host.get_shell().exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d").stdout
|
output = (
|
||||||
|
self.host.get_shell()
|
||||||
|
.exec(f"curl -s localhost:6662 | grep {health_metric} | sed 1,2d")
|
||||||
|
.stdout
|
||||||
|
)
|
||||||
return health_metric in output
|
return health_metric in output
|
||||||
|
|
||||||
def get_netmap_cleaner_threshold(self) -> str:
|
def get_netmap_cleaner_threshold(self) -> str:
|
||||||
|
@ -47,7 +50,11 @@ class S3Gate(NodeBase):
|
||||||
|
|
||||||
def service_healthcheck(self) -> bool:
|
def service_healthcheck(self) -> bool:
|
||||||
health_metric = "frostfs_s3_gw_state_health"
|
health_metric = "frostfs_s3_gw_state_health"
|
||||||
output = self.host.get_shell().exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d").stdout
|
output = (
|
||||||
|
self.host.get_shell()
|
||||||
|
.exec(f"curl -s localhost:8086 | grep {health_metric} | sed 1,2d")
|
||||||
|
.stdout
|
||||||
|
)
|
||||||
return health_metric in output
|
return health_metric in output
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
@ -65,7 +72,11 @@ class HTTPGate(NodeBase):
|
||||||
|
|
||||||
def service_healthcheck(self) -> bool:
|
def service_healthcheck(self) -> bool:
|
||||||
health_metric = "frostfs_http_gw_state_health"
|
health_metric = "frostfs_http_gw_state_health"
|
||||||
output = self.host.get_shell().exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d").stdout
|
output = (
|
||||||
|
self.host.get_shell()
|
||||||
|
.exec(f"curl -s localhost:5662 | grep {health_metric} | sed 1,2d")
|
||||||
|
.stdout
|
||||||
|
)
|
||||||
return health_metric in output
|
return health_metric in output
|
||||||
|
|
||||||
@property
|
@property
|
||||||
|
@ -124,26 +135,32 @@ class StorageNode(NodeBase):
|
||||||
|
|
||||||
def service_healthcheck(self) -> bool:
|
def service_healthcheck(self) -> bool:
|
||||||
health_metric = "frostfs_node_state_health"
|
health_metric = "frostfs_node_state_health"
|
||||||
output = self.host.get_shell().exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d").stdout
|
output = (
|
||||||
|
self.host.get_shell()
|
||||||
|
.exec(f"curl -s localhost:6672 | grep {health_metric} | sed 1,2d")
|
||||||
|
.stdout
|
||||||
|
)
|
||||||
return health_metric in output
|
return health_metric in output
|
||||||
|
|
||||||
# TODO: Deprecated. Use new approach with config
|
|
||||||
def get_shard_config_path(self) -> str:
|
def get_shard_config_path(self) -> str:
|
||||||
return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH)
|
return self._get_attribute(ConfigAttributes.SHARD_CONFIG_PATH)
|
||||||
|
|
||||||
# TODO: Deprecated. Use new approach with config
|
|
||||||
def get_shards_config(self) -> tuple[str, dict]:
|
def get_shards_config(self) -> tuple[str, dict]:
|
||||||
return self.get_config(self.get_shard_config_path())
|
return self.get_config(self.get_shard_config_path())
|
||||||
|
|
||||||
def get_shards(self) -> list[Shard]:
|
def get_shards(self) -> list[Shard]:
|
||||||
shards = self.config.get("storage:shard")
|
config = self.get_shards_config()[1]
|
||||||
|
config["storage"]["shard"].pop("default")
|
||||||
|
return [Shard.from_object(shard) for shard in config["storage"]["shard"].values()]
|
||||||
|
|
||||||
if not shards:
|
def get_shards_from_env(self) -> list[Shard]:
|
||||||
raise RuntimeError(f"Cannot get shards information for {self.name} on {self.host.config.address}")
|
config = self.get_shards_config()[1]
|
||||||
|
configObj = ConfigObj(StringIO(config))
|
||||||
|
|
||||||
if "default" in shards:
|
pattern = f"{SHARD_PREFIX}\d*"
|
||||||
shards.pop("default")
|
num_shards = len(set(re.findall(pattern, self.get_shards_config())))
|
||||||
return [Shard.from_object(shard) for shard in shards.values()]
|
|
||||||
|
return [Shard.from_config_object(configObj, shard_id) for shard_id in range(num_shards)]
|
||||||
|
|
||||||
def get_control_endpoint(self) -> str:
|
def get_control_endpoint(self) -> str:
|
||||||
return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT)
|
return self._get_attribute(ConfigAttributes.CONTROL_ENDPOINT)
|
||||||
|
@ -157,10 +174,10 @@ class StorageNode(NodeBase):
|
||||||
def get_storage_config(self) -> str:
|
def get_storage_config(self) -> str:
|
||||||
return self.host.get_storage_config(self.name)
|
return self.host.get_storage_config(self.name)
|
||||||
|
|
||||||
def get_http_hostname(self) -> list[str]:
|
def get_http_hostname(self) -> str:
|
||||||
return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME)
|
return self._get_attribute(ConfigAttributes.HTTP_HOSTNAME)
|
||||||
|
|
||||||
def get_s3_hostname(self) -> list[str]:
|
def get_s3_hostname(self) -> str:
|
||||||
return self._get_attribute(ConfigAttributes.S3_HOSTNAME)
|
return self._get_attribute(ConfigAttributes.S3_HOSTNAME)
|
||||||
|
|
||||||
def delete_blobovnicza(self):
|
def delete_blobovnicza(self):
|
||||||
|
|
|
@ -10,7 +10,6 @@ from frostfs_testlib import reporter
|
||||||
from frostfs_testlib.hosting.config import ServiceConfig
|
from frostfs_testlib.hosting.config import ServiceConfig
|
||||||
from frostfs_testlib.hosting.interfaces import Host
|
from frostfs_testlib.hosting.interfaces import Host
|
||||||
from frostfs_testlib.shell.interfaces import CommandResult
|
from frostfs_testlib.shell.interfaces import CommandResult
|
||||||
from frostfs_testlib.storage.configuration.service_configuration import ServiceConfiguration, ServiceConfigurationYml
|
|
||||||
from frostfs_testlib.storage.constants import ConfigAttributes
|
from frostfs_testlib.storage.constants import ConfigAttributes
|
||||||
from frostfs_testlib.testing.readable import HumanReadableABC
|
from frostfs_testlib.testing.readable import HumanReadableABC
|
||||||
from frostfs_testlib.utils import wallet_utils
|
from frostfs_testlib.utils import wallet_utils
|
||||||
|
@ -115,14 +114,6 @@ class NodeBase(HumanReadableABC):
|
||||||
ConfigAttributes.CONFIG_PATH,
|
ConfigAttributes.CONFIG_PATH,
|
||||||
)
|
)
|
||||||
|
|
||||||
def get_remote_wallet_config_path(self) -> str:
|
|
||||||
"""
|
|
||||||
Returns node config file path located on remote host
|
|
||||||
"""
|
|
||||||
return self._get_attribute(
|
|
||||||
ConfigAttributes.REMOTE_WALLET_CONFIG,
|
|
||||||
)
|
|
||||||
|
|
||||||
def get_wallet_config_path(self) -> str:
|
def get_wallet_config_path(self) -> str:
|
||||||
return self._get_attribute(
|
return self._get_attribute(
|
||||||
ConfigAttributes.LOCAL_WALLET_CONFIG,
|
ConfigAttributes.LOCAL_WALLET_CONFIG,
|
||||||
|
@ -134,11 +125,8 @@ class NodeBase(HumanReadableABC):
|
||||||
Returns config path for logger located on remote host
|
Returns config path for logger located on remote host
|
||||||
"""
|
"""
|
||||||
config_attributes = self.host.get_service_config(self.name)
|
config_attributes = self.host.get_service_config(self.name)
|
||||||
return (
|
return self._get_attribute(
|
||||||
self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH)
|
ConfigAttributes.LOGGER_CONFIG_PATH) if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes else None
|
||||||
if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes
|
|
||||||
else None
|
|
||||||
)
|
|
||||||
|
|
||||||
@property
|
@property
|
||||||
def config_dir(self) -> str:
|
def config_dir(self) -> str:
|
||||||
|
@ -148,11 +136,7 @@ class NodeBase(HumanReadableABC):
|
||||||
def main_config_path(self) -> str:
|
def main_config_path(self) -> str:
|
||||||
return self._get_attribute(ConfigAttributes.CONFIG_PATH)
|
return self._get_attribute(ConfigAttributes.CONFIG_PATH)
|
||||||
|
|
||||||
@property
|
# TODO: Deprecated
|
||||||
def config(self) -> ServiceConfigurationYml:
|
|
||||||
return ServiceConfiguration(self.name, self.host.get_shell(), self.config_dir, self.main_config_path)
|
|
||||||
|
|
||||||
# TODO: Deprecated. Use config with ServiceConfigurationYml interface
|
|
||||||
def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]:
|
def get_config(self, config_file_path: Optional[str] = None) -> tuple[str, dict]:
|
||||||
if config_file_path is None:
|
if config_file_path is None:
|
||||||
config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
|
config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
|
||||||
|
@ -165,7 +149,7 @@ class NodeBase(HumanReadableABC):
|
||||||
config = yaml.safe_load(config_text)
|
config = yaml.safe_load(config_text)
|
||||||
return config_file_path, config
|
return config_file_path, config
|
||||||
|
|
||||||
# TODO: Deprecated. Use config with ServiceConfigurationYml interface
|
# TODO: Deprecated
|
||||||
def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None:
|
def save_config(self, new_config: dict, config_file_path: Optional[str] = None) -> None:
|
||||||
if config_file_path is None:
|
if config_file_path is None:
|
||||||
config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
|
config_file_path = self._get_attribute(ConfigAttributes.CONFIG_PATH)
|
||||||
|
|
|
@ -1,13 +0,0 @@
|
||||||
from dataclasses import dataclass
|
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class PlacementPolicy:
|
|
||||||
name: str
|
|
||||||
value: str
|
|
||||||
|
|
||||||
def __str__(self) -> str:
|
|
||||||
return self.name
|
|
||||||
|
|
||||||
def __repr__(self) -> str:
|
|
||||||
return self.__str__()
|
|
|
@ -1,6 +1,16 @@
|
||||||
|
import json
|
||||||
|
import pathlib
|
||||||
|
import re
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
|
from io import StringIO
|
||||||
|
|
||||||
|
import allure
|
||||||
|
import pytest
|
||||||
|
import yaml
|
||||||
from configobj import ConfigObj
|
from configobj import ConfigObj
|
||||||
|
from frostfs_testlib.cli import FrostfsCli
|
||||||
|
from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT
|
||||||
|
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
|
||||||
|
|
||||||
SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
|
SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
|
||||||
BLOBSTOR_PREFIX = "_BLOBSTOR_"
|
BLOBSTOR_PREFIX = "_BLOBSTOR_"
|
||||||
|
@ -84,5 +94,6 @@ class Shard:
|
||||||
blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]],
|
blobstor=[Blobstor(path=blobstor["path"], path_type=blobstor["type"]) for blobstor in shard["blobstor"]],
|
||||||
metabase=metabase,
|
metabase=metabase,
|
||||||
writecache=writecache,
|
writecache=writecache,
|
||||||
pilorama=pilorama,
|
pilorama=pilorama
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
|
|
||||||
from frostfs_testlib.testing.readable import HumanReadableEnum
|
from frostfs_testlib.testing.readable import HumanReadableEnum
|
||||||
|
|
||||||
|
|
||||||
|
@ -20,7 +19,7 @@ class LockObjectInfo(ObjectRef):
|
||||||
@dataclass
|
@dataclass
|
||||||
class StorageObjectInfo(ObjectRef):
|
class StorageObjectInfo(ObjectRef):
|
||||||
size: Optional[int] = None
|
size: Optional[int] = None
|
||||||
wallet: Optional[WalletInfo] = None
|
wallet_file_path: Optional[str] = None
|
||||||
file_path: Optional[str] = None
|
file_path: Optional[str] = None
|
||||||
file_hash: Optional[str] = None
|
file_hash: Optional[str] = None
|
||||||
attributes: Optional[list[dict[str, str]]] = None
|
attributes: Optional[list[dict[str, str]]] = None
|
||||||
|
@ -28,7 +27,7 @@ class StorageObjectInfo(ObjectRef):
|
||||||
locks: Optional[list[LockObjectInfo]] = None
|
locks: Optional[list[LockObjectInfo]] = None
|
||||||
|
|
||||||
|
|
||||||
class NodeStatus(HumanReadableEnum):
|
class ModeNode(HumanReadableEnum):
|
||||||
MAINTENANCE: str = "maintenance"
|
MAINTENANCE: str = "maintenance"
|
||||||
ONLINE: str = "online"
|
ONLINE: str = "online"
|
||||||
OFFLINE: str = "offline"
|
OFFLINE: str = "offline"
|
||||||
|
@ -37,7 +36,7 @@ class NodeStatus(HumanReadableEnum):
|
||||||
@dataclass
|
@dataclass
|
||||||
class NodeNetmapInfo:
|
class NodeNetmapInfo:
|
||||||
node_id: str = None
|
node_id: str = None
|
||||||
node_status: NodeStatus = None
|
node_status: ModeNode = None
|
||||||
node_data_ips: list[str] = None
|
node_data_ips: list[str] = None
|
||||||
cluster_name: str = None
|
cluster_name: str = None
|
||||||
continent: str = None
|
continent: str = None
|
||||||
|
|
|
@ -1,15 +1,13 @@
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import uuid
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
import yaml
|
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
|
||||||
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_CONFIG, DEFAULT_WALLET_PASS
|
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.storage.cluster import NodeBase
|
from frostfs_testlib.storage.cluster import Cluster, NodeBase
|
||||||
from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet
|
from frostfs_testlib.utils.wallet_utils import get_last_address_from_wallet, init_wallet
|
||||||
|
|
||||||
logger = logging.getLogger("frostfs.testlib.utils")
|
logger = logging.getLogger("frostfs.testlib.utils")
|
||||||
|
@ -23,13 +21,9 @@ class WalletInfo:
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def from_node(node: NodeBase):
|
def from_node(node: NodeBase):
|
||||||
wallet_path = node.get_wallet_path()
|
return WalletInfo(
|
||||||
wallet_password = node.get_wallet_password()
|
node.get_wallet_path(), node.get_wallet_password(), node.get_wallet_config_path()
|
||||||
wallet_config_file = os.path.join(ASSETS_DIR, os.path.basename(node.get_wallet_config_path()))
|
)
|
||||||
with open(wallet_config_file, "w") as file:
|
|
||||||
file.write(yaml.dump({"wallet": wallet_path, "password": wallet_password}))
|
|
||||||
|
|
||||||
return WalletInfo(wallet_path, wallet_password, wallet_config_file)
|
|
||||||
|
|
||||||
def get_address(self) -> str:
|
def get_address(self) -> str:
|
||||||
"""
|
"""
|
||||||
|
@ -53,17 +47,22 @@ class WalletInfo:
|
||||||
"""
|
"""
|
||||||
with open(self.path, "r") as wallet:
|
with open(self.path, "r") as wallet:
|
||||||
wallet_json = json.load(wallet)
|
wallet_json = json.load(wallet)
|
||||||
assert abs(account_id) + 1 <= len(wallet_json["accounts"]), f"There is no index '{account_id}' in wallet: {wallet_json}"
|
assert abs(account_id) + 1 <= len(
|
||||||
|
wallet_json["accounts"]
|
||||||
|
), f"There is no index '{account_id}' in wallet: {wallet_json}"
|
||||||
|
|
||||||
return wallet_json["accounts"][account_id]["address"]
|
return wallet_json["accounts"][account_id]["address"]
|
||||||
|
|
||||||
|
|
||||||
class WalletFactory:
|
class WalletFactory:
|
||||||
def __init__(self, wallets_dir: str, shell: Shell) -> None:
|
def __init__(self, wallets_dir: str, shell: Shell, cluster: Cluster) -> None:
|
||||||
self.shell = shell
|
self.shell = shell
|
||||||
self.wallets_dir = wallets_dir
|
self.wallets_dir = wallets_dir
|
||||||
|
self.cluster = cluster
|
||||||
|
|
||||||
def create_wallet(self, file_name: str, password: Optional[str] = None) -> WalletInfo:
|
def create_wallet(
|
||||||
|
self, file_name: Optional[str] = None, password: Optional[str] = None
|
||||||
|
) -> WalletInfo:
|
||||||
"""
|
"""
|
||||||
Creates new default wallet.
|
Creates new default wallet.
|
||||||
|
|
||||||
|
@ -75,6 +74,8 @@ class WalletFactory:
|
||||||
WalletInfo object of new wallet.
|
WalletInfo object of new wallet.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
if file_name is None:
|
||||||
|
file_name = str(uuid.uuid4())
|
||||||
if password is None:
|
if password is None:
|
||||||
password = ""
|
password = ""
|
||||||
|
|
||||||
|
@ -84,8 +85,6 @@ class WalletFactory:
|
||||||
init_wallet(wallet_path, password)
|
init_wallet(wallet_path, password)
|
||||||
|
|
||||||
with open(wallet_config_path, "w") as config_file:
|
with open(wallet_config_path, "w") as config_file:
|
||||||
config_file.write(f'wallet: {wallet_path}\npassword: "{password}"')
|
config_file.write(f'password: "{password}"')
|
||||||
|
|
||||||
reporter.attach(wallet_path, os.path.basename(wallet_path))
|
|
||||||
|
|
||||||
return WalletInfo(wallet_path, password, wallet_config_path)
|
return WalletInfo(wallet_path, password, wallet_config_path)
|
||||||
|
|
|
@ -41,7 +41,7 @@ def _run_with_passwd(cmd: str) -> str:
|
||||||
return cmd.decode()
|
return cmd.decode()
|
||||||
|
|
||||||
|
|
||||||
def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_format: str = "json") -> str:
|
def _configure_aws_cli(cmd: str, key_id: str, access_key: str, out_format: str = "json") -> str:
|
||||||
child = pexpect.spawn(cmd)
|
child = pexpect.spawn(cmd)
|
||||||
child.delaybeforesend = 1
|
child.delaybeforesend = 1
|
||||||
|
|
||||||
|
@ -52,7 +52,7 @@ def _configure_aws_cli(cmd: str, key_id: str, access_key: str, region: str, out_
|
||||||
child.sendline(access_key)
|
child.sendline(access_key)
|
||||||
|
|
||||||
child.expect("Default region name.*")
|
child.expect("Default region name.*")
|
||||||
child.sendline("region")
|
child.sendline("")
|
||||||
|
|
||||||
child.expect("Default output format.*")
|
child.expect("Default output format.*")
|
||||||
child.sendline(out_format)
|
child.sendline(out_format)
|
||||||
|
|
|
@ -1,17 +1,15 @@
|
||||||
import logging
|
import logging
|
||||||
import re
|
import re
|
||||||
|
|
||||||
from frostfs_testlib import reporter
|
|
||||||
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
|
from frostfs_testlib.cli import FrostfsAdm, FrostfsCli
|
||||||
from frostfs_testlib.hosting import Host, Hosting
|
from frostfs_testlib.hosting import Hosting
|
||||||
from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
|
from frostfs_testlib.resources.cli import FROSTFS_ADM_EXEC, FROSTFS_AUTHMATE_EXEC, FROSTFS_CLI_EXEC, NEOGO_EXECUTABLE
|
||||||
|
from frostfs_testlib.resources.common import DEFAULT_WALLET_CONFIG
|
||||||
from frostfs_testlib.shell import Shell
|
from frostfs_testlib.shell import Shell
|
||||||
from frostfs_testlib.testing.parallel import parallel
|
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get local binaries versions")
|
|
||||||
def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
|
def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
|
||||||
versions = {}
|
versions = {}
|
||||||
|
|
||||||
|
@ -19,7 +17,7 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
|
||||||
out = shell.exec(f"{binary} --version").stdout
|
out = shell.exec(f"{binary} --version").stdout
|
||||||
versions[binary] = _parse_version(out)
|
versions[binary] = _parse_version(out)
|
||||||
|
|
||||||
frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC)
|
frostfs_cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, DEFAULT_WALLET_CONFIG)
|
||||||
versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout)
|
versions[FROSTFS_CLI_EXEC] = _parse_version(frostfs_cli.version.get().stdout)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
|
@ -31,83 +29,52 @@ def get_local_binaries_versions(shell: Shell) -> dict[str, str]:
|
||||||
out = shell.exec("aws --version").stdout
|
out = shell.exec("aws --version").stdout
|
||||||
out_lines = out.split("\n")
|
out_lines = out.split("\n")
|
||||||
versions["AWS"] = out_lines[0] if out_lines else "Unknown"
|
versions["AWS"] = out_lines[0] if out_lines else "Unknown"
|
||||||
logger.info(f"Local binaries version: {out_lines[0]}")
|
|
||||||
|
|
||||||
return versions
|
return versions
|
||||||
|
|
||||||
|
|
||||||
def parallel_binary_verions(host: Host) -> dict[str, str]:
|
|
||||||
versions_by_host = {}
|
|
||||||
|
|
||||||
binary_path_by_name = {} # Maps binary name to executable path
|
|
||||||
for service_config in host.config.services:
|
|
||||||
exec_path = service_config.attributes.get("exec_path")
|
|
||||||
requires_check = service_config.attributes.get("requires_version_check", "true")
|
|
||||||
if exec_path:
|
|
||||||
binary_path_by_name[service_config.name] = {
|
|
||||||
"exec_path": exec_path,
|
|
||||||
"check": requires_check.lower() == "true",
|
|
||||||
}
|
|
||||||
for cli_config in host.config.clis:
|
|
||||||
requires_check = cli_config.attributes.get("requires_version_check", "true")
|
|
||||||
binary_path_by_name[cli_config.name] = {
|
|
||||||
"exec_path": cli_config.exec_path,
|
|
||||||
"check": requires_check.lower() == "true",
|
|
||||||
}
|
|
||||||
|
|
||||||
shell = host.get_shell()
|
|
||||||
versions_at_host = {}
|
|
||||||
for binary_name, binary in binary_path_by_name.items():
|
|
||||||
try:
|
|
||||||
binary_path = binary["exec_path"]
|
|
||||||
result = shell.exec(f"{binary_path} --version")
|
|
||||||
versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]}
|
|
||||||
except Exception as exc:
|
|
||||||
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
|
|
||||||
versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]}
|
|
||||||
versions_by_host[host.config.address] = versions_at_host
|
|
||||||
return versions_by_host
|
|
||||||
|
|
||||||
|
|
||||||
@reporter.step("Get remote binaries versions")
|
|
||||||
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
|
def get_remote_binaries_versions(hosting: Hosting) -> dict[str, str]:
|
||||||
versions_by_host = {}
|
versions_by_host = {}
|
||||||
future_binary_verions = parallel(parallel_binary_verions, parallel_items=hosting.hosts)
|
for host in hosting.hosts:
|
||||||
for future in future_binary_verions:
|
binary_path_by_name = {} # Maps binary name to executable path
|
||||||
versions_by_host.update(future.result())
|
for service_config in host.config.services:
|
||||||
|
exec_path = service_config.attributes.get("exec_path")
|
||||||
|
requires_check = service_config.attributes.get("requires_version_check", "true")
|
||||||
|
if exec_path:
|
||||||
|
binary_path_by_name[service_config.name] = {
|
||||||
|
"exec_path": exec_path,
|
||||||
|
"check": requires_check.lower() == "true",
|
||||||
|
}
|
||||||
|
for cli_config in host.config.clis:
|
||||||
|
requires_check = cli_config.attributes.get("requires_version_check", "true")
|
||||||
|
binary_path_by_name[cli_config.name] = {
|
||||||
|
"exec_path": cli_config.exec_path,
|
||||||
|
"check": requires_check.lower() == "true",
|
||||||
|
}
|
||||||
|
|
||||||
|
shell = host.get_shell()
|
||||||
|
versions_at_host = {}
|
||||||
|
for binary_name, binary in binary_path_by_name.items():
|
||||||
|
try:
|
||||||
|
binary_path = binary["exec_path"]
|
||||||
|
result = shell.exec(f"{binary_path} --version")
|
||||||
|
versions_at_host[binary_name] = {"version": _parse_version(result.stdout), "check": binary["check"]}
|
||||||
|
except Exception as exc:
|
||||||
|
logger.error(f"Cannot get version for {binary_path} because of\n{exc}")
|
||||||
|
versions_at_host[binary_name] = {"version": "Unknown", "check": binary["check"]}
|
||||||
|
versions_by_host[host.config.address] = versions_at_host
|
||||||
|
|
||||||
# Consolidate versions across all hosts
|
# Consolidate versions across all hosts
|
||||||
cheak_versions = {}
|
|
||||||
exсeptions = []
|
|
||||||
exception = set()
|
|
||||||
previous_host = None
|
|
||||||
versions = {}
|
versions = {}
|
||||||
captured_version = None
|
|
||||||
for host, binary_versions in versions_by_host.items():
|
for host, binary_versions in versions_by_host.items():
|
||||||
for name, binary in binary_versions.items():
|
for name, binary in binary_versions.items():
|
||||||
|
captured_version = versions.get(name, {}).get("version")
|
||||||
version = binary["version"]
|
version = binary["version"]
|
||||||
if not cheak_versions.get(f"{name[:-2]}", None):
|
if captured_version:
|
||||||
captured_version = cheak_versions.get(f"{name[:-2]}", {}).get(host, {}).get(captured_version)
|
assert captured_version == version, f"Binary {name} has inconsistent version on host {host}"
|
||||||
cheak_versions[f"{name[:-2]}"] = {host: {version: name}}
|
|
||||||
else:
|
else:
|
||||||
captured_version = list(cheak_versions.get(f"{name[:-2]}", {}).get(previous_host).keys())[0]
|
versions[name] = {"version": version, "check": binary["check"]}
|
||||||
cheak_versions[f"{name[:-2]}"].update({host: {version: name}})
|
return versions
|
||||||
|
|
||||||
if captured_version and captured_version != version:
|
|
||||||
exception.add(name[:-2])
|
|
||||||
|
|
||||||
versions[name] = {"version": version, "check": binary["check"]}
|
|
||||||
previous_host = host
|
|
||||||
logger.info(
|
|
||||||
"Remote binaries versions:\n" + "\n".join([f"{key} ver: {value['version']}" for key, value in versions.items()])
|
|
||||||
)
|
|
||||||
if exception:
|
|
||||||
for i in exception:
|
|
||||||
for host in versions_by_host.keys():
|
|
||||||
for version, name in cheak_versions.get(i).get(host).items():
|
|
||||||
exсeptions.append(f"Binary {name} has inconsistent version {version} on host {host}")
|
|
||||||
exсeptions.append("\n")
|
|
||||||
return versions, exсeptions
|
|
||||||
|
|
||||||
|
|
||||||
def _parse_version(version_output: str) -> str:
|
def _parse_version(version_output: str) -> str:
|
||||||
|
|
|
@ -9,16 +9,6 @@ from neo3.wallet import wallet as neo3_wallet
|
||||||
logger = logging.getLogger("frostfs.testlib.utils")
|
logger = logging.getLogger("frostfs.testlib.utils")
|
||||||
|
|
||||||
|
|
||||||
def __fix_wallet_schema(wallet: dict) -> None:
|
|
||||||
# Temporary function to fix wallets that do not conform to the schema
|
|
||||||
# TODO: get rid of it once issue is solved
|
|
||||||
if "name" not in wallet:
|
|
||||||
wallet["name"] = None
|
|
||||||
for account in wallet["accounts"]:
|
|
||||||
if "extra" not in account:
|
|
||||||
account["extra"] = None
|
|
||||||
|
|
||||||
|
|
||||||
def init_wallet(wallet_path: str, wallet_password: str):
|
def init_wallet(wallet_path: str, wallet_password: str):
|
||||||
"""
|
"""
|
||||||
Create new wallet and new account.
|
Create new wallet and new account.
|
||||||
|
@ -43,15 +33,29 @@ def get_last_address_from_wallet(wallet_path: str, wallet_password: str):
|
||||||
Returns:
|
Returns:
|
||||||
The address for the wallet.
|
The address for the wallet.
|
||||||
"""
|
"""
|
||||||
wallet = load_wallet(wallet_path, wallet_password)
|
with open(wallet_path) as wallet_file:
|
||||||
|
wallet = neo3_wallet.Wallet.from_json(json.load(wallet_file), password=wallet_password)
|
||||||
address = wallet.accounts[-1].address
|
address = wallet.accounts[-1].address
|
||||||
logger.info(f"got address: {address}")
|
logger.info(f"got address: {address}")
|
||||||
return address
|
return address
|
||||||
|
|
||||||
|
|
||||||
def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str:
|
def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str = "hex") -> str:
|
||||||
wallet = load_wallet(wallet_path, wallet_password)
|
def __fix_wallet_schema(wallet: dict) -> None:
|
||||||
public_key_hex = str(wallet.accounts[0].public_key)
|
# Temporary function to fix wallets that do not conform to the schema
|
||||||
|
# TODO: get rid of it once issue is solved
|
||||||
|
if "name" not in wallet:
|
||||||
|
wallet["name"] = None
|
||||||
|
for account in wallet["accounts"]:
|
||||||
|
if "extra" not in account:
|
||||||
|
account["extra"] = None
|
||||||
|
|
||||||
|
# Get public key from wallet file
|
||||||
|
with open(wallet_path, "r") as file:
|
||||||
|
wallet_content = json.load(file)
|
||||||
|
__fix_wallet_schema(wallet_content)
|
||||||
|
wallet_from_json = neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password)
|
||||||
|
public_key_hex = str(wallet_from_json.accounts[0].public_key)
|
||||||
|
|
||||||
# Convert public key to specified format
|
# Convert public key to specified format
|
||||||
if format == "hex":
|
if format == "hex":
|
||||||
|
@ -65,9 +69,7 @@ def get_wallet_public_key(wallet_path: str, wallet_password: str, format: str =
|
||||||
raise ValueError(f"Invalid public key format: {format}")
|
raise ValueError(f"Invalid public key format: {format}")
|
||||||
|
|
||||||
|
|
||||||
def load_wallet(wallet_path: str, wallet_password: str) -> neo3_wallet.Wallet:
|
def load_wallet(path: str, passwd: str = "") -> neo3_wallet.Wallet:
|
||||||
with open(wallet_path) as wallet_file:
|
with open(path, "r") as wallet_file:
|
||||||
wallet_content = json.load(wallet_file)
|
wlt_data = wallet_file.read()
|
||||||
|
return neo3_wallet.Wallet.from_json(json.loads(wlt_data), password=passwd)
|
||||||
__fix_wallet_schema(wallet_content)
|
|
||||||
return neo3_wallet.Wallet.from_json(wallet_content, password=wallet_password)
|
|
||||||
|
|
|
@ -4,7 +4,13 @@ import pytest
|
||||||
|
|
||||||
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper
|
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper
|
||||||
from frostfs_testlib.storage.dataclasses.acl import EACLRole
|
from frostfs_testlib.storage.dataclasses.acl import EACLRole
|
||||||
from frostfs_testlib.storage.dataclasses.frostfs_services import HTTPGate, InnerRing, MorphChain, S3Gate, StorageNode
|
from frostfs_testlib.storage.dataclasses.frostfs_services import (
|
||||||
|
HTTPGate,
|
||||||
|
InnerRing,
|
||||||
|
MorphChain,
|
||||||
|
S3Gate,
|
||||||
|
StorageNode,
|
||||||
|
)
|
||||||
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
|
||||||
|
|
||||||
|
|
||||||
|
@ -16,10 +22,10 @@ class TestDataclassesStr:
|
||||||
[
|
[
|
||||||
(Boto3ClientWrapper, "Boto3 client"),
|
(Boto3ClientWrapper, "Boto3 client"),
|
||||||
(AwsCliClient, "AWS CLI"),
|
(AwsCliClient, "AWS CLI"),
|
||||||
(ObjectSize("simple", 1), "simple"),
|
(ObjectSize("simple", 1), "simple object size"),
|
||||||
(ObjectSize("simple", 10), "simple"),
|
(ObjectSize("simple", 10), "simple object size"),
|
||||||
(ObjectSize("complex", 5000), "complex"),
|
(ObjectSize("complex", 5000), "complex object size"),
|
||||||
(ObjectSize("complex", 5555), "complex"),
|
(ObjectSize("complex", 5555), "complex object size"),
|
||||||
(StorageNode, "StorageNode"),
|
(StorageNode, "StorageNode"),
|
||||||
(MorphChain, "MorphChain"),
|
(MorphChain, "MorphChain"),
|
||||||
(S3Gate, "S3Gate"),
|
(S3Gate, "S3Gate"),
|
||||||
|
|
|
@ -15,7 +15,6 @@ class TestHosting(TestCase):
|
||||||
HOST1 = {
|
HOST1 = {
|
||||||
"address": HOST1_ADDRESS,
|
"address": HOST1_ADDRESS,
|
||||||
"plugin_name": HOST1_PLUGIN,
|
"plugin_name": HOST1_PLUGIN,
|
||||||
"healthcheck_plugin_name": "basic",
|
|
||||||
"attributes": HOST1_ATTRIBUTES,
|
"attributes": HOST1_ATTRIBUTES,
|
||||||
"clis": HOST1_CLIS,
|
"clis": HOST1_CLIS,
|
||||||
"services": HOST1_SERVICES,
|
"services": HOST1_SERVICES,
|
||||||
|
@ -33,7 +32,6 @@ class TestHosting(TestCase):
|
||||||
HOST2 = {
|
HOST2 = {
|
||||||
"address": HOST2_ADDRESS,
|
"address": HOST2_ADDRESS,
|
||||||
"plugin_name": HOST2_PLUGIN,
|
"plugin_name": HOST2_PLUGIN,
|
||||||
"healthcheck_plugin_name": "basic",
|
|
||||||
"attributes": HOST2_ATTRIBUTES,
|
"attributes": HOST2_ATTRIBUTES,
|
||||||
"clis": HOST2_CLIS,
|
"clis": HOST2_CLIS,
|
||||||
"services": HOST2_SERVICES,
|
"services": HOST2_SERVICES,
|
||||||
|
@ -54,14 +52,18 @@ class TestHosting(TestCase):
|
||||||
self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN)
|
self.assertEqual(host1.config.plugin_name, self.HOST1_PLUGIN)
|
||||||
self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES)
|
self.assertDictEqual(host1.config.attributes, self.HOST1_ATTRIBUTES)
|
||||||
self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS])
|
self.assertListEqual(host1.config.clis, [CLIConfig(**cli) for cli in self.HOST1_CLIS])
|
||||||
self.assertListEqual(host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES])
|
self.assertListEqual(
|
||||||
|
host1.config.services, [ServiceConfig(**service) for service in self.HOST1_SERVICES]
|
||||||
|
)
|
||||||
|
|
||||||
host2 = hosting.get_host_by_address(self.HOST2_ADDRESS)
|
host2 = hosting.get_host_by_address(self.HOST2_ADDRESS)
|
||||||
self.assertEqual(host2.config.address, self.HOST2_ADDRESS)
|
self.assertEqual(host2.config.address, self.HOST2_ADDRESS)
|
||||||
self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN)
|
self.assertEqual(host2.config.plugin_name, self.HOST2_PLUGIN)
|
||||||
self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES)
|
self.assertDictEqual(host2.config.attributes, self.HOST2_ATTRIBUTES)
|
||||||
self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS])
|
self.assertListEqual(host2.config.clis, [CLIConfig(**cli) for cli in self.HOST2_CLIS])
|
||||||
self.assertListEqual(host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES])
|
self.assertListEqual(
|
||||||
|
host2.config.services, [ServiceConfig(**service) for service in self.HOST2_SERVICES]
|
||||||
|
)
|
||||||
|
|
||||||
def test_get_host_by_service(self):
|
def test_get_host_by_service(self):
|
||||||
hosting = Hosting()
|
hosting = Hosting()
|
||||||
|
@ -102,7 +104,9 @@ class TestHosting(TestCase):
|
||||||
services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}")
|
services = hosting.find_service_configs(rf"^{self.SERVICE_NAME_PREFIX}")
|
||||||
self.assertEqual(len(services), 2)
|
self.assertEqual(len(services), 2)
|
||||||
for service in services:
|
for service in services:
|
||||||
self.assertEqual(service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX)
|
self.assertEqual(
|
||||||
|
service.name[: len(self.SERVICE_NAME_PREFIX)], self.SERVICE_NAME_PREFIX
|
||||||
|
)
|
||||||
|
|
||||||
service1 = hosting.find_service_configs(self.SERVICE1["name"])
|
service1 = hosting.find_service_configs(self.SERVICE1["name"])
|
||||||
self.assertEqual(len(service1), 1)
|
self.assertEqual(len(service1), 1)
|
||||||
|
|
|
@ -136,7 +136,6 @@ class TestLoadConfig:
|
||||||
def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
|
def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
|
||||||
expected_preset_args = [
|
expected_preset_args = [
|
||||||
"--size '11'",
|
"--size '11'",
|
||||||
"--acl 'acl'",
|
|
||||||
"--preload_obj '13'",
|
"--preload_obj '13'",
|
||||||
"--out 'pregen_json'",
|
"--out 'pregen_json'",
|
||||||
"--workers '7'",
|
"--workers '7'",
|
||||||
|
@ -144,7 +143,6 @@ class TestLoadConfig:
|
||||||
"--policy 'container_placement_policy'",
|
"--policy 'container_placement_policy'",
|
||||||
"--ignore-errors",
|
"--ignore-errors",
|
||||||
"--sleep '19'",
|
"--sleep '19'",
|
||||||
"--local",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 9,
|
"DURATION": 9,
|
||||||
|
@ -156,8 +154,6 @@ class TestLoadConfig:
|
||||||
"READERS": 7,
|
"READERS": 7,
|
||||||
"DELETERS": 8,
|
"DELETERS": 8,
|
||||||
"READ_AGE": 8,
|
"READ_AGE": 8,
|
||||||
"STREAMING": 9,
|
|
||||||
"K6_OUT": "output",
|
|
||||||
"PREGEN_JSON": "pregen_json",
|
"PREGEN_JSON": "pregen_json",
|
||||||
"PREPARE_LOCALLY": True,
|
"PREPARE_LOCALLY": True,
|
||||||
}
|
}
|
||||||
|
@ -176,13 +172,10 @@ class TestLoadConfig:
|
||||||
"--policy 'container_placement_policy'",
|
"--policy 'container_placement_policy'",
|
||||||
"--ignore-errors",
|
"--ignore-errors",
|
||||||
"--sleep '19'",
|
"--sleep '19'",
|
||||||
"--local",
|
|
||||||
"--acl 'acl'",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 9,
|
"DURATION": 9,
|
||||||
"WRITE_OBJ_SIZE": 11,
|
"WRITE_OBJ_SIZE": 11,
|
||||||
"K6_OUT": "output",
|
|
||||||
"REGISTRY_FILE": "registry_file",
|
"REGISTRY_FILE": "registry_file",
|
||||||
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
||||||
"K6_SETUP_TIMEOUT": "setup_timeout",
|
"K6_SETUP_TIMEOUT": "setup_timeout",
|
||||||
|
@ -198,7 +191,6 @@ class TestLoadConfig:
|
||||||
"READ_RATE": 9,
|
"READ_RATE": 9,
|
||||||
"READ_AGE": 8,
|
"READ_AGE": 8,
|
||||||
"DELETE_RATE": 11,
|
"DELETE_RATE": 11,
|
||||||
"STREAMING": 9,
|
|
||||||
"PREPARE_LOCALLY": True,
|
"PREPARE_LOCALLY": True,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -217,20 +209,17 @@ class TestLoadConfig:
|
||||||
"--location 's3_location'",
|
"--location 's3_location'",
|
||||||
"--ignore-errors",
|
"--ignore-errors",
|
||||||
"--sleep '19'",
|
"--sleep '19'",
|
||||||
"--acl 'acl'",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 9,
|
"DURATION": 9,
|
||||||
"WRITE_OBJ_SIZE": 11,
|
"WRITE_OBJ_SIZE": 11,
|
||||||
"REGISTRY_FILE": "registry_file",
|
"REGISTRY_FILE": "registry_file",
|
||||||
"K6_OUT": "output",
|
|
||||||
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
||||||
"K6_SETUP_TIMEOUT": "setup_timeout",
|
"K6_SETUP_TIMEOUT": "setup_timeout",
|
||||||
"WRITERS": 7,
|
"WRITERS": 7,
|
||||||
"READERS": 7,
|
"READERS": 7,
|
||||||
"DELETERS": 8,
|
"DELETERS": 8,
|
||||||
"READ_AGE": 8,
|
"READ_AGE": 8,
|
||||||
"STREAMING": 9,
|
|
||||||
"NO_VERIFY_SSL": True,
|
"NO_VERIFY_SSL": True,
|
||||||
"PREGEN_JSON": "pregen_json",
|
"PREGEN_JSON": "pregen_json",
|
||||||
}
|
}
|
||||||
|
@ -251,13 +240,11 @@ class TestLoadConfig:
|
||||||
"--location 's3_location'",
|
"--location 's3_location'",
|
||||||
"--ignore-errors",
|
"--ignore-errors",
|
||||||
"--sleep '19'",
|
"--sleep '19'",
|
||||||
"--acl 'acl'",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 183900,
|
"DURATION": 183900,
|
||||||
"WRITE_OBJ_SIZE": 11,
|
"WRITE_OBJ_SIZE": 11,
|
||||||
"REGISTRY_FILE": "registry_file",
|
"REGISTRY_FILE": "registry_file",
|
||||||
"K6_OUT": "output",
|
|
||||||
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
||||||
"K6_SETUP_TIMEOUT": "setup_timeout",
|
"K6_SETUP_TIMEOUT": "setup_timeout",
|
||||||
"NO_VERIFY_SSL": True,
|
"NO_VERIFY_SSL": True,
|
||||||
|
@ -272,7 +259,6 @@ class TestLoadConfig:
|
||||||
"WRITE_RATE": 10,
|
"WRITE_RATE": 10,
|
||||||
"READ_RATE": 9,
|
"READ_RATE": 9,
|
||||||
"READ_AGE": 8,
|
"READ_AGE": 8,
|
||||||
"STREAMING": 9,
|
|
||||||
"DELETE_RATE": 11,
|
"DELETE_RATE": 11,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -291,13 +277,11 @@ class TestLoadConfig:
|
||||||
"--location 's3_location'",
|
"--location 's3_location'",
|
||||||
"--ignore-errors",
|
"--ignore-errors",
|
||||||
"--sleep '19'",
|
"--sleep '19'",
|
||||||
"--acl 'acl'",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 9,
|
"DURATION": 9,
|
||||||
"WRITE_OBJ_SIZE": 11,
|
"WRITE_OBJ_SIZE": 11,
|
||||||
"REGISTRY_FILE": "registry_file",
|
"REGISTRY_FILE": "registry_file",
|
||||||
"K6_OUT": "output",
|
|
||||||
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
||||||
"K6_SETUP_TIMEOUT": "setup_timeout",
|
"K6_SETUP_TIMEOUT": "setup_timeout",
|
||||||
"NO_VERIFY_SSL": True,
|
"NO_VERIFY_SSL": True,
|
||||||
|
@ -312,7 +296,6 @@ class TestLoadConfig:
|
||||||
"WRITE_RATE": 10,
|
"WRITE_RATE": 10,
|
||||||
"READ_RATE": 9,
|
"READ_RATE": 9,
|
||||||
"READ_AGE": 8,
|
"READ_AGE": 8,
|
||||||
"STREAMING": 9,
|
|
||||||
"DELETE_RATE": 11,
|
"DELETE_RATE": 11,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -321,7 +304,6 @@ class TestLoadConfig:
|
||||||
|
|
||||||
@pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True)
|
@pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True)
|
||||||
def test_argument_parsing_for_http_scenario(self, load_params: LoadParams):
|
def test_argument_parsing_for_http_scenario(self, load_params: LoadParams):
|
||||||
load_params.preset.local = False
|
|
||||||
expected_preset_args = [
|
expected_preset_args = [
|
||||||
"--no-verify-ssl",
|
"--no-verify-ssl",
|
||||||
"--size '11'",
|
"--size '11'",
|
||||||
|
@ -332,12 +314,10 @@ class TestLoadConfig:
|
||||||
"--policy 'container_placement_policy'",
|
"--policy 'container_placement_policy'",
|
||||||
"--ignore-errors",
|
"--ignore-errors",
|
||||||
"--sleep '19'",
|
"--sleep '19'",
|
||||||
"--acl 'acl'",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 9,
|
"DURATION": 9,
|
||||||
"WRITE_OBJ_SIZE": 11,
|
"WRITE_OBJ_SIZE": 11,
|
||||||
"K6_OUT": "output",
|
|
||||||
"NO_VERIFY_SSL": True,
|
"NO_VERIFY_SSL": True,
|
||||||
"REGISTRY_FILE": "registry_file",
|
"REGISTRY_FILE": "registry_file",
|
||||||
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
||||||
|
@ -346,7 +326,6 @@ class TestLoadConfig:
|
||||||
"READERS": 7,
|
"READERS": 7,
|
||||||
"DELETERS": 8,
|
"DELETERS": 8,
|
||||||
"READ_AGE": 8,
|
"READ_AGE": 8,
|
||||||
"STREAMING": 9,
|
|
||||||
"PREGEN_JSON": "pregen_json",
|
"PREGEN_JSON": "pregen_json",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -355,7 +334,6 @@ class TestLoadConfig:
|
||||||
|
|
||||||
@pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True)
|
@pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True)
|
||||||
def test_argument_parsing_for_local_scenario(self, load_params: LoadParams):
|
def test_argument_parsing_for_local_scenario(self, load_params: LoadParams):
|
||||||
load_params.preset.local = False
|
|
||||||
expected_preset_args = [
|
expected_preset_args = [
|
||||||
"--size '11'",
|
"--size '11'",
|
||||||
"--preload_obj '13'",
|
"--preload_obj '13'",
|
||||||
|
@ -365,13 +343,11 @@ class TestLoadConfig:
|
||||||
"--policy 'container_placement_policy'",
|
"--policy 'container_placement_policy'",
|
||||||
"--ignore-errors",
|
"--ignore-errors",
|
||||||
"--sleep '19'",
|
"--sleep '19'",
|
||||||
"--acl 'acl'",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"CONFIG_FILE": "config_file",
|
"CONFIG_FILE": "config_file",
|
||||||
"DURATION": 9,
|
"DURATION": 9,
|
||||||
"WRITE_OBJ_SIZE": 11,
|
"WRITE_OBJ_SIZE": 11,
|
||||||
"K6_OUT": "output",
|
|
||||||
"REGISTRY_FILE": "registry_file",
|
"REGISTRY_FILE": "registry_file",
|
||||||
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
"K6_MIN_ITERATION_DURATION": "min_iteration_duration",
|
||||||
"K6_SETUP_TIMEOUT": "setup_timeout",
|
"K6_SETUP_TIMEOUT": "setup_timeout",
|
||||||
|
@ -379,7 +355,6 @@ class TestLoadConfig:
|
||||||
"READERS": 7,
|
"READERS": 7,
|
||||||
"DELETERS": 8,
|
"DELETERS": 8,
|
||||||
"READ_AGE": 8,
|
"READ_AGE": 8,
|
||||||
"STREAMING": 9,
|
|
||||||
"PREGEN_JSON": "pregen_json",
|
"PREGEN_JSON": "pregen_json",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -420,20 +395,17 @@ class TestLoadConfig:
|
||||||
"--containers '0'",
|
"--containers '0'",
|
||||||
"--policy ''",
|
"--policy ''",
|
||||||
"--sleep '0'",
|
"--sleep '0'",
|
||||||
"--acl ''",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 0,
|
"DURATION": 0,
|
||||||
"WRITE_OBJ_SIZE": 0,
|
"WRITE_OBJ_SIZE": 0,
|
||||||
"REGISTRY_FILE": "",
|
"REGISTRY_FILE": "",
|
||||||
"K6_OUT": "",
|
|
||||||
"K6_MIN_ITERATION_DURATION": "",
|
"K6_MIN_ITERATION_DURATION": "",
|
||||||
"K6_SETUP_TIMEOUT": "",
|
"K6_SETUP_TIMEOUT": "",
|
||||||
"WRITERS": 0,
|
"WRITERS": 0,
|
||||||
"READERS": 0,
|
"READERS": 0,
|
||||||
"DELETERS": 0,
|
"DELETERS": 0,
|
||||||
"READ_AGE": 0,
|
"READ_AGE": 0,
|
||||||
"STREAMING": 0,
|
|
||||||
"PREGEN_JSON": "",
|
"PREGEN_JSON": "",
|
||||||
"PREPARE_LOCALLY": False,
|
"PREPARE_LOCALLY": False,
|
||||||
}
|
}
|
||||||
|
@ -451,13 +423,11 @@ class TestLoadConfig:
|
||||||
"--containers '0'",
|
"--containers '0'",
|
||||||
"--policy ''",
|
"--policy ''",
|
||||||
"--sleep '0'",
|
"--sleep '0'",
|
||||||
"--acl ''",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 0,
|
"DURATION": 0,
|
||||||
"WRITE_OBJ_SIZE": 0,
|
"WRITE_OBJ_SIZE": 0,
|
||||||
"REGISTRY_FILE": "",
|
"REGISTRY_FILE": "",
|
||||||
"K6_OUT": "",
|
|
||||||
"K6_MIN_ITERATION_DURATION": "",
|
"K6_MIN_ITERATION_DURATION": "",
|
||||||
"K6_SETUP_TIMEOUT": "",
|
"K6_SETUP_TIMEOUT": "",
|
||||||
"MAX_WRITERS": 0,
|
"MAX_WRITERS": 0,
|
||||||
|
@ -472,7 +442,6 @@ class TestLoadConfig:
|
||||||
"READ_RATE": 0,
|
"READ_RATE": 0,
|
||||||
"DELETE_RATE": 0,
|
"DELETE_RATE": 0,
|
||||||
"READ_AGE": 0,
|
"READ_AGE": 0,
|
||||||
"STREAMING": 0,
|
|
||||||
"PREPARE_LOCALLY": False,
|
"PREPARE_LOCALLY": False,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -489,20 +458,17 @@ class TestLoadConfig:
|
||||||
"--buckets '0'",
|
"--buckets '0'",
|
||||||
"--location ''",
|
"--location ''",
|
||||||
"--sleep '0'",
|
"--sleep '0'",
|
||||||
"--acl ''",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 0,
|
"DURATION": 0,
|
||||||
"WRITE_OBJ_SIZE": 0,
|
"WRITE_OBJ_SIZE": 0,
|
||||||
"REGISTRY_FILE": "",
|
"REGISTRY_FILE": "",
|
||||||
"K6_OUT": "",
|
|
||||||
"K6_MIN_ITERATION_DURATION": "",
|
"K6_MIN_ITERATION_DURATION": "",
|
||||||
"K6_SETUP_TIMEOUT": "",
|
"K6_SETUP_TIMEOUT": "",
|
||||||
"WRITERS": 0,
|
"WRITERS": 0,
|
||||||
"READERS": 0,
|
"READERS": 0,
|
||||||
"DELETERS": 0,
|
"DELETERS": 0,
|
||||||
"READ_AGE": 0,
|
"READ_AGE": 0,
|
||||||
"STREAMING": 0,
|
|
||||||
"NO_VERIFY_SSL": False,
|
"NO_VERIFY_SSL": False,
|
||||||
"PREGEN_JSON": "",
|
"PREGEN_JSON": "",
|
||||||
}
|
}
|
||||||
|
@ -520,13 +486,11 @@ class TestLoadConfig:
|
||||||
"--buckets '0'",
|
"--buckets '0'",
|
||||||
"--location ''",
|
"--location ''",
|
||||||
"--sleep '0'",
|
"--sleep '0'",
|
||||||
"--acl ''",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 0,
|
"DURATION": 0,
|
||||||
"WRITE_OBJ_SIZE": 0,
|
"WRITE_OBJ_SIZE": 0,
|
||||||
"REGISTRY_FILE": "",
|
"REGISTRY_FILE": "",
|
||||||
"K6_OUT": "",
|
|
||||||
"K6_MIN_ITERATION_DURATION": "",
|
"K6_MIN_ITERATION_DURATION": "",
|
||||||
"K6_SETUP_TIMEOUT": "",
|
"K6_SETUP_TIMEOUT": "",
|
||||||
"NO_VERIFY_SSL": False,
|
"NO_VERIFY_SSL": False,
|
||||||
|
@ -542,7 +506,6 @@ class TestLoadConfig:
|
||||||
"READ_RATE": 0,
|
"READ_RATE": 0,
|
||||||
"DELETE_RATE": 0,
|
"DELETE_RATE": 0,
|
||||||
"READ_AGE": 0,
|
"READ_AGE": 0,
|
||||||
"STREAMING": 0,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
self._check_preset_params(load_params, expected_preset_args)
|
self._check_preset_params(load_params, expected_preset_args)
|
||||||
|
@ -558,21 +521,18 @@ class TestLoadConfig:
|
||||||
"--containers '0'",
|
"--containers '0'",
|
||||||
"--policy ''",
|
"--policy ''",
|
||||||
"--sleep '0'",
|
"--sleep '0'",
|
||||||
"--acl ''",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"DURATION": 0,
|
"DURATION": 0,
|
||||||
"WRITE_OBJ_SIZE": 0,
|
"WRITE_OBJ_SIZE": 0,
|
||||||
"NO_VERIFY_SSL": False,
|
"NO_VERIFY_SSL": False,
|
||||||
"REGISTRY_FILE": "",
|
"REGISTRY_FILE": "",
|
||||||
"K6_OUT": "",
|
|
||||||
"K6_MIN_ITERATION_DURATION": "",
|
"K6_MIN_ITERATION_DURATION": "",
|
||||||
"K6_SETUP_TIMEOUT": "",
|
"K6_SETUP_TIMEOUT": "",
|
||||||
"WRITERS": 0,
|
"WRITERS": 0,
|
||||||
"READERS": 0,
|
"READERS": 0,
|
||||||
"DELETERS": 0,
|
"DELETERS": 0,
|
||||||
"READ_AGE": 0,
|
"READ_AGE": 0,
|
||||||
"STREAMING": 0,
|
|
||||||
"PREGEN_JSON": "",
|
"PREGEN_JSON": "",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -589,21 +549,18 @@ class TestLoadConfig:
|
||||||
"--containers '0'",
|
"--containers '0'",
|
||||||
"--policy ''",
|
"--policy ''",
|
||||||
"--sleep '0'",
|
"--sleep '0'",
|
||||||
"--acl ''",
|
|
||||||
]
|
]
|
||||||
expected_env_vars = {
|
expected_env_vars = {
|
||||||
"CONFIG_FILE": "",
|
"CONFIG_FILE": "",
|
||||||
"DURATION": 0,
|
"DURATION": 0,
|
||||||
"WRITE_OBJ_SIZE": 0,
|
"WRITE_OBJ_SIZE": 0,
|
||||||
"REGISTRY_FILE": "",
|
"REGISTRY_FILE": "",
|
||||||
"K6_OUT": "",
|
|
||||||
"K6_MIN_ITERATION_DURATION": "",
|
"K6_MIN_ITERATION_DURATION": "",
|
||||||
"K6_SETUP_TIMEOUT": "",
|
"K6_SETUP_TIMEOUT": "",
|
||||||
"WRITERS": 0,
|
"WRITERS": 0,
|
||||||
"READERS": 0,
|
"READERS": 0,
|
||||||
"DELETERS": 0,
|
"DELETERS": 0,
|
||||||
"READ_AGE": 0,
|
"READ_AGE": 0,
|
||||||
"STREAMING": 0,
|
|
||||||
"PREGEN_JSON": "",
|
"PREGEN_JSON": "",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -668,7 +625,7 @@ class TestLoadConfig:
|
||||||
assert sorted(preset_parameters) == sorted(expected_preset_args)
|
assert sorted(preset_parameters) == sorted(expected_preset_args)
|
||||||
|
|
||||||
def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]):
|
def _check_env_vars(self, load_params: LoadParams, expected_env_vars: dict[str, str]):
|
||||||
env_vars = load_params.get_k6_vars()
|
env_vars = load_params.get_env_vars()
|
||||||
assert env_vars == expected_env_vars
|
assert env_vars == expected_env_vars
|
||||||
|
|
||||||
def _check_all_values_none(self, dataclass, skip_fields=None):
|
def _check_all_values_none(self, dataclass, skip_fields=None):
|
||||||
|
|
Loading…
Reference in a new issue