Compare commits

9 commits: EliChin/fe ... master

| Author | SHA1 | Date |
|---|---|---|
| | c3947b0716 | |
| | c97855dcee | |
| | c997e23194 | |
| | cff0e0f23e | |
| | ef5e142015 | |
| | 06dc226ef8 | |
| | ac7dae0d2d | |
| | 3802df25fe | |
| | 4755a2e167 | |

24 changed files with 333 additions and 812 deletions
@@ -48,20 +48,8 @@ To setup development environment for `frostfs-testcases`, please take the following steps:

1. Prepare virtualenv

```shell
$ virtualenv --python=python3.9 venv
$ source venv/bin/activate
```

2. Install all dependencies:

```shell
$ pip install -r requirements.txt
```

3. Setup pre-commit hooks to run code formatters on staged files before you run a `git commit` command:

```shell
$ pre-commit install
$ make venv
$ source frostfs-testcases-3.10/bin/activate
```

Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go:
Makefile (45)

@@ -1,28 +1,29 @@

#!/usr/bin/make -f
SHELL := /bin/bash
PYTHON_VERSION := 3.10
VENV_NAME = frostfs-testcases-${PYTHON_VERSION}
VENV_DIR := venv.${VENV_NAME}

.DEFAULT_GOAL := help
current_dir := $(shell pwd)

SHELL ?= bash
venv: create requirements paths precommit
	@echo Ready

VENVS = $(shell ls -1d venv/*/ | sort -u | xargs basename -a)
precommit:
	@echo Installing pre-commit hooks
	. ${VENV_DIR}/bin/activate && pre-commit install

.PHONY: all
all: venvs
paths:
	@echo Append paths for project
	@echo Virtual environment: ${VENV_DIR}
	@sudo rm -rf ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
	@sudo touch ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth
	@echo ${current_dir} | sudo tee ${VENV_DIR}/lib/python${PYTHON_VERSION}/site-packages/_paths.pth

include venv_template.mk
create:
	@echo Create virtual environment for
	virtualenv --python=python${PYTHON_VERSION} --prompt=${VENV_NAME} ${VENV_DIR}

.PHONY: venvs
venvs:
	$(foreach venv,$(VENVS),venv.$(venv))

$(foreach venv,$(VENVS),$(eval $(call VENV_template,$(venv))))

clean:
	rm -rf venv.*

pytest-local:
	@echo "⇒ Run Pytest"
	python -m pytest pytest_tests/testsuites/

help:
	@echo "⇒ run Run testcases ${R}"
requirements:
	@echo Installing pip requirements
	. ${VENV_DIR}/bin/activate && pip install -e ../frostfs-testlib
	. ${VENV_DIR}/bin/activate && pip install -Ur pytest_tests/requirements.txt
README.md (12)

@@ -49,17 +49,11 @@ As we use frostfs-dev-env, you'll also need to install

6. Prepare virtualenv

```shell
$ make venv.local-pytest
$ . venv.local-pytest/bin/activate
$ make venv
$ source venv.frostfs-testcases-3.10/bin/activate
```

7. Setup pre-commit hooks to run code formatters on staged files before you run a `git commit` command:

```shell
$ pre-commit install
```

Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go:
7. Optionally you might want to integrate code formatters with your code editor to apply formatters to code files as you go:
* isort is supported by [PyCharm](https://plugins.jetbrains.com/plugin/15434-isortconnect), [VS Code](https://cereblanco.medium.com/setup-black-and-isort-in-vscode-514804590bf9). Plugins exist for other IDEs/editors as well.
* black can be integrated with multiple editors; instructions are available [here](https://black.readthedocs.io/en/stable/integrations/editors.html).
@@ -20,21 +20,25 @@ from pytest_tests.helpers import frostfs_verbs

from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.helpers.frostfs_verbs import head_object
from pytest_tests.helpers.storage_object_info import StorageObjectInfo
from pytest_tests.resources.common import WALLET_CONFIG
from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT, WALLET_CONFIG

logger = logging.getLogger("NeoLogger")


def get_storage_object_chunks(
    storage_object: StorageObjectInfo, shell: Shell, cluster: Cluster
    storage_object: StorageObjectInfo,
    shell: Shell,
    cluster: Cluster,
    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
) -> list[str]:
    """
    Get complex object split objects ids (no linker object)

    Args:
        storage_object: storage_object to get it's chunks
        shell: client shell to do cmd requests
        cluster: cluster object under test
        storage_object: storage_object to get it's chunks
        shell: client shell to do cmd requests
        cluster: cluster object under test
        timeout: Timeout for an operation.

    Returns:
        list of object ids of complex object chunks
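For illustration, a minimal, hypothetical sketch of how a test might call the updated helper with an explicit per-call timeout instead of relying on `CLI_DEFAULT_TIMEOUT`; the fixture names and the `"120s"` duration string are assumptions, not part of this change set:

```python
# Hypothetical usage sketch: fixture values and the timeout string are placeholders.
from pytest_tests.helpers.complex_object_actions import get_storage_object_chunks

def test_complex_object_has_chunks(storage_object, client_shell, cluster):
    # Pass an explicit per-call timeout; omitting it falls back to CLI_DEFAULT_TIMEOUT.
    chunk_ids = get_storage_object_chunks(
        storage_object,
        client_shell,
        cluster,
        timeout="120s",  # duration-string format assumed
    )
    assert len(chunk_ids) > 0
```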
@ -48,6 +52,7 @@ def get_storage_object_chunks(
|
|||
shell,
|
||||
cluster.storage_nodes,
|
||||
is_direct=False,
|
||||
timeout=timeout,
|
||||
)
|
||||
head = head_object(
|
||||
storage_object.wallet_file_path,
|
||||
|
@ -55,6 +60,7 @@ def get_storage_object_chunks(
|
|||
split_object_id,
|
||||
shell,
|
||||
cluster.default_rpc_endpoint,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
chunks_object_ids = []
|
||||
|
@ -65,7 +71,10 @@ def get_storage_object_chunks(
|
|||
|
||||
|
||||
def get_complex_object_split_ranges(
|
||||
storage_object: StorageObjectInfo, shell: Shell, cluster: Cluster
|
||||
storage_object: StorageObjectInfo,
|
||||
shell: Shell,
|
||||
cluster: Cluster,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> list[Tuple[int, int]]:
|
||||
|
||||
"""
|
||||
|
@ -75,9 +84,10 @@ def get_complex_object_split_ranges(
|
|||
[(0, 30), (30, 30), (60, 30), (90, 10)]
|
||||
|
||||
Args:
|
||||
storage_object: storage_object to get it's chunks
|
||||
shell: client shell to do cmd requests
|
||||
cluster: cluster object under test
|
||||
storage_object: storage_object to get it's chunks
|
||||
shell: client shell to do cmd requests
|
||||
cluster: cluster object under test
|
||||
timeout: Timeout for an operation.
|
||||
|
||||
Returns:
|
||||
list of object ids of complex object chunks
|
||||
|
@ -93,6 +103,7 @@ def get_complex_object_split_ranges(
|
|||
chunk_id,
|
||||
shell,
|
||||
cluster.default_rpc_endpoint,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
length = int(head["header"]["payloadLength"])
|
||||
|
@ -113,6 +124,7 @@ def get_link_object(
|
|||
bearer: str = "",
|
||||
wallet_config: str = WALLET_CONFIG,
|
||||
is_direct: bool = True,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
):
|
||||
"""
|
||||
Args:
|
||||
|
@ -126,6 +138,7 @@ def get_link_object(
|
|||
wallet_config (optional, str): path to the frostfs-cli config file
|
||||
is_direct: send request directly to the node or not; this flag
|
||||
turns into `--ttl 1` key
|
||||
timeout: Timeout for an operation.
|
||||
Returns:
|
||||
(str): Link Object ID
|
||||
When no Link Object ID is found after all Storage Nodes polling,
|
||||
|
@ -144,6 +157,7 @@ def get_link_object(
|
|||
is_direct=is_direct,
|
||||
bearer=bearer,
|
||||
wallet_config=wallet_config,
|
||||
timeout=timeout,
|
||||
)
|
||||
if resp["link"]:
|
||||
return resp["link"]
|
||||
|
@ -155,7 +169,12 @@ def get_link_object(
|
|||
|
||||
@allure.step("Get Last Object")
|
||||
def get_last_object(
|
||||
wallet: str, cid: str, oid: str, shell: Shell, nodes: list[StorageNode]
|
||||
wallet: str,
|
||||
cid: str,
|
||||
oid: str,
|
||||
shell: Shell,
|
||||
nodes: list[StorageNode],
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Args:
|
||||
|
@ -165,6 +184,7 @@ def get_last_object(
|
|||
oid (str): Large Object ID
|
||||
shell: executor for cli command
|
||||
nodes: list of nodes to do search on
|
||||
timeout: Timeout for an operation.
|
||||
Returns:
|
||||
(str): Last Object ID
|
||||
When no Last Object ID is found after all Storage Nodes polling,
|
||||
|
@ -174,7 +194,14 @@ def get_last_object(
|
|||
endpoint = node.get_rpc_endpoint()
|
||||
try:
|
||||
resp = frostfs_verbs.head_object(
|
||||
wallet, cid, oid, shell=shell, endpoint=endpoint, is_raw=True, is_direct=True
|
||||
wallet,
|
||||
cid,
|
||||
oid,
|
||||
shell=shell,
|
||||
endpoint=endpoint,
|
||||
is_raw=True,
|
||||
is_direct=True,
|
||||
timeout=timeout,
|
||||
)
|
||||
if resp["lastPart"]:
|
||||
return resp["lastPart"]
|
||||
|
|
|
@ -8,13 +8,13 @@ import allure
|
|||
from frostfs_testlib.cli import FrostfsCli
|
||||
from frostfs_testlib.shell import Shell
|
||||
from frostfs_testlib.utils import json_utils
|
||||
from wallet import WalletFile
|
||||
|
||||
from pytest_tests.helpers.cluster import Cluster
|
||||
from pytest_tests.helpers.file_helper import generate_file, get_file_hash
|
||||
from pytest_tests.helpers.frostfs_verbs import put_object, put_object_to_random_node
|
||||
from pytest_tests.helpers.storage_object_info import StorageObjectInfo
|
||||
from pytest_tests.resources.common import FROSTFS_CLI_EXEC, WALLET_CONFIG
|
||||
from pytest_tests.helpers.wallet import WalletFile
|
||||
from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, WALLET_CONFIG
|
||||
|
||||
logger = logging.getLogger("NeoLogger")
|
||||
|
||||
|
@ -115,6 +115,7 @@ def create_container(
|
|||
options: dict = None,
|
||||
await_mode: bool = True,
|
||||
wait_for_creation: bool = True,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
A wrapper for `frostfs-cli container create` call.
|
||||
|
@ -136,6 +137,7 @@ def create_container(
|
|||
name (optional, str): container name attribute
|
||||
await_mode (bool): block execution until container is persisted
|
||||
wait_for_creation (): Wait for container shows in container list
|
||||
timeout: Timeout for the operation.
|
||||
|
||||
Returns:
|
||||
(str): CID of the created container
|
||||
|
@ -151,6 +153,7 @@ def create_container(
|
|||
name=name,
|
||||
session=session_token,
|
||||
await_mode=await_mode,
|
||||
timeout=timeout,
|
||||
**options or {},
|
||||
)
|
||||
|
||||
|
@ -194,7 +197,9 @@ def wait_for_container_deletion(
|
|||
|
||||
|
||||
@allure.step("List Containers")
|
||||
def list_containers(wallet: str, shell: Shell, endpoint: str) -> list[str]:
|
||||
def list_containers(
|
||||
wallet: str, shell: Shell, endpoint: str, timeout: Optional[str] = CLI_DEFAULT_TIMEOUT
|
||||
) -> list[str]:
|
||||
"""
|
||||
A wrapper for `frostfs-cli container list` call. It returns all the
|
||||
available containers for the given wallet.
|
||||
|
@ -202,11 +207,12 @@ def list_containers(wallet: str, shell: Shell, endpoint: str) -> list[str]:
|
|||
wallet (str): a wallet on whose behalf we list the containers
|
||||
shell: executor for cli command
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(list): list of containers
|
||||
"""
|
||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
|
||||
result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet)
|
||||
result = cli.container.list(rpc_endpoint=endpoint, wallet=wallet, timeout=timeout)
|
||||
logger.info(f"Containers: \n{result}")
|
||||
return result.stdout.split()
|
||||
|
||||
|
@ -218,6 +224,7 @@ def get_container(
|
|||
shell: Shell,
|
||||
endpoint: str,
|
||||
json_mode: bool = True,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> Union[dict, str]:
|
||||
"""
|
||||
A wrapper for `frostfs-cli container get` call. It extracts container's
|
||||
|
@ -228,12 +235,15 @@ def get_container(
|
|||
shell: executor for cli command
|
||||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
json_mode (bool): return container in JSON format
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(dict, str): dict of container attributes
|
||||
"""
|
||||
|
||||
cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)
|
||||
result = cli.container.get(rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode)
|
||||
result = cli.container.get(
|
||||
rpc_endpoint=endpoint, wallet=wallet, cid=cid, json_mode=json_mode, timeout=timeout
|
||||
)
|
||||
|
||||
if not json_mode:
|
||||
return result.stdout
|
||||
|
@ -258,6 +268,7 @@ def delete_container(
|
|||
force: bool = False,
|
||||
session_token: Optional[str] = None,
|
||||
await_mode: bool = False,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> None:
|
||||
"""
|
||||
A wrapper for `frostfs-cli container delete` call.
|
||||
|
@ -268,6 +279,7 @@ def delete_container(
|
|||
endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
|
||||
force (bool): do not check whether container contains locks and remove immediately
|
||||
session_token: a path to session token file
|
||||
timeout: Timeout for the operation.
|
||||
This function doesn't return anything.
|
||||
"""
|
||||
|
||||
|
@ -279,6 +291,7 @@ def delete_container(
|
|||
force=force,
|
||||
session=session_token,
|
||||
await_mode=await_mode,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
|
||||
|
|
|
@@ -6,11 +6,12 @@ import allure

from frostfs_testlib.cli import FrostfsAdm, FrostfsCli, NeoGo
from frostfs_testlib.shell import Shell
from frostfs_testlib.utils import datetime_utils, wallet_utils
from payment_neogo import get_contract_hash

from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.helpers.payment_neogo import get_contract_hash
from pytest_tests.helpers.test_control import wait_for_success
from pytest_tests.resources.common import (
    CLI_DEFAULT_TIMEOUT,
    FROSTFS_ADM_CONFIG_PATH,
    FROSTFS_ADM_EXEC,
    FROSTFS_CLI_EXEC,

@@ -55,7 +56,7 @@ def get_epoch(shell: Shell, cluster: Cluster, alive_node: Optional[StorageNode]

    cli = FrostfsCli(shell=shell, frostfs_cli_exec_path=FROSTFS_CLI_EXEC, config_file=wallet_config)

    epoch = cli.netmap.epoch(endpoint, wallet_path)
    epoch = cli.netmap.epoch(endpoint, wallet_path, timeout=CLI_DEFAULT_TIMEOUT)
    return int(epoch.stdout)
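The same pattern, sketched as a hypothetical caller of `get_epoch`; the module path and fixture names are assumptions, and callers do not pass a timeout here because the helper forwards `CLI_DEFAULT_TIMEOUT` internally, as shown above:

```python
# Hypothetical usage sketch of the epoch helper; module path and fixtures assumed.
from pytest_tests.steps.epoch import get_epoch

def test_current_epoch_is_non_negative(client_shell, cluster):
    # get_epoch builds a FrostfsCli and runs `netmap epoch` with CLI_DEFAULT_TIMEOUT.
    current_epoch = get_epoch(client_shell, cluster)
    assert current_epoch >= 0
```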
@ -11,7 +11,12 @@ from frostfs_testlib.shell import Shell
|
|||
from frostfs_testlib.utils import json_utils
|
||||
|
||||
from pytest_tests.helpers.cluster import Cluster
|
||||
from pytest_tests.resources.common import ASSETS_DIR, FROSTFS_CLI_EXEC, WALLET_CONFIG
|
||||
from pytest_tests.resources.common import (
|
||||
ASSETS_DIR,
|
||||
CLI_DEFAULT_TIMEOUT,
|
||||
FROSTFS_CLI_EXEC,
|
||||
WALLET_CONFIG,
|
||||
)
|
||||
|
||||
logger = logging.getLogger("NeoLogger")
|
||||
|
||||
|
@ -29,6 +34,7 @@ def get_object_from_random_node(
|
|||
wallet_config: Optional[str] = None,
|
||||
no_progress: bool = True,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
GET from FrostFS random storage node
|
||||
|
@ -45,6 +51,7 @@ def get_object_from_random_node(
|
|||
no_progress(optional, bool): do not show progress bar
|
||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||
session (optional, dict): path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(str): path to downloaded file
|
||||
"""
|
||||
|
@ -61,6 +68,7 @@ def get_object_from_random_node(
|
|||
wallet_config,
|
||||
no_progress,
|
||||
session,
|
||||
timeout,
|
||||
)
|
||||
|
||||
|
||||
|
@ -77,6 +85,7 @@ def get_object(
|
|||
wallet_config: Optional[str] = None,
|
||||
no_progress: bool = True,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
GET from FrostFS.
|
||||
|
@ -93,6 +102,7 @@ def get_object(
|
|||
no_progress(optional, bool): do not show progress bar
|
||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||
session (optional, dict): path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(str): path to downloaded file
|
||||
"""
|
||||
|
@ -112,6 +122,7 @@ def get_object(
|
|||
no_progress=no_progress,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
return file_path
|
||||
|
@ -129,6 +140,7 @@ def get_range_hash(
|
|||
wallet_config: Optional[str] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
):
|
||||
"""
|
||||
GETRANGEHASH of given Object.
|
||||
|
@ -145,6 +157,7 @@ def get_range_hash(
|
|||
wallet_config: path to the wallet config
|
||||
xhdr: Request X-Headers in form of Key=Values
|
||||
session: Filepath to a JSON- or binary-encoded token of the object RANGEHASH session.
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
None
|
||||
"""
|
||||
|
@ -158,6 +171,7 @@ def get_range_hash(
|
|||
bearer=bearer,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# cutting off output about range offset and length
|
||||
|
@ -178,6 +192,7 @@ def put_object_to_random_node(
|
|||
expire_at: Optional[int] = None,
|
||||
no_progress: bool = True,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
):
|
||||
"""
|
||||
PUT of given file to a random storage node.
|
||||
|
@ -196,6 +211,7 @@ def put_object_to_random_node(
|
|||
expire_at: Last epoch in the life of the object
|
||||
xhdr: Request X-Headers in form of Key=Value
|
||||
session: path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
ID of uploaded Object
|
||||
"""
|
||||
|
@ -214,6 +230,7 @@ def put_object_to_random_node(
|
|||
expire_at,
|
||||
no_progress,
|
||||
session,
|
||||
timeout,
|
||||
)
|
||||
|
||||
|
||||
|
@ -231,6 +248,7 @@ def put_object(
|
|||
expire_at: Optional[int] = None,
|
||||
no_progress: bool = True,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
):
|
||||
"""
|
||||
PUT of given file.
|
||||
|
@ -248,6 +266,7 @@ def put_object(
|
|||
expire_at: Last epoch in the life of the object
|
||||
xhdr: Request X-Headers in form of Key=Value
|
||||
session: path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(str): ID of uploaded Object
|
||||
"""
|
||||
|
@ -264,6 +283,7 @@ def put_object(
|
|||
no_progress=no_progress,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# splitting CLI output to lines and taking the penultimate line
|
||||
|
@ -283,6 +303,7 @@ def delete_object(
|
|||
wallet_config: Optional[str] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
):
|
||||
"""
|
||||
DELETE an Object.
|
||||
|
@ -297,6 +318,7 @@ def delete_object(
|
|||
wallet_config: path to the wallet config
|
||||
xhdr: Request X-Headers in form of Key=Value
|
||||
session: path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(str): Tombstone ID
|
||||
"""
|
||||
|
@ -310,6 +332,7 @@ def delete_object(
|
|||
bearer=bearer,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
id_str = result.stdout.split("\n")[1]
|
||||
|
@ -329,6 +352,7 @@ def get_range(
|
|||
bearer: str = "",
|
||||
xhdr: Optional[dict] = None,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
):
|
||||
"""
|
||||
GETRANGE an Object.
|
||||
|
@ -344,6 +368,7 @@ def get_range(
|
|||
wallet_config: path to the wallet config
|
||||
xhdr: Request X-Headers in form of Key=Value
|
||||
session: path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
(str, bytes) - path to the file with range content and content of this file as bytes
|
||||
"""
|
||||
|
@ -360,6 +385,7 @@ def get_range(
|
|||
bearer=bearer,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
with open(range_file_path, "rb") as file:
|
||||
|
@ -382,6 +408,7 @@ def lock_object(
|
|||
wallet_config: Optional[str] = None,
|
||||
ttl: Optional[int] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
Lock object in container.
|
||||
|
@ -399,6 +426,7 @@ def lock_object(
|
|||
ttl: TTL value in request meta header (default 2).
|
||||
wallet: WIF (NEP-2) string or path to the wallet or binary key.
|
||||
xhdr: Dict with request X-Headers.
|
||||
timeout: Timeout for the operation.
|
||||
|
||||
Returns:
|
||||
Lock object ID
|
||||
|
@ -417,6 +445,7 @@ def lock_object(
|
|||
xhdr=xhdr,
|
||||
session=session,
|
||||
ttl=ttl,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# splitting CLI output to lines and taking the penultimate line
|
||||
|
@ -439,6 +468,7 @@ def search_object(
|
|||
session: Optional[str] = None,
|
||||
phy: bool = False,
|
||||
root: bool = False,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> list:
|
||||
"""
|
||||
SEARCH an Object.
|
||||
|
@ -456,6 +486,7 @@ def search_object(
|
|||
session: path to a JSON-encoded container session token
|
||||
phy: Search physically stored objects.
|
||||
root: Search for user objects.
|
||||
timeout: Timeout for the operation.
|
||||
|
||||
Returns:
|
||||
list of found ObjectIDs
|
||||
|
@ -474,6 +505,7 @@ def search_object(
|
|||
session=session,
|
||||
phy=phy,
|
||||
root=root,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
found_objects = re.findall(r"(\w{43,44})", result.stdout)
|
||||
|
@ -502,6 +534,7 @@ def get_netmap_netinfo(
|
|||
address: Optional[str] = None,
|
||||
ttl: Optional[int] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> dict[str, Any]:
|
||||
"""
|
||||
Get netmap netinfo output from node
|
||||
|
@ -526,6 +559,7 @@ def get_netmap_netinfo(
|
|||
address=address,
|
||||
ttl=ttl,
|
||||
xhdr=xhdr,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
settings = dict()
|
||||
|
@ -556,6 +590,7 @@ def head_object(
|
|||
is_direct: bool = False,
|
||||
wallet_config: Optional[str] = None,
|
||||
session: Optional[str] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
):
|
||||
"""
|
||||
HEAD an Object.
|
||||
|
@ -576,6 +611,7 @@ def head_object(
|
|||
wallet_config(optional, str): path to the wallet config
|
||||
xhdr (optional, dict): Request X-Headers in form of Key=Value
|
||||
session (optional, dict): path to a JSON-encoded container session token
|
||||
timeout: Timeout for the operation.
|
||||
Returns:
|
||||
depending on the `json_output` parameter value, the function returns
|
||||
(dict): HEAD response in JSON format
|
||||
|
@ -595,6 +631,7 @@ def head_object(
|
|||
ttl=1 if is_direct else None,
|
||||
xhdr=xhdr,
|
||||
session=session,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
if not json_output:
|
||||
|
|
|
@@ -10,9 +10,9 @@ from urllib.parse import quote_plus

import allure
import requests
from aws_cli_client import LONG_TIMEOUT
from frostfs_testlib.shell import Shell

from pytest_tests.helpers.aws_cli_client import LONG_TIMEOUT
from pytest_tests.helpers.cli_helpers import _cmd_run
from pytest_tests.helpers.cluster import StorageNode
from pytest_tests.helpers.file_helper import get_file_hash
|
|
@@ -6,7 +6,8 @@ from typing import Optional

import allure
from frostfs_testlib.shell import Shell
from remote_process import RemoteProcess

from pytest_tests.helpers.remote_process import RemoteProcess

EXIT_RESULT_CODE = 0
LOAD_RESULTS_PATTERNS = {
|
|
@ -16,6 +16,7 @@ from pytest_tests.helpers.frostfs_verbs import (
|
|||
put_object_to_random_node,
|
||||
search_object,
|
||||
)
|
||||
from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT
|
||||
|
||||
OPERATION_ERROR_TYPE = RuntimeError
|
||||
|
||||
|
@ -123,6 +124,7 @@ def can_get_head_object(
|
|||
bearer: Optional[str] = None,
|
||||
wallet_config: Optional[str] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> bool:
|
||||
with allure.step("Try get head of object"):
|
||||
try:
|
||||
|
@ -135,6 +137,7 @@ def can_get_head_object(
|
|||
xhdr=xhdr,
|
||||
shell=shell,
|
||||
endpoint=endpoint,
|
||||
timeout=timeout,
|
||||
)
|
||||
except OPERATION_ERROR_TYPE as err:
|
||||
assert string_utils.is_str_match_pattern(
|
||||
|
@ -153,6 +156,7 @@ def can_get_range_of_object(
|
|||
bearer: Optional[str] = None,
|
||||
wallet_config: Optional[str] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> bool:
|
||||
with allure.step("Try get range of object"):
|
||||
try:
|
||||
|
@ -166,6 +170,7 @@ def can_get_range_of_object(
|
|||
xhdr=xhdr,
|
||||
shell=shell,
|
||||
endpoint=endpoint,
|
||||
timeout=timeout,
|
||||
)
|
||||
except OPERATION_ERROR_TYPE as err:
|
||||
assert string_utils.is_str_match_pattern(
|
||||
|
@ -184,6 +189,7 @@ def can_get_range_hash_of_object(
|
|||
bearer: Optional[str] = None,
|
||||
wallet_config: Optional[str] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> bool:
|
||||
with allure.step("Try get range hash of object"):
|
||||
try:
|
||||
|
@ -197,6 +203,7 @@ def can_get_range_hash_of_object(
|
|||
xhdr=xhdr,
|
||||
shell=shell,
|
||||
endpoint=endpoint,
|
||||
timeout=timeout,
|
||||
)
|
||||
except OPERATION_ERROR_TYPE as err:
|
||||
assert string_utils.is_str_match_pattern(
|
||||
|
@ -215,6 +222,7 @@ def can_search_object(
|
|||
bearer: Optional[str] = None,
|
||||
wallet_config: Optional[str] = None,
|
||||
xhdr: Optional[dict] = None,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> bool:
|
||||
with allure.step("Try search object in container"):
|
||||
try:
|
||||
|
@ -226,6 +234,7 @@ def can_search_object(
|
|||
xhdr=xhdr,
|
||||
shell=shell,
|
||||
endpoint=endpoint,
|
||||
timeout=timeout,
|
||||
)
|
||||
except OPERATION_ERROR_TYPE as err:
|
||||
assert string_utils.is_str_match_pattern(
|
||||
|
|
|
@ -12,7 +12,7 @@ from frostfs_testlib.shell import Shell
|
|||
from pytest_tests.helpers.cluster import Cluster
|
||||
from pytest_tests.helpers.complex_object_actions import get_link_object
|
||||
from pytest_tests.helpers.frostfs_verbs import head_object
|
||||
from pytest_tests.resources.common import FROSTFS_CLI_EXEC, WALLET_CONFIG
|
||||
from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC, WALLET_CONFIG
|
||||
|
||||
logger = logging.getLogger("NeoLogger")
|
||||
|
||||
|
@ -27,6 +27,7 @@ def put_storagegroup(
|
|||
bearer: Optional[str] = None,
|
||||
wallet_config: str = WALLET_CONFIG,
|
||||
lifetime: int = 10,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
Wrapper for `frostfs-cli storagegroup put`. Before the SG is created,
|
||||
|
@ -40,6 +41,7 @@ def put_storagegroup(
|
|||
objects: List of Object IDs to include into the SG.
|
||||
bearer: Path to Bearer token file.
|
||||
wallet_config: Path to frostfs-cli config file.
|
||||
timeout: Timeout for an operation.
|
||||
Returns:
|
||||
Object ID of created Storage Group.
|
||||
"""
|
||||
|
@ -66,6 +68,7 @@ def list_storagegroup(
|
|||
cid: str,
|
||||
bearer: Optional[str] = None,
|
||||
wallet_config: str = WALLET_CONFIG,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> list:
|
||||
"""
|
||||
Wrapper for `frostfs-cli storagegroup list`. This operation
|
||||
|
@ -76,6 +79,7 @@ def list_storagegroup(
|
|||
cid: ID of Container to list.
|
||||
bearer: Path to Bearer token file.
|
||||
wallet_config: Path to frostfs-cli config file.
|
||||
timeout: Timeout for an operation.
|
||||
Returns:
|
||||
Object IDs of found Storage Groups.
|
||||
"""
|
||||
|
@ -87,6 +91,7 @@ def list_storagegroup(
|
|||
cid=cid,
|
||||
bearer=bearer,
|
||||
rpc_endpoint=endpoint,
|
||||
timeout=timeout,
|
||||
)
|
||||
# throwing off the first string of output
|
||||
found_objects = result.stdout.split("\n")[1:]
|
||||
|
@ -102,6 +107,7 @@ def get_storagegroup(
|
|||
gid: str,
|
||||
bearer: str = "",
|
||||
wallet_config: str = WALLET_CONFIG,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> dict:
|
||||
"""
|
||||
Wrapper for `frostfs-cli storagegroup get`.
|
||||
|
@ -112,6 +118,7 @@ def get_storagegroup(
|
|||
gid: ID of the Storage Group.
|
||||
bearer: Path to Bearer token file.
|
||||
wallet_config: Path to frostfs-cli config file.
|
||||
timeout: Timeout for an operation.
|
||||
Returns:
|
||||
Detailed information on the Storage Group.
|
||||
"""
|
||||
|
@ -124,6 +131,7 @@ def get_storagegroup(
|
|||
bearer=bearer,
|
||||
id=gid,
|
||||
rpc_endpoint=endpoint,
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# TODO: temporary solution for parsing output. Needs to be replaced with
|
||||
|
@ -153,6 +161,7 @@ def delete_storagegroup(
|
|||
gid: str,
|
||||
bearer: str = "",
|
||||
wallet_config: str = WALLET_CONFIG,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
) -> str:
|
||||
"""
|
||||
Wrapper for `frostfs-cli storagegroup delete`.
|
||||
|
@ -163,6 +172,7 @@ def delete_storagegroup(
|
|||
gid: ID of the Storage Group.
|
||||
bearer: Path to Bearer token file.
|
||||
wallet_config: Path to frostfs-cli config file.
|
||||
timeout: Timeout for an operation.
|
||||
Returns:
|
||||
Tombstone ID of the deleted Storage Group.
|
||||
"""
|
||||
|
@ -175,6 +185,7 @@ def delete_storagegroup(
|
|||
bearer=bearer,
|
||||
id=gid,
|
||||
rpc_endpoint=endpoint,
|
||||
timeout=timeout,
|
||||
)
|
||||
tombstone_id = result.stdout.strip().split("\n")[1].split(": ")[1]
|
||||
return tombstone_id
|
||||
|
@ -189,6 +200,7 @@ def verify_list_storage_group(
|
|||
gid: str,
|
||||
bearer: str = None,
|
||||
wallet_config: str = WALLET_CONFIG,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
):
|
||||
storage_groups = list_storagegroup(
|
||||
shell=shell,
|
||||
|
@ -197,6 +209,7 @@ def verify_list_storage_group(
|
|||
cid=cid,
|
||||
bearer=bearer,
|
||||
wallet_config=wallet_config,
|
||||
timeout=timeout,
|
||||
)
|
||||
assert gid in storage_groups
|
||||
|
||||
|
@ -213,6 +226,7 @@ def verify_get_storage_group(
|
|||
max_object_size: int,
|
||||
bearer: str = None,
|
||||
wallet_config: str = WALLET_CONFIG,
|
||||
timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
|
||||
):
|
||||
obj_parts = []
|
||||
endpoint = cluster.default_rpc_endpoint
|
||||
|
@ -226,6 +240,7 @@ def verify_get_storage_group(
|
|||
nodes=cluster.storage_nodes,
|
||||
bearer=bearer,
|
||||
wallet_config=wallet_config,
|
||||
timeout=timeout,
|
||||
)
|
||||
obj_head = head_object(
|
||||
wallet=wallet,
|
||||
|
@ -236,6 +251,7 @@ def verify_get_storage_group(
|
|||
is_raw=True,
|
||||
bearer=bearer,
|
||||
wallet_config=wallet_config,
|
||||
timeout=timeout,
|
||||
)
|
||||
obj_parts = obj_head["header"]["split"]["children"]
|
||||
|
||||
|
@ -248,6 +264,7 @@ def verify_get_storage_group(
|
|||
gid=gid,
|
||||
bearer=bearer,
|
||||
wallet_config=wallet_config,
|
||||
timeout=timeout,
|
||||
)
|
||||
exp_size = object_size * obj_num
|
||||
if object_size < max_object_size:
|
||||
|
|
|
@@ -45,6 +45,8 @@ STORAGE_NODE_SERVICE_NAME_REGEX = r"s\d\d"

HTTP_GATE_SERVICE_NAME_REGEX = r"http-gate\d\d"
S3_GATE_SERVICE_NAME_REGEX = r"s3-gate\d\d"

CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)

# Generate wallet configs
# TODO: we should move all info about wallet configs to fixtures
WALLET_CONFIG = os.path.join(os.getcwd(), "wallet_config.yml")
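This constant is where the new default comes from: `CLI_DEFAULT_TIMEOUT` is read from the environment and falls back to `None` when the variable is unset. A hedged sketch of how the default and a per-call override are expected to interact; the module path for `list_containers` and the duration strings are assumptions:

```python
# Hypothetical sketch of default vs. per-call timeouts; values are placeholders.
from pytest_tests.helpers.container import list_containers  # module path assumed

def show_containers(wallet_path, shell, endpoint):
    # Relies on the default: the wrapper forwards CLI_DEFAULT_TIMEOUT to frostfs-cli
    # (e.g. "100s" if the CLI_DEFAULT_TIMEOUT environment variable is set, else None).
    default_listing = list_containers(wallet_path, shell, endpoint)

    # Per-call override of the default (duration-string format assumed).
    slow_listing = list_containers(wallet_path, shell, endpoint, timeout="300s")
    return default_listing, slow_listing
```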
@@ -8,14 +8,14 @@ from typing import Any, Optional

import allure
import boto3
import pytest
import s3_gate_bucket
import s3_gate_object
import urllib3
from botocore.config import Config
from botocore.exceptions import ClientError
from frostfs_testlib.shell import Shell
from pytest import FixtureRequest

from pytest_tests.steps import s3_gate_bucket
from pytest_tests.steps import s3_gate_object
from pytest_tests.helpers.aws_cli_client import AwsCliClient
from pytest_tests.helpers.cli_helpers import _cmd_run, _configure_aws_cli, _run_with_passwd
from pytest_tests.helpers.cluster import Cluster
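Several files in this change set make the same move: bare module imports that depend on the module's own directory being on `sys.path` are replaced with absolute imports rooted at the `pytest_tests` package. A minimal sketch of the convention, using modules that appear in the diffs above:

```python
# Old style (relied on implicit sys.path entries):
# import s3_gate_bucket
# from remote_process import RemoteProcess

# New style used throughout this change set: absolute imports from the package root.
from pytest_tests.steps import s3_gate_bucket
from pytest_tests.helpers.remote_process import RemoteProcess
```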
@ -8,10 +8,10 @@ import allure
|
|||
import pytest
|
||||
import urllib3
|
||||
from botocore.exceptions import ClientError
|
||||
from s3_gate_bucket import S3_SYNC_WAIT_TIME
|
||||
|
||||
from pytest_tests.helpers.aws_cli_client import AwsCliClient
|
||||
from pytest_tests.helpers.cli_helpers import log_command_execution
|
||||
from pytest_tests.steps.s3_gate_bucket import S3_SYNC_WAIT_TIME
|
||||
|
||||
##########################################################
|
||||
# Disabling warnings on self-signed certificate which the
|
||||
|
|
|
@ -1,473 +0,0 @@
|
|||
import logging
|
||||
import os
|
||||
import uuid
|
||||
from typing import Optional
|
||||
|
||||
import allure
|
||||
import pytest
|
||||
from frostfs_testlib.resources.common import OBJECT_ACCESS_DENIED, OBJECT_NOT_FOUND
|
||||
from frostfs_testlib.utils import wallet_utils
|
||||
|
||||
from pytest_tests.helpers.acl import (
|
||||
EACLAccess,
|
||||
EACLOperation,
|
||||
EACLRole,
|
||||
EACLRule,
|
||||
create_eacl,
|
||||
form_bearertoken_file,
|
||||
set_eacl,
|
||||
)
|
||||
from pytest_tests.helpers.container import create_container
|
||||
from pytest_tests.helpers.file_helper import generate_file
|
||||
from pytest_tests.helpers.frostfs_verbs import put_object_to_random_node
|
||||
from pytest_tests.helpers.payment_neogo import deposit_gas, transfer_gas
|
||||
from pytest_tests.helpers.storage_group import (
|
||||
delete_storagegroup,
|
||||
get_storagegroup,
|
||||
list_storagegroup,
|
||||
put_storagegroup,
|
||||
verify_get_storage_group,
|
||||
verify_list_storage_group,
|
||||
)
|
||||
from pytest_tests.resources.common import ASSETS_DIR, FREE_STORAGE, WALLET_PASS
|
||||
from pytest_tests.steps.cluster_test_base import ClusterTestBase
|
||||
|
||||
logger = logging.getLogger("NeoLogger")
|
||||
deposit = 30
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"object_size",
|
||||
[pytest.lazy_fixture("simple_object_size"), pytest.lazy_fixture("complex_object_size")],
|
||||
ids=["simple object", "complex object"],
|
||||
)
|
||||
@pytest.mark.sanity
|
||||
@pytest.mark.acl
|
||||
@pytest.mark.storage_group
|
||||
class TestStorageGroup(ClusterTestBase):
|
||||
@pytest.fixture(autouse=True)
|
||||
def prepare_two_wallets(self, default_wallet):
|
||||
self.main_wallet = default_wallet
|
||||
self.other_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json")
|
||||
wallet_utils.init_wallet(self.other_wallet, WALLET_PASS)
|
||||
if not FREE_STORAGE:
|
||||
main_chain = self.cluster.main_chain_nodes[0]
|
||||
deposit = 30
|
||||
transfer_gas(
|
||||
shell=self.shell,
|
||||
amount=deposit + 1,
|
||||
main_chain=main_chain,
|
||||
wallet_to_path=self.other_wallet,
|
||||
wallet_to_password=WALLET_PASS,
|
||||
)
|
||||
deposit_gas(
|
||||
shell=self.shell,
|
||||
amount=deposit,
|
||||
main_chain=main_chain,
|
||||
wallet_from_path=self.other_wallet,
|
||||
wallet_from_password=WALLET_PASS,
|
||||
)
|
||||
|
||||
@allure.title("Test Storage Group in Private Container")
|
||||
def test_storagegroup_basic_private_container(self, object_size, max_object_size):
|
||||
cid = create_container(
|
||||
self.main_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
|
||||
)
|
||||
file_path = generate_file(object_size)
|
||||
oid = put_object_to_random_node(self.main_wallet, file_path, cid, self.shell, self.cluster)
|
||||
objects = [oid]
|
||||
storage_group = put_storagegroup(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
objects=objects,
|
||||
)
|
||||
|
||||
self.expect_success_for_storagegroup_operations(
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
self.expect_failure_for_storagegroup_operations(
|
||||
wallet=self.other_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
gid=storage_group,
|
||||
)
|
||||
self.storagegroup_operations_by_system_ro_container(
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
|
||||
@allure.title("Test Storage Group in Public Container")
|
||||
def test_storagegroup_basic_public_container(self, object_size, max_object_size):
|
||||
cid = create_container(
|
||||
self.main_wallet,
|
||||
basic_acl="public-read-write",
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
)
|
||||
file_path = generate_file(object_size)
|
||||
oid = put_object_to_random_node(
|
||||
self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
|
||||
)
|
||||
objects = [oid]
|
||||
self.expect_success_for_storagegroup_operations(
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
self.expect_success_for_storagegroup_operations(
|
||||
wallet=self.other_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
self.storagegroup_operations_by_system_ro_container(
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
|
||||
@allure.title("Test Storage Group in Read-Only Container")
|
||||
def test_storagegroup_basic_ro_container(self, object_size, max_object_size):
|
||||
cid = create_container(
|
||||
self.main_wallet,
|
||||
basic_acl="public-read",
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
)
|
||||
file_path = generate_file(object_size)
|
||||
oid = put_object_to_random_node(
|
||||
self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
|
||||
)
|
||||
objects = [oid]
|
||||
self.expect_success_for_storagegroup_operations(
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
self.storagegroup_operations_by_other_ro_container(
|
||||
owner_wallet=self.main_wallet,
|
||||
other_wallet=self.other_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
self.storagegroup_operations_by_system_ro_container(
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
|
||||
@allure.title("Test Storage Group with Bearer Allow")
|
||||
def test_storagegroup_bearer_allow(self, object_size, max_object_size):
|
||||
cid = create_container(
|
||||
self.main_wallet,
|
||||
basic_acl="eacl-public-read-write",
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
)
|
||||
file_path = generate_file(object_size)
|
||||
oid = put_object_to_random_node(
|
||||
self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
|
||||
)
|
||||
objects = [oid]
|
||||
self.expect_success_for_storagegroup_operations(
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
storage_group = put_storagegroup(
|
||||
self.shell, self.cluster.default_rpc_endpoint, self.main_wallet, cid, objects
|
||||
)
|
||||
eacl_deny = [
|
||||
EACLRule(access=EACLAccess.DENY, role=role, operation=op)
|
||||
for op in EACLOperation
|
||||
for role in EACLRole
|
||||
]
|
||||
set_eacl(
|
||||
self.main_wallet,
|
||||
cid,
|
||||
create_eacl(cid, eacl_deny, shell=self.shell),
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
)
|
||||
self.expect_failure_for_storagegroup_operations(
|
||||
self.main_wallet, cid, objects, storage_group
|
||||
)
|
||||
bearer_file = form_bearertoken_file(
|
||||
self.main_wallet,
|
||||
cid,
|
||||
[
|
||||
EACLRule(operation=op, access=EACLAccess.ALLOW, role=EACLRole.USER)
|
||||
for op in EACLOperation
|
||||
],
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
)
|
||||
self.expect_success_for_storagegroup_operations(
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
obj_list=objects,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
bearer=bearer_file,
|
||||
)
|
||||
|
||||
@allure.title("Test to check Storage Group lifetime")
|
||||
def test_storagegroup_lifetime(self, object_size):
|
||||
cid = create_container(
|
||||
self.main_wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
|
||||
)
|
||||
file_path = generate_file(object_size)
|
||||
oid = put_object_to_random_node(
|
||||
self.main_wallet, file_path, cid, shell=self.shell, cluster=self.cluster
|
||||
)
|
||||
objects = [oid]
|
||||
storage_group = put_storagegroup(
|
||||
self.shell,
|
||||
self.cluster.default_rpc_endpoint,
|
||||
self.main_wallet,
|
||||
cid,
|
||||
objects,
|
||||
lifetime=1,
|
||||
)
|
||||
with allure.step("Tick two epochs"):
|
||||
for _ in range(2):
|
||||
self.tick_epoch()
|
||||
self.wait_for_epochs_align()
|
||||
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
|
||||
get_storagegroup(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=self.main_wallet,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
)
|
||||
|
||||
@allure.step("Run Storage Group Operations And Expect Success")
|
||||
def expect_success_for_storagegroup_operations(
|
||||
self,
|
||||
wallet: str,
|
||||
cid: str,
|
||||
obj_list: list,
|
||||
object_size: int,
|
||||
max_object_size: int,
|
||||
bearer: Optional[str] = None,
|
||||
):
|
||||
"""
|
||||
This func verifies if the Object's owner is allowed to
|
||||
Put, List, Get and Delete the Storage Group which contains
|
||||
the Object.
|
||||
"""
|
||||
storage_group = put_storagegroup(
|
||||
self.shell, self.cluster.default_rpc_endpoint, wallet, cid, obj_list, bearer
|
||||
)
|
||||
verify_list_storage_group(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=wallet,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
bearer=bearer,
|
||||
)
|
||||
verify_get_storage_group(
|
||||
shell=self.shell,
|
||||
cluster=self.cluster,
|
||||
wallet=wallet,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
obj_list=obj_list,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
bearer=bearer,
|
||||
)
|
||||
delete_storagegroup(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=wallet,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
bearer=bearer,
|
||||
)
|
||||
|
||||
@allure.step("Run Storage Group Operations And Expect Failure")
|
||||
def expect_failure_for_storagegroup_operations(
|
||||
self, wallet: str, cid: str, obj_list: list, gid: str
|
||||
):
|
||||
"""
|
||||
This func verifies if the Object's owner isn't allowed to
|
||||
Put, List, Get and Delete the Storage Group which contains
|
||||
the Object.
|
||||
"""
|
||||
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||
put_storagegroup(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=wallet,
|
||||
cid=cid,
|
||||
objects=obj_list,
|
||||
)
|
||||
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||
list_storagegroup(
|
||||
shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, wallet=wallet, cid=cid
|
||||
)
|
||||
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||
get_storagegroup(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=wallet,
|
||||
cid=cid,
|
||||
gid=gid,
|
||||
)
|
||||
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||
delete_storagegroup(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=wallet,
|
||||
cid=cid,
|
||||
gid=gid,
|
||||
)
|
||||
|
||||
@allure.step("Run Storage Group Operations On Other's Behalf In RO Container")
|
||||
def storagegroup_operations_by_other_ro_container(
|
||||
self,
|
||||
owner_wallet: str,
|
||||
other_wallet: str,
|
||||
cid: str,
|
||||
obj_list: list,
|
||||
object_size: int,
|
||||
max_object_size: int,
|
||||
):
|
||||
storage_group = put_storagegroup(
|
||||
self.shell, self.cluster.default_rpc_endpoint, owner_wallet, cid, obj_list
|
||||
)
|
||||
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||
put_storagegroup(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=other_wallet,
|
||||
cid=cid,
|
||||
objects=obj_list,
|
||||
)
|
||||
verify_list_storage_group(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=other_wallet,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
)
|
||||
verify_get_storage_group(
|
||||
shell=self.shell,
|
||||
cluster=self.cluster,
|
||||
wallet=other_wallet,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
obj_list=obj_list,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
)
|
||||
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||
delete_storagegroup(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=other_wallet,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
)
|
||||
|
||||
@allure.step("Run Storage Group Operations On Systems's Behalf In RO Container")
|
||||
def storagegroup_operations_by_system_ro_container(
|
||||
self,
|
||||
wallet: str,
|
||||
cid: str,
|
||||
obj_list: list,
|
||||
object_size: int,
|
||||
max_object_size: int,
|
||||
):
|
||||
"""
|
||||
In this func we create a Storage Group on Inner Ring's key behalf
|
||||
and include an Object created on behalf of some user. We expect
|
||||
that System key is granted to make all operations except PUT and DELETE.
|
||||
"""
|
||||
ir_node = self.cluster.ir_nodes[0]
|
||||
ir_wallet_path = ir_node.get_wallet_path()
|
||||
ir_wallet_password = ir_node.get_wallet_password()
|
||||
ir_wallet_config = ir_node.get_wallet_config_path()
|
||||
|
||||
if not FREE_STORAGE:
|
||||
main_chain = self.cluster.main_chain_nodes[0]
|
||||
deposit = 30
|
||||
transfer_gas(
|
||||
shell=self.shell,
|
||||
amount=deposit + 1,
|
||||
main_chain=main_chain,
|
||||
wallet_to_path=ir_wallet_path,
|
||||
wallet_to_password=ir_wallet_password,
|
||||
)
|
||||
deposit_gas(
|
||||
shell=self.shell,
|
||||
amount=deposit,
|
||||
main_chain=main_chain,
|
||||
wallet_from_path=ir_wallet_path,
|
||||
wallet_from_password=ir_wallet_password,
|
||||
)
|
||||
storage_group = put_storagegroup(
|
||||
self.shell, self.cluster.default_rpc_endpoint, wallet, cid, obj_list
|
||||
)
|
||||
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||
put_storagegroup(
|
||||
self.shell,
|
||||
self.cluster.default_rpc_endpoint,
|
||||
ir_wallet_path,
|
||||
cid,
|
||||
obj_list,
|
||||
wallet_config=ir_wallet_config,
|
||||
)
|
||||
verify_list_storage_group(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=ir_wallet_path,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
wallet_config=ir_wallet_config,
|
||||
)
|
||||
verify_get_storage_group(
|
||||
shell=self.shell,
|
||||
cluster=self.cluster,
|
||||
wallet=ir_wallet_path,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
obj_list=obj_list,
|
||||
object_size=object_size,
|
||||
max_object_size=max_object_size,
|
||||
wallet_config=ir_wallet_config,
|
||||
)
|
||||
with pytest.raises(Exception, match=OBJECT_ACCESS_DENIED):
|
||||
delete_storagegroup(
|
||||
shell=self.shell,
|
||||
endpoint=self.cluster.default_rpc_endpoint,
|
||||
wallet=ir_wallet_path,
|
||||
cid=cid,
|
||||
gid=storage_group,
|
||||
wallet_config=ir_wallet_config,
|
||||
)
|
|
@@ -1,6 +1,5 @@

import logging
import os
import re
import shutil
import uuid
from datetime import datetime

@@ -44,11 +43,20 @@ from pytest_tests.steps.load import get_services_endpoints, prepare_k6_instances

logger = logging.getLogger("NeoLogger")


# Add the logs check test even if it does not fit the mark selectors
def pytest_configure(config: pytest.Config):
    markers = config.option.markexpr
    if markers != "":
        config.option.markexpr = f"logs_after_session or ({markers})"


# pytest hook. Do not rename
def pytest_collection_modifyitems(items):
    # Make network tests last based on @pytest.mark.node_mgmt
    # Make network tests run last based on @pytest.mark.node_mgmt, and logs check tests run last of all
    def priority(item: pytest.Item) -> int:
        is_node_mgmt_test = item.get_closest_marker("node_mgmt")
        return 0 if not is_node_mgmt_test else 1
        is_node_mgmt_test = 1 if item.get_closest_marker("node_mgmt") else 0
        is_logs_check_test = 100 if item.get_closest_marker("logs_after_session") else 0
        return is_node_mgmt_test + is_logs_check_test

    items.sort(key=lambda item: priority(item))
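As a small illustration of the ordering this produces (not part of the change itself, and the test names below are made up): a plain test scores 0, a `node_mgmt` test scores 1, and a `logs_after_session` test scores 100, so regular tests run first, node management tests next, and the logs check last.

```python
# Illustrative sketch only: the scores mirror the priority() function above.
scores = {"test_put_object": 0, "test_node_shutdown": 1, "test_logs_after_session": 100}
ordered = sorted(scores, key=scores.get)
assert ordered == ["test_put_object", "test_node_shutdown", "test_logs_after_session"]
```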
@@ -145,23 +153,16 @@ def temp_directory():

    shutil.rmtree(full_path)


@allure.step("[Autouse/Session] Test session start time")
@pytest.fixture(scope="session", autouse=True)
@allure.title("Collect logs")
def collect_logs(temp_directory, hosting: Hosting):
def session_start_time():
    start_time = datetime.utcnow()
    yield
    end_time = datetime.utcnow()

    # Dump logs to temp directory (because they might be too large to keep in RAM)
    logs_dir = os.path.join(temp_directory, "logs")
    dump_logs(hosting, logs_dir, start_time, end_time)
    attach_logs(logs_dir)
    check_logs(logs_dir)
    return start_time


@pytest.fixture(scope="session", autouse=True)
@allure.title("Run health check for all storage nodes")
def run_health_check(collect_logs, cluster: Cluster):
def run_health_check(session_start_time, cluster: Cluster):
    failed_nodes = []
    for node in cluster.storage_nodes:
        health_check = storage_node_healthcheck(node)
@ -263,44 +264,3 @@ def default_wallet(client_shell: Shell, temp_directory: str, cluster: Cluster):
|
|||
)
|
||||
|
||||
return wallet_path
|
||||
|
||||
|
||||
@allure.title("Check logs for OOM and PANIC entries in {logs_dir}")
|
||||
def check_logs(logs_dir: str):
|
||||
problem_pattern = r"\Wpanic\W|\Woom\W|\Wtoo many open files\W"
|
||||
|
||||
log_file_paths = []
|
||||
for directory_path, _, file_names in os.walk(logs_dir):
|
||||
log_file_paths += [
|
||||
os.path.join(directory_path, file_name)
|
||||
for file_name in file_names
|
||||
if re.match(r"\.(txt|log)", os.path.splitext(file_name)[-1], flags=re.IGNORECASE)
|
||||
]
|
||||
|
||||
logs_with_problem = []
|
||||
for file_path in log_file_paths:
|
||||
with allure.step(f"Check log file {file_path}"):
|
||||
with open(file_path, "r") as log_file:
|
||||
if re.search(problem_pattern, log_file.read(), flags=re.IGNORECASE):
|
||||
logs_with_problem.append(file_path)
|
||||
if logs_with_problem:
|
||||
raise pytest.fail(f"System logs {', '.join(logs_with_problem)} contain critical errors")
|
||||
|
||||
|
||||
def dump_logs(hosting: Hosting, logs_dir: str, since: datetime, until: datetime) -> None:
|
||||
# Dump logs to temp directory (because they might be too large to keep in RAM)
|
||||
os.makedirs(logs_dir)
|
||||
|
||||
for host in hosting.hosts:
|
||||
with allure.step(f"Dump logs from host {host.config.address}"):
|
||||
try:
|
||||
host.dump_logs(logs_dir, since=since, until=until)
|
||||
except Exception as ex:
|
||||
logger.warning(f"Exception during logs collection: {ex}")
|
||||
|
||||
|
||||
def attach_logs(logs_dir: str) -> None:
|
||||
# Zip all files and attach to Allure because it is more convenient to download a single
|
||||
# zip with all logs rather than mess with individual logs files per service or node
|
||||
logs_zip_file_path = shutil.make_archive(logs_dir, "zip", logs_dir)
|
||||
allure.attach.file(logs_zip_file_path, name="logs.zip", extension="zip")
|
||||
|
|
|
@@ -20,8 +20,8 @@ logger = logging.getLogger("NeoLogger")

stopped_nodes: list[StorageNode] = []


@pytest.fixture(scope="function", autouse=True)
@allure.step("Return all stopped hosts")
@pytest.fixture(scope="function", autouse=True)
def after_run_return_all_stopped_hosts(cluster: Cluster):
    yield
    return_stopped_hosts(cluster)
|
|
@ -1,102 +0,0 @@
import logging
import os

import allure
import pytest
import yaml
from frostfs_testlib.cli import FrostfsCli
from frostfs_testlib.shell import CommandResult, Shell

from pytest_tests.helpers.wallet import WalletFactory, WalletFile
from pytest_tests.resources.common import FREE_STORAGE, FROSTFS_CLI_EXEC, WALLET_CONFIG
from pytest_tests.steps.cluster_test_base import ClusterTestBase

logger = logging.getLogger("NeoLogger")
DEPOSIT_AMOUNT = 30


@pytest.mark.sanity
@pytest.mark.payments
@pytest.mark.skipif(FREE_STORAGE, reason="Test only works on public network with paid storage")
class TestBalanceAccounting(ClusterTestBase):
    @pytest.fixture(scope="class")
    def main_wallet(self, wallet_factory: WalletFactory) -> WalletFile:
        return wallet_factory.create_wallet()

    @pytest.fixture(scope="class")
    def other_wallet(self, wallet_factory: WalletFactory) -> WalletFile:
        return wallet_factory.create_wallet()

    @pytest.fixture(scope="class")
    def cli(self, client_shell: Shell) -> FrostfsCli:
        return FrostfsCli(client_shell, FROSTFS_CLI_EXEC, WALLET_CONFIG)

    @allure.step("Check deposit amount")
    def check_amount(self, result: CommandResult) -> None:
        amount_str = result.stdout.rstrip()

        try:
            amount = int(amount_str)
        except Exception as ex:
            pytest.fail(
                f"Amount parse error, should be parsable as int({DEPOSIT_AMOUNT}), but given {amount_str}: {ex}"
            )

        assert amount == DEPOSIT_AMOUNT

    @staticmethod
    @allure.step("Write config with API endpoint")
    def write_api_config(config_dir: str, endpoint: str, wallet: str) -> str:
        with open(WALLET_CONFIG, "r") as file:
            wallet_config = yaml.full_load(file)
        api_config = {
            **wallet_config,
            "rpc-endpoint": endpoint,
            "wallet": wallet,
        }
        api_config_file = os.path.join(config_dir, "frostfs-cli-api-config.yaml")
        with open(api_config_file, "w") as file:
            yaml.dump(api_config, file)
        return api_config_file

    @allure.title("Test balance request with wallet and address")
    def test_balance_wallet_address(self, main_wallet: WalletFile, cli: FrostfsCli):
        result = cli.accounting.balance(
            wallet=main_wallet.path,
            rpc_endpoint=self.cluster.default_rpc_endpoint,
            address=main_wallet.get_address(),
        )

        self.check_amount(result)

    @allure.title("Test balance request with wallet only")
    def test_balance_wallet(self, main_wallet: WalletFile, cli: FrostfsCli):
        result = cli.accounting.balance(
            wallet=main_wallet.path, rpc_endpoint=self.cluster.default_rpc_endpoint
        )
        self.check_amount(result)

    @allure.title("Test balance request with wallet and wrong address")
    def test_balance_wrong_address(
        self, main_wallet: WalletFile, other_wallet: WalletFile, cli: FrostfsCli
    ):
        with pytest.raises(Exception, match="address option must be specified and valid"):
            cli.accounting.balance(
                wallet=main_wallet.path,
                rpc_endpoint=self.cluster.default_rpc_endpoint,
                address=other_wallet.get_address(),
            )

    @allure.title("Test balance request with config file")
    def test_balance_api(self, temp_directory: str, main_wallet: WalletFile, client_shell: Shell):
        config_file = self.write_api_config(
            config_dir=temp_directory,
            endpoint=self.cluster.default_rpc_endpoint,
            wallet=main_wallet.path,
        )
        logger.info(f"Config with API endpoint: {config_file}")

        cli = FrostfsCli(client_shell, FROSTFS_CLI_EXEC, config_file=config_file)
        result = cli.accounting.balance()

        self.check_amount(result)

@ -51,6 +51,7 @@ class Test_http_object(ClusterTestBase):
        Expected result:
        Hashes must be the same.
        """

        with allure.step("Create public container"):
            cid = create_container(
                self.wallet,

@ -24,10 +24,10 @@ from pytest_tests.helpers.http_gate import (
from pytest_tests.steps.cluster_test_base import ClusterTestBase

logger = logging.getLogger("NeoLogger")
EXPIRATION_TIMESTAMP_HEADER = "__FROSRFS__EXPIRATION_TIMESTAMP"
EXPIRATION_EPOCH_HEADER = "__FROSRFS__EXPIRATION_EPOCH"
EXPIRATION_DURATION_HEADER = "__FROSRFS__EXPIRATION_DURATION"
EXPIRATION_EXPIRATION_RFC = "__FROSRFS__EXPIRATION_RFC3339"
EXPIRATION_TIMESTAMP_HEADER = "__FROSTFS__EXPIRATION_TIMESTAMP"
EXPIRATION_EPOCH_HEADER = "__FROSTFS__EXPIRATION_EPOCH"
EXPIRATION_DURATION_HEADER = "__FROSTFS__EXPIRATION_DURATION"
EXPIRATION_EXPIRATION_RFC = "__FROSTFS__EXPIRATION_RFC3339"
FROSTFS_EXPIRATION_EPOCH = "Frostfs-Expiration-Epoch"
FROSTFS_EXPIRATION_DURATION = "Frostfs-Expiration-Duration"
FROSTFS_EXPIRATION_TIMESTAMP = "Frostfs-Expiration-Timestamp"

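The change above only fixes the misspelled `__FROSRFS__` prefix in the system-attribute names; the user-facing `Frostfs-Expiration-*` header names stay the same. As a loose illustration of what these headers are for (not the helper these tests actually call), an upload through the HTTP gate could attach an expiration attribute like this; the gate URL, container id, and the `X-Attribute-` prefix are assumptions:

```python
import requests

# Hypothetical values: a local HTTP gate and an existing container id.
HTTP_GATE = "http://localhost:8080"
CID = "HeZu2DHhnWKPyu9cAX3qWnXKMDrhxa4HNzTAGJKbMqkb"

# Ask the gate to store the object with an expiration epoch attribute.
headers = {"X-Attribute-Frostfs-Expiration-Epoch": "100"}

with open("object.bin", "rb") as payload:
    response = requests.post(
        f"{HTTP_GATE}/upload/{CID}",
        headers=headers,
        files={"file": payload},
    )
print(response.status_code, response.text)
```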
@ -6,7 +6,6 @@ from random import choices, sample

import allure
import pytest
from frostfs_testlib.utils import wallet_utils

from pytest_tests.helpers.aws_cli_client import AwsCliClient
from pytest_tests.helpers.file_helper import (

@ -662,37 +661,10 @@ class TestS3GateObject(TestS3GateBase):
            {"Key": tag_key_3, "Value": str(tag_value_3)}
        ], "Tags must be the same"

    @pytest.fixture
    def prepare_two_wallets(self, default_wallet, client_shell):
        self.main_wallet = default_wallet
        self.main_public_key = wallet_utils.get_wallet_public_key(self.main_wallet, WALLET_PASS)
        self.other_wallet = os.path.join(os.getcwd(), ASSETS_DIR, f"{str(uuid.uuid4())}.json")
        wallet_utils.init_wallet(self.other_wallet, WALLET_PASS)
        self.other_public_key = wallet_utils.get_wallet_public_key(self.other_wallet, WALLET_PASS)

        if not FREE_STORAGE:
            main_chain = self.cluster.main_chain_nodes[0]
            deposit = 30
            transfer_gas(
                shell=client_shell,
                amount=deposit + 1,
                main_chain=main_chain,
                wallet_to_path=self.other_wallet,
                wallet_to_password=WALLET_PASS,
            )
            deposit_gas(
                shell=client_shell,
                main_chain=main_chain,
                amount=deposit,
                wallet_from_path=self.other_wallet,
                wallet_from_password=WALLET_PASS,
            )

    @allure.title("Test S3: put object with ACL")
    @pytest.mark.parametrize("bucket_versioning", ["ENABLED", "SUSPENDED"])
    def test_s3_put_object_acl(
        self,
        prepare_two_wallets,
        bucket_versioning,
        bucket,
        complex_object_size,

@ -187,7 +187,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session with range operations")
    @pytest.mark.static_session
    @pytest.mark.parametrize(
        "method_under_test,verb",
        [(get_range, ObjectVerb.RANGE), (get_range_hash, ObjectVerb.RANGEHASH)],

@ -227,7 +226,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session with search operation")
    @pytest.mark.static_session
    @pytest.mark.xfail
    # (see https://github.com/nspcc-dev/neofs-node/issues/2030)
    def test_static_session_search(

@ -255,7 +253,6 @@ class TestObjectStaticSession(ClusterTestBase):
        assert expected_object_ids == actual_object_ids

    @allure.title("Validate static session with object id not in session")
    @pytest.mark.static_session
    def test_static_session_unrelated_object(
        self,
        user_wallet: WalletFile,

@ -280,7 +277,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session with user id not in session")
    @pytest.mark.static_session
    def test_static_session_head_unrelated_user(
        self,
        stranger_wallet: WalletFile,

@ -307,7 +303,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session with wrong verb in session")
    @pytest.mark.static_session
    def test_static_session_head_wrong_verb(
        self,
        user_wallet: WalletFile,

@ -334,7 +329,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session with container id not in session")
    @pytest.mark.static_session
    def test_static_session_unrelated_container(
        self,
        user_wallet: WalletFile,

@ -362,7 +356,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session which signed by another wallet")
    @pytest.mark.static_session
    def test_static_session_signed_by_other(
        self,
        owner_wallet: WalletFile,

@ -401,7 +394,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session which signed for another container")
    @pytest.mark.static_session
    def test_static_session_signed_for_other_container(
        self,
        owner_wallet: WalletFile,

@ -440,7 +432,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session which wasn't signed")
    @pytest.mark.static_session
    def test_static_session_without_sign(
        self,
        owner_wallet: WalletFile,

@ -477,7 +468,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session which expires at next epoch")
    @pytest.mark.static_session
    def test_static_session_expiration_at_next(
        self,
        owner_wallet: WalletFile,

@ -499,40 +489,56 @@ class TestObjectStaticSession(ClusterTestBase):
        object_id = storage_objects[0].oid
        expiration = Lifetime(epoch + 1, epoch, epoch)

        token_expire_at_next_epoch = get_object_signed_token(
            owner_wallet,
            user_wallet,
            container,
            storage_objects,
            ObjectVerb.HEAD,
            self.shell,
            temp_directory,
            expiration,
        )

        head_object(
            user_wallet.path,
            container,
            object_id,
            self.shell,
            self.cluster.default_rpc_endpoint,
            session=token_expire_at_next_epoch,
        )

        self.tick_epoch()

        with pytest.raises(Exception, match=EXPIRED_SESSION_TOKEN):
            head_object(
                user_wallet.path,
        with allure.step("Create session token"):
            token_expire_at_next_epoch = get_object_signed_token(
                owner_wallet,
                user_wallet,
                container,
                object_id,
                storage_objects,
                ObjectVerb.HEAD,
                self.shell,
                self.cluster.default_rpc_endpoint,
                session=token_expire_at_next_epoch,
                temp_directory,
                expiration,
            )

        with allure.step("Object should be available with session token after token creation"):
            with expect_not_raises():
                head_object(
                    user_wallet.path,
                    container,
                    object_id,
                    self.shell,
                    self.cluster.default_rpc_endpoint,
                    session=token_expire_at_next_epoch,
                )

        with allure.step(
            "Object should be available at last epoch before session token expiration"
        ):
            self.tick_epoch()
            with expect_not_raises():
                head_object(
                    user_wallet.path,
                    container,
                    object_id,
                    self.shell,
                    self.cluster.default_rpc_endpoint,
                    session=token_expire_at_next_epoch,
                )

        with allure.step("Object should NOT be available after session token expiration epoch"):
            self.tick_epoch()
            with pytest.raises(Exception, match=EXPIRED_SESSION_TOKEN):
                head_object(
                    user_wallet.path,
                    container,
                    object_id,
                    self.shell,
                    self.cluster.default_rpc_endpoint,
                    session=token_expire_at_next_epoch,
                )

    @allure.title("Validate static session which is valid starting from next epoch")
    @pytest.mark.static_session
    def test_static_session_start_at_next(
        self,
        owner_wallet: WalletFile,

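Both session tests in these hunks encode their timing in the `Lifetime` value: `Lifetime(epoch + 1, epoch, epoch)` for a token that dies after the next epoch, and `Lifetime(epoch + 2, epoch + 1, epoch)` (in the next hunk) for a token that only becomes valid at the next epoch. A small self-contained sketch of that arithmetic, assuming the fields are `(exp, nbf, iat)` — expiration, not-before, and issued-at epochs — which is inferred from how the tests behave (`MALFORMED_REQUEST` before `nbf`, `EXPIRED_SESSION_TOKEN` after `exp`), not quoted from the testlib class:

```python
from collections import namedtuple

# Field names are an assumption; the real Lifetime comes from frostfs_testlib.
Lifetime = namedtuple("Lifetime", ["exp", "nbf", "iat"])


def is_active(lifetime: Lifetime, current_epoch: int) -> bool:
    # A token is usable from its not-before epoch up to and including its expiration epoch.
    return lifetime.nbf <= current_epoch <= lifetime.exp


epoch = 10
expire_at_next = Lifetime(exp=epoch + 1, nbf=epoch, iat=epoch)
start_at_next = Lifetime(exp=epoch + 2, nbf=epoch + 1, iat=epoch)

assert is_active(expire_at_next, epoch)           # usable immediately
assert not is_active(expire_at_next, epoch + 2)   # rejected after exp
assert not is_active(start_at_next, epoch)        # not yet valid
assert is_active(start_at_next, epoch + 1)        # valid from nbf onwards
```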
@ -554,50 +560,70 @@ class TestObjectStaticSession(ClusterTestBase):
        object_id = storage_objects[0].oid
        expiration = Lifetime(epoch + 2, epoch + 1, epoch)

        token_start_at_next_epoch = get_object_signed_token(
            owner_wallet,
            user_wallet,
            container,
            storage_objects,
            ObjectVerb.HEAD,
            self.shell,
            temp_directory,
            expiration,
        )

        with pytest.raises(Exception, match=MALFORMED_REQUEST):
            head_object(
                user_wallet.path,
        with allure.step("Create session token"):
            token_start_at_next_epoch = get_object_signed_token(
                owner_wallet,
                user_wallet,
                container,
                object_id,
                storage_objects,
                ObjectVerb.HEAD,
                self.shell,
                self.cluster.default_rpc_endpoint,
                session=token_start_at_next_epoch,
                temp_directory,
                expiration,
            )

        self.tick_epoch()
        head_object(
            user_wallet.path,
            container,
            object_id,
            self.shell,
            self.cluster.default_rpc_endpoint,
            session=token_start_at_next_epoch,
        )
        with allure.step("Object should NOT be available with session token after token creation"):
            with pytest.raises(Exception, match=MALFORMED_REQUEST):
                head_object(
                    user_wallet.path,
                    container,
                    object_id,
                    self.shell,
                    self.cluster.default_rpc_endpoint,
                    session=token_start_at_next_epoch,
                )

        self.tick_epoch()
        with pytest.raises(Exception, match=EXPIRED_SESSION_TOKEN):
            head_object(
                user_wallet.path,
                container,
                object_id,
                self.shell,
                self.cluster.default_rpc_endpoint,
                session=token_start_at_next_epoch,
            )
        with allure.step(
            "Object should be available with session token starting from token nbf epoch"
        ):
            self.tick_epoch()
            with expect_not_raises():
                head_object(
                    user_wallet.path,
                    container,
                    object_id,
                    self.shell,
                    self.cluster.default_rpc_endpoint,
                    session=token_start_at_next_epoch,
                )

        with allure.step(
            "Object should be available at last epoch before session token expiration"
        ):
            self.tick_epoch()
            with expect_not_raises():
                head_object(
                    user_wallet.path,
                    container,
                    object_id,
                    self.shell,
                    self.cluster.default_rpc_endpoint,
                    session=token_start_at_next_epoch,
                )

        with allure.step("Object should NOT be available after session token expiration epoch"):
            self.tick_epoch()
            with pytest.raises(Exception, match=EXPIRED_SESSION_TOKEN):
                head_object(
                    user_wallet.path,
                    container,
                    object_id,
                    self.shell,
                    self.cluster.default_rpc_endpoint,
                    session=token_start_at_next_epoch,
                )

    @allure.title("Validate static session which is already expired")
    @pytest.mark.static_session
    def test_static_session_already_expired(
        self,
        owner_wallet: WalletFile,

@ -691,7 +717,6 @@ class TestObjectStaticSession(ClusterTestBase):
        )

    @allure.title("Validate static session which is issued in future epoch")
    @pytest.mark.static_session
    def test_static_session_invalid_issued_epoch(
        self,
        owner_wallet: WalletFile,

@ -11,7 +11,7 @@ from configobj import ConfigObj
from frostfs_testlib.cli import FrostfsCli

from pytest_tests.helpers.cluster import Cluster, StorageNode
from pytest_tests.resources.common import WALLET_CONFIG
from pytest_tests.resources.common import CLI_DEFAULT_TIMEOUT, WALLET_CONFIG

SHARD_PREFIX = "FROSTFS_STORAGE_SHARD_"
BLOBSTOR_PREFIX = "_BLOBSTOR_"

@ -143,6 +143,7 @@ class TestControlShard:
            wallet=wallet_path,
            wallet_password=wallet_password,
            json_mode=True,
            timeout=CLI_DEFAULT_TIMEOUT,
        )
        return [Shard.from_object(shard) for shard in json.loads(result.stdout.split(">", 1)[1])]

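One detail worth noting in the hunk above: the shard list is parsed from `result.stdout.split(">", 1)[1]`, so everything up to the first `>` (presumably a prompt or banner echoed by the CLI) is discarded before `json.loads`. A tiny standalone illustration with a made-up stdout value:

```python
import json

# Hypothetical CLI output: some prompt text, then the JSON payload after ">".
stdout = 'Enter password > [{"shard_id": "abc123", "mode": "read-write"}]'

shards = json.loads(stdout.split(">", 1)[1])
assert shards[0]["mode"] == "read-write"
```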
47 pytest_tests/testsuites/special/test_logs.py Normal file
@ -0,0 +1,47 @@
import os
import shutil
from datetime import datetime

import allure
import pytest

from pytest_tests.steps.cluster_test_base import ClusterTestBase


class TestLogs(ClusterTestBase):
    @pytest.mark.logs_after_session
    def test_logs_after_session(self, temp_directory: str, session_start_time: datetime):
        """
        This test automatically added to any test run to check logs from cluster for critical errors.

        """

        end_time = datetime.utcnow()
        logs_dir = os.path.join(temp_directory, "logs")
        os.makedirs(logs_dir)
        issues_regex = r"\Wpanic\W|\Woom\W|\Wtoo many open files\W"

        hosts_with_problems = []
        for host in self.cluster.hosts:
            with allure.step(f"Check logs on {host.config.address}"):
                if host.is_message_in_logs(issues_regex, session_start_time, end_time):
                    hosts_with_problems.append(host.config.address)
                    host.dump_logs(
                        logs_dir,
                        since=session_start_time,
                        until=end_time,
                        filter_regex=issues_regex,
                    )

        if hosts_with_problems:
            self._attach_logs(logs_dir)

        assert (
            not hosts_with_problems
        ), f"The following hosts contain critical errors in system logs: {', '.join(hosts_with_problems)}"

    def _attach_logs(self, logs_dir: str) -> None:
        # Zip all files and attach to Allure because it is more convenient to download a single
        # zip with all logs rather than mess with individual logs files per service or node
        logs_zip_file_path = shutil.make_archive(logs_dir, "zip", logs_dir)
        allure.attach.file(logs_zip_file_path, name="logs.zip", extension="zip")