Add test suites for acl, container and node management
Signed-off-by: Vladimir Domnich <v.domnich@yadro.com>

Squashed commits:

commit f7c68cfb423e3213179521954dccb6053fc6382d  (Merge: e234b61 99bfe6b)  Merge branch 'avolkov/add_ssh' into internal_tmp_b
commit 99bfe6b56cd75590f868313910068cf1a80bd43f  Tick one more epoch.
commit bd70bc49391d578cdda727edb4dcd181b832bf1e  Start nodes in case of test fail.
commit b3888ec62cfc3c18b1dff58962a94a3094342186  Catch json decode error.
commit c18e415b783ec3e4ce804f43c19246240c186a97  Add ssh-key access.
commit 7dbdeb653b7d5b7ab3874b546e05a48b502c2460  Add some tests.
commit 844367c68638c7f97ba4860dd0069c07f499d66d  Add some tests for nodes management.
commit 1b84b37048dcd3cc0888aa54639975fc11fb2d75  Add some tests for nodes management.
commit b30c1336a6919e0c8e500bdf2a9be3d5a14470ea  Add ssh execution option.
commit 2df40eca74ee20bd668778715185ffddda63cb05  Change AWS cli v1 to cli v2.
commit 7403da3d7c2a5963cfbb12b7c0f3d1d641f52a7e  Change AWS cli v1 to cli v2.
commit b110dcdb655a585e6c53e6ebc3eae7bf1f1e792f  Change AWS cli v1 to cli v2.
commit 6183756a4c064c932ee193c2e08a79343017fa49  Change AWS cli v1 to cli v2.
commit 398006544d60896faa3fc6e6a9dbb51ada06759c  Fix container run.
commit e7202136dabbe7e2d3da508e0a2ec55a0d5cb67a  Added tests with AWS CLI.
commit 042e1478ee1fd700c8572cbc6d0d9e6b312b8e8d  Fix PR comments.
commit e234b61dbb9b8b10812e069322ab03615af0d44e  Add debug for env.
commit 14febd06713dc03a8207bb80384acb4a7d32df0e  Move env variables for pytest docker into env file.
commit bafdc6131b5ac855a43b672be194cde2ccf6f75b  Move env variables for pytest docker into env file.
commit 27c2c6b11f51d2e3c085d44b814cb4c00f81b376  Move env variables for pytest docker into env file.
commit e4db4948978e092adb83aeacdf06619f5ca2f242  Merge branch 'master' into avolkov/try_pytest
commit c83a7e625e8daba3a40b65a1d69b2b1323e9ae28  WIP.
commit 42489bbf8058acd2926cdb04074dc9a8ff86a0a0  Merge branch 'avolkov/try_pytest' into internal_tmp_b
commit 62526d94dc2bf72372125bea119fa66f670cf7e1  Improve allure attachments.
commit 4564dae697cb069ac45bc4ba7eb0b5bbdcf1d153  Merge branch 'avolkov/try_pytest' into internal_tmp_b
commit ab65810b23410ca7382ed4bdd257addfa6619659  Added tests for S3 API.
commit 846c495a846c977f3e5f0bada01e5a9691a81e3d  Let's get NEOFS_IR_CONTRACTS_NEOFS from env.
commit c39bd88568b70ffcb76b76d68531b17d3747829d  Added S3 test for versioning.
commit d7c9f351abc7e02d4ebf162475604a2d6b46e712  Merge remote-tracking branch 'origin/avolkov/try_pytest' into internal_tmp_b
commit bfbed22a50ce4cb6a49de383cfef66452ba9f4c1  Added some tests for S3 API and curl tests for HTTP.
commit 1c49def3ddd0b3f7cf97f131e269ad465c70a680  Add yadro submodule
commit 2a91685f9108101ab523e05cc9287d0f5a20196b  Fix.
commit 33fc2813e205766e69ef74a42a10850db6c63ce6  Add debug.
commit aaaceca59e4c67253ecd4a741667b7327d1fb679  Add env variables for data nodes.
commit 001cb26bcc22c8543fb2672564e898928d20622b  (Merge: b48a87d c70da26)  Merge branch 'avolkov/try_pytest' into tmp_b
commit b48a87d9a09309fea671573ba6cf303c31b11b6a  Added submodule
commit c70da265d319950977774e34740276f324eb57a7  Added tests for S3 bucket API.
commit 3d335abe6de45d1859454f1ddf85a97514667b8f  Added tests for S3 object API.
commit 2ac829c700f5bc20c28953f1d40cd953fed8b390  flake8 changes for python_keywords module.
commit 2de5963e96b13a5e944906b695e5d9c0829de9ad  Add pytest tests.
commit 4472c079b9dfd979b7c101bea32893c80cb1fe57  Add pytest tests.

Signed-off-by: a.y.volkov <a.y.volkov@yadro.com>
Parent: dd98eb3d9d
Commit: d9d74baa72
20 changed files with 860 additions and 32 deletions
.gitignore (vendored): 4 changed lines
@@ -1,8 +1,10 @@
# ignore test result files under any path
# ignore test results
**/log.html
**/output.xml
**/report.html
**/dockerlogs*.tar.gz
allure_results/*
xunit_results.xml

# ignore pycache under any path
**/__pycache__
Makefile: 8 changed lines
@@ -7,6 +7,13 @@ SHELL = bash
OUTPUT_DIR = artifacts/
KEYWORDS_REPO = git@github.com:nspcc-dev/neofs-keywords.git
VENVS = $(shell ls -1d venv/*/ | sort -u | xargs basename -a)
ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
DEV_IMAGE_PY ?= registry.spb.yadro.com/tools/pytest-neofs-x86_64:6

ifeq ($(shell uname -s),Darwin)
	DOCKER_NETWORK = --network bridge -p 389:389 -p 636:636
endif

.PHONY: all
all: venvs

@@ -32,6 +39,7 @@ clean:

pytest-local:
	@echo "⇒ Run Pytest"
	export PYTHONPATH=$(ROOT_DIR)/neofs-keywords/lib:$(ROOT_DIR)/neofs-keywords/robot:$(ROOT_DIR)/robot/resources/lib:$(ROOT_DIR)/robot/resources/lib/python_keywords:$(ROOT_DIR)/robot/variables && \
		python -m pytest pytest_tests/testsuites/

help:
@@ -1 +1 @@
Subproject commit f66be076acb102a80e9f8abd5d1cde104673464e
Subproject commit 1d7ebe1d6d4bb6c8ce62f36091347bddb54d333b
@@ -22,9 +22,12 @@ def create_file_with_content(file_path: str = None, content: str = None) -> str:
    return file_path


def get_file_content(file_path: str) -> str:
    with open(file_path, 'r') as out_file:
        content = out_file.read()
def get_file_content(file_path: str, content_len: int = None, mode='r') -> str:
    with open(file_path, mode) as out_file:
        if content_len:
            content = out_file.read(content_len)
        else:
            content = out_file.read()

    return content


@@ -46,3 +49,37 @@ def split_file(file_path: str, parts: int) -> list[str]:
        part_id += 1

    return files


def robot_time_to_int(value: str) -> int:
    if value.endswith('s'):
        return int(value[:-1])

    if value.endswith('m'):
        return int(value[:-1]) * 60


def placement_policy_from_container(container_info: str) -> str:
    """
    Get placement policy from container info:

    container ID: j7k4auNHRmiPMSmnH2qENLECD2au2y675fvTX6csDwd
    version: 2.12
    owner ID: NQ8HUxE5qEj7UUvADj7z9Z7pcvJdjtPwuw
    basic ACL: 0x0fbfbfff (eacl-public-read-write)
    attribute: Timestamp=1656340345 (2022-06-27 17:32:25 +0300 MSK)
    nonce: 1c511e88-efd7-4004-8dbf-14391a5d375a
    placement policy:
    REP 1 IN LOC_PLACE
    CBF 1
    SELECT 1 FROM LOC_SW AS LOC_PLACE
    FILTER Country EQ Sweden AS LOC_SW

    Args:
        container_info: output from neofs-cli container get command

    Returns:
        placement policy as a string
    """
    assert ':' in container_info, f'Could not find placement rule in the output {container_info}'
    return container_info.split(':')[-1].replace('\n', ' ').strip()
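Note: a minimal usage sketch for the new helpers above; the sample CLI output and the time strings below are invented for illustration, not taken from a real run:

sample_output = (
    'container ID: j7k4auNHRmiPMSmnH2qENLECD2au2y675fvTX6csDwd\n'
    'placement policy:\n'
    'REP 1 IN LOC_PLACE\n'
    'CBF 1\n'
    'SELECT 1 FROM LOC_SW AS LOC_PLACE\n'
    'FILTER Country EQ Sweden AS LOC_SW\n'
)
# everything after the last ':' is taken as the policy and flattened to one line
assert placement_policy_from_container(sample_output) == (
    'REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW'
)
# Robot-style durations are converted to seconds
assert robot_time_to_int('15s') == 15
assert robot_time_to_int('2m') == 120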
@@ -14,4 +14,6 @@ markers =
    http_gate: HTTP gate contract
    s3_gate: S3 gate tests
    curl: tests for HTTP gate with curl utility
    long: long tests (with long execution time)
    node_mgmt: neofs control commands
    acl: tests for basic and extended ACL
@@ -60,4 +60,5 @@ tomli==2.0.1
typing-extensions==4.2.0
urllib3==1.26.9
websocket-client==1.3.2
yarl==1.7.2
paramiko==2.10.3
pytest_tests/testsuites/acl/test_acl.py (new file): 117 lines
@@ -0,0 +1,117 @@
import os
from typing import Tuple

import allure
import pytest

import wallet
from common import ASSETS_DIR
from python_keywords.acl import set_eacl
from python_keywords.container import create_container
from python_keywords.neofs_verbs import (delete_object, get_object, get_range,
                                         get_range_hash, head_object,
                                         put_object, search_object)
from python_keywords.utility_keywords import generate_file, get_file_hash

RESOURCE_DIR = os.path.join(
    os.path.dirname(os.path.realpath(__file__)),
    '../../../robot/resources/files/',
)


@pytest.mark.sanity
@pytest.mark.acl
class TestACL:
    @pytest.fixture(autouse=True)
    def create_two_wallets(self, prepare_wallet_and_deposit):
        self.main_wallet = prepare_wallet_and_deposit
        self.other_wallet = wallet.init_wallet(ASSETS_DIR)

    @allure.title('Test basic ACL')
    def test_basic_acl(self):
        """
        Test basic ACL set during container creation.
        """
        file_name = generate_file()

        with allure.step('Create public container and check access'):
            cid_public = create_container(self.main_wallet, basic_acl='public-read-write')
            self.check_full_access(cid_public, file_name)

        with allure.step('Create private container and check only owner has access'):
            cid_private = create_container(self.main_wallet, basic_acl='private')

        with allure.step('Check owner can put/get object into private container'):
            oid = put_object(wallet=self.main_wallet, path=file_name, cid=cid_private)

            got_file = get_object(self.main_wallet, cid_private, oid)
            assert get_file_hash(got_file) == get_file_hash(file_name)

        with allure.step('Check no one except owner has access to operations with container'):
            self.check_no_access_to_container(self.other_wallet, cid_private, oid, file_name)

        delete_object(self.main_wallet, cid_private, oid)

    @allure.title('Test extended ACL')
    def test_extended_acl(self):
        """
        Test basic extended ACL applied after container creation.
        """
        file_name = generate_file()
        deny_all_eacl = os.path.join(RESOURCE_DIR, 'eacl_tables/gen_eacl_deny_all_OTHERS')

        with allure.step('Create public container and check access'):
            cid_public = create_container(self.main_wallet, basic_acl='eacl-public-read-write')
            oid = self.check_full_access(cid_public, file_name)

        with allure.step('Set "deny all operations for other" for created container'):
            set_eacl(self.main_wallet, cid_public, deny_all_eacl)

        with allure.step('Check no one except owner has access to operations with container'):
            self.check_no_access_to_container(self.other_wallet, cid_public, oid, file_name)

        with allure.step('Check owner has access to operations with container'):
            self.check_full_access(cid_public, file_name, wallet_to_check=((self.main_wallet, 'owner'),))

        delete_object(self.main_wallet, cid_public, oid)

    @staticmethod
    def check_no_access_to_container(wallet: str, cid: str, oid: str, file_name: str):
        err_pattern = '.*access to object operation denied.*'
        with pytest.raises(Exception, match=err_pattern):
            get_object(wallet, cid, oid)

        with pytest.raises(Exception, match=err_pattern):
            put_object(wallet, file_name, cid)

        with pytest.raises(Exception, match=err_pattern):
            delete_object(wallet, cid, oid)

        with pytest.raises(Exception, match=err_pattern):
            head_object(wallet, cid, oid)

        with pytest.raises(Exception, match=err_pattern):
            get_range(wallet, cid, oid, file_path='s_get_range', bearer='', range_cut='0:10')

        with pytest.raises(Exception, match=err_pattern):
            get_range_hash(wallet, cid, oid, bearer_token='', range_cut='0:10')

        with pytest.raises(Exception, match=err_pattern):
            search_object(wallet, cid)

    def check_full_access(self, cid: str, file_name: str, wallet_to_check: Tuple = None) -> str:
        wallets = wallet_to_check or ((self.main_wallet, 'owner'), (self.other_wallet, 'not owner'))
        for current_wallet, desc in wallets:
            with allure.step(f'Check {desc} can put object into public container'):
                oid = put_object(current_wallet, file_name, cid)

            with allure.step(f'Check {desc} can execute operations on object from public container'):
                got_file = get_object(current_wallet, cid, oid)
                assert get_file_hash(got_file) == get_file_hash(file_name), 'Expected hashes are the same'

                head_object(current_wallet, cid, oid)
                get_range(current_wallet, cid, oid, file_path='s_get_range', bearer='', range_cut='0:10')
                get_range_hash(current_wallet, cid, oid, bearer_token='', range_cut='0:10')
                search_object(current_wallet, cid)

        return oid
@@ -45,6 +45,12 @@ def init_wallet_with_address():

    yield wallet.init_wallet(ASSETS_DIR)


@allure.title('Prepare tmp directory')
def prepare_tmp_dir():
    full_path = f'{os.getcwd()}/{ASSETS_DIR}'
    os.mkdir(full_path)
    yield
    shutil.rmtree(full_path)
pytest_tests/testsuites/container/test_container.py (new file): 64 lines
@@ -0,0 +1,64 @@
import json
from time import sleep

import allure
import pytest

from contract_keywords import tick_epoch
from python_keywords.container import create_container, get_container, list_containers, delete_container
from utility import placement_policy_from_container


@pytest.mark.parametrize('name', ['', 'test-container'], ids=['No name', 'Set particular name'])
@pytest.mark.sanity
@pytest.mark.container
def test_container_creation(prepare_wallet_and_deposit, name):
    wallet = prepare_wallet_and_deposit
    msg = f'with name {name}' if name else 'without name'
    allure.dynamic.title(f'User can create container {msg}')

    with open(wallet) as fp:
        json_wallet = json.load(fp)

    placement_rule = 'REP 2 IN X CBF 1 SELECT 2 FROM * AS X'
    info_to_check = {'basic ACL: 0x1c8c8ccc (private)',
                     f'owner ID: {json_wallet.get("accounts")[0].get("address")}'}
    if name:
        info_to_check.add(f'attribute: Name={name}')
        name = f' --name {name}'

    cid = create_container(wallet, rule=placement_rule, options=name)
    info_to_check.add(f'container ID: {cid}')

    containers = list_containers(wallet)
    assert cid in containers, f'Expected container {cid} in containers: {containers}'

    get_output = get_container(wallet, cid, flag='')

    with allure.step('Check container has correct information'):
        got_policy = placement_policy_from_container(get_output)
        assert got_policy == placement_rule.replace('\'', ''), \
            f'Expected \n{placement_rule} and got policy \n{got_policy} are the same'

        for info in info_to_check:
            assert info in get_output, f'Expected info {info} in output:\n{get_output}'

    with allure.step('Delete container and check it was deleted'):
        delete_container(wallet, cid)
        tick_epoch()
        wait_for_container_deletion(wallet, cid)


@allure.step('Wait for container deletion')
def wait_for_container_deletion(wallet: str, cid: str):
    attempts, sleep_interval = 10, 5
    for _ in range(attempts):
        try:
            get_container(wallet, cid)
            sleep(sleep_interval)
            continue
        except Exception as err:
            if 'container not found' not in str(err):
                raise AssertionError(f'Expected "container not found" in error, got\n{err}')
            return
    raise AssertionError(f'Expected container deleted during {attempts * sleep_interval} sec.')
pytest_tests/testsuites/network/test_node_management.py (new file): 322 lines
@@ -0,0 +1,322 @@
import logging
from random import choice
from time import sleep

import allure
import pytest
from common import (COMPLEX_OBJ_SIZE, MAINNET_BLOCK_TIME, NEOFS_CONTRACT_CACHE_TIMEOUT,
                    NEOFS_NETMAP_DICT, SHARD_0_GC_SLEEP)
from contract_keywords import tick_epoch
from utility_keywords import generate_file
from python_keywords.container import create_container, get_container
from python_keywords.neofs_verbs import (delete_object, get_object,
                                         head_object, put_object)
from python_keywords.node_management import (drop_object, get_netmap_snapshot,
                                             get_locode,
                                             node_healthcheck,
                                             node_set_status, node_shard_list,
                                             node_shard_set_mode,
                                             start_nodes_remote,
                                             stop_nodes_remote)
from storage_policy import get_nodes_with_object, get_simple_object_copies
from utility import robot_time_to_int
from wellknown_acl import PUBLIC_ACL
from utility import placement_policy_from_container

logger = logging.getLogger('NeoLogger')


@pytest.fixture
@allure.title('Create container and pick the node with data')
def crate_container_and_pick_node(create_remote_connection, prepare_wallet_and_deposit):
    wallet = prepare_wallet_and_deposit
    file_path = generate_file()
    placement_rule = 'REP 1 IN X CBF 1 SELECT 1 FROM * AS X'

    cid = create_container(wallet, rule=placement_rule, basic_acl=PUBLIC_ACL)
    oid = put_object(wallet, file_path, cid)

    nodes = get_nodes_with_object(wallet, cid, oid)
    assert len(nodes) == 1
    node = nodes[0]

    node_name = choice([node_name for node_name, params in NEOFS_NETMAP_DICT.items() if params.get('rpc') == node])

    yield cid, node_name

    shards = node_shard_list(create_remote_connection, node_name)
    assert shards

    for shard in shards:
        node_shard_set_mode(create_remote_connection, node_name, shard, 'read-write')

    node_shard_list(create_remote_connection, node_name)


@pytest.fixture
def start_node_if_needed(create_remote_connection):
    yield
    try:
        start_nodes_remote(create_remote_connection, list(NEOFS_NETMAP_DICT.keys()))
    except Exception as err:
        logger.error(f'Node start fails with error:\n{err}')


@allure.title('Control Operations with storage nodes')
@pytest.mark.node_mgmt
def test_nodes_management(prepare_tmp_dir, create_remote_connection):
    """
    This test checks base control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
    """
    random_node = choice(list(NEOFS_NETMAP_DICT))
    alive_node = choice([node for node in NEOFS_NETMAP_DICT if node != random_node])
    snapshot = get_netmap_snapshot(create_remote_connection, node_name=alive_node)
    assert random_node in snapshot, f'Expected node {random_node} in netmap'

    with allure.step('Run health check for all storage nodes'):
        for node_name in NEOFS_NETMAP_DICT.keys():
            health_check = node_healthcheck(create_remote_connection, node_name)
            assert health_check.health_status == 'READY' and health_check.network_status == 'ONLINE'

    with allure.step(f'Move node {random_node} to offline state'):
        node_set_status(create_remote_connection, random_node, status='offline')

    sleep(robot_time_to_int(MAINNET_BLOCK_TIME))
    tick_epoch()

    with allure.step(f'Check node {random_node} went to offline'):
        health_check = node_healthcheck(create_remote_connection, random_node)
        assert health_check.health_status == 'READY' and health_check.network_status == 'STATUS_UNDEFINED'
        snapshot = get_netmap_snapshot(create_remote_connection, node_name=alive_node)
        assert random_node not in snapshot, f'Expected node {random_node} not in netmap'

    with allure.step(f'Check node {random_node} went to online'):
        node_set_status(create_remote_connection, random_node, status='online')

    sleep(robot_time_to_int(MAINNET_BLOCK_TIME))
    tick_epoch()

    with allure.step(f'Check node {random_node} went to online'):
        health_check = node_healthcheck(create_remote_connection, random_node)
        assert health_check.health_status == 'READY' and health_check.network_status == 'ONLINE'
        snapshot = get_netmap_snapshot(create_remote_connection, node_name=alive_node)
        assert random_node in snapshot, f'Expected node {random_node} in netmap'


@pytest.mark.parametrize('placement_rule,expected_copies', [
    ('REP 2 IN X CBF 2 SELECT 2 FROM * AS X', 2),
    ('REP 2 IN X CBF 1 SELECT 2 FROM * AS X', 2),
    ('REP 3 IN X CBF 1 SELECT 3 FROM * AS X', 3),
    ('REP 1 IN X CBF 1 SELECT 1 FROM * AS X', 1),
    ('REP 1 IN X CBF 2 SELECT 1 FROM * AS X', 1),
    ('REP 4 IN X CBF 1 SELECT 4 FROM * AS X', 4),
    ('REP 2 IN X CBF 1 SELECT 4 FROM * AS X', 2),
])
@pytest.mark.node_mgmt
@allure.title('Test object copies based on placement policy')
def test_placement_policy(prepare_wallet_and_deposit, placement_rule, expected_copies):
    """
    This test checks object's copies based on container's placement policy.
    """
    wallet = prepare_wallet_and_deposit
    file_path = generate_file()
    validate_object_copies(wallet, placement_rule, file_path, expected_copies)


@pytest.mark.parametrize('placement_rule,expected_copies,nodes', [
    ('REP 4 IN X CBF 1 SELECT 4 FROM * AS X', 4, ['s01', 's02', 's03', 's04']),
    ('REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW', 1, ['s03']),
    ("REP 1 CBF 1 SELECT 1 FROM LOC_SPB FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB", 1, ['s02']),
    ("REP 1 IN LOC_SPB_PLACE REP 1 IN LOC_MSK_PLACE CBF 1 SELECT 1 FROM LOC_SPB AS LOC_SPB_PLACE "
     "SELECT 1 FROM LOC_MSK AS LOC_MSK_PLACE "
     "FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB FILTER 'UN-LOCODE' EQ 'RU MOW' AS LOC_MSK", 2, ['s01', 's02']),
    ('REP 4 CBF 1 SELECT 4 FROM LOC_EU FILTER Continent EQ Europe AS LOC_EU', 4, ['s01', 's02', 's03', 's04']),
    ("REP 1 CBF 1 SELECT 1 FROM LOC_SPB "
     "FILTER 'UN-LOCODE' NE 'RU MOW' AND 'UN-LOCODE' NE 'SE STO' AND 'UN-LOCODE' NE 'FI HEL' AS LOC_SPB", 1, ['s02']),
    ("REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER SubDivCode NE 'AB' AND SubDivCode NE '18' AS LOC_RU", 2, ['s01', 's02']),
    ("REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER Country EQ 'Russia' AS LOC_RU", 2, ['s01', 's02']),
    ("REP 2 CBF 1 SELECT 2 FROM LOC_EU FILTER Country NE 'Russia' AS LOC_EU", 2, ['s03', 's04']),
])
@pytest.mark.node_mgmt
@allure.title('Test object copies and storage nodes based on placement policy')
def test_placement_policy_with_nodes(prepare_wallet_and_deposit, placement_rule, expected_copies, nodes):
    """
    Based on container's placement policy check that storage nodes are picked correctly and object has
    correct copies amount.
    """
    wallet = prepare_wallet_and_deposit
    file_path = generate_file()
    cid, oid, found_nodes = validate_object_copies(wallet, placement_rule, file_path, expected_copies)
    expected_nodes = [NEOFS_NETMAP_DICT[node_name].get('rpc') for node_name in nodes]
    assert set(found_nodes) == set(expected_nodes), f'Expected nodes {expected_nodes}, got {found_nodes}'


@pytest.mark.parametrize('placement_rule,expected_copies', [
    ('REP 2 IN X CBF 2 SELECT 6 FROM * AS X', 2),
])
@pytest.mark.node_mgmt
@allure.title('Negative cases for placement policy')
def test_placement_policy_negative(prepare_wallet_and_deposit, placement_rule, expected_copies):
    """
    Negative test for placement policy.
    """
    wallet = prepare_wallet_and_deposit
    file_path = generate_file()
    with pytest.raises(RuntimeError, match='.*not enough nodes to SELECT from.*'):
        validate_object_copies(wallet, placement_rule, file_path, expected_copies)


@pytest.mark.node_mgmt
@allure.title('NeoFS object replication on node failover')
def test_replication(prepare_wallet_and_deposit, create_remote_connection, start_node_if_needed):
    """
    Test checks object replication on storage node failover and come back.
    """
    wallet = prepare_wallet_and_deposit
    file_path = generate_file()
    expected_nodes_count = 2

    cid = create_container(wallet, basic_acl=PUBLIC_ACL)
    oid = put_object(wallet, file_path, cid)

    nodes = get_nodes_with_object(wallet, cid, oid)
    assert len(nodes) == expected_nodes_count, f'Expected {expected_nodes_count} copies, got {len(nodes)}'

    node_names = [name for name, config in NEOFS_NETMAP_DICT.items() if config.get('rpc') in nodes]
    stopped_nodes = stop_nodes_remote(create_remote_connection, 1, node_names)

    wait_for_expected_object_copies(wallet, cid, oid)

    start_nodes_remote(create_remote_connection, stopped_nodes)
    tick_epoch()

    for node_name in node_names:
        wait_for_node_go_online(create_remote_connection, node_name)

    wait_for_expected_object_copies(wallet, cid, oid)


@pytest.mark.node_mgmt
@allure.title('NeoFS object could be dropped using control command')
def test_drop_object(prepare_wallet_and_deposit, create_remote_connection):
    """
    Test checks object could be dropped using `neofs-cli control drop-objects` command.
    """
    wallet = prepare_wallet_and_deposit
    file_path_simple, file_path_complex = generate_file(), generate_file(COMPLEX_OBJ_SIZE)

    locode = get_locode()
    rule = f"REP 1 CBF 1 SELECT 1 FROM * FILTER 'UN-LOCODE' EQ '{locode}' AS LOC"
    cid = create_container(wallet, rule=rule)
    oid_simple = put_object(wallet, file_path_simple, cid)
    oid_complex = put_object(wallet, file_path_complex, cid)

    for oid in (oid_simple, oid_complex):
        get_object(wallet, cid, oid)
        head_object(wallet, cid, oid)

    nodes = get_nodes_with_object(wallet, cid, oid_simple)
    node_name = choice([name for name, config in NEOFS_NETMAP_DICT.items() if config.get('rpc') in nodes])

    for oid in (oid_simple, oid_complex):
        with allure.step(f'Drop object {oid}'):
            get_object(wallet, cid, oid)
            head_object(wallet, cid, oid)
            drop_object(create_remote_connection, node_name, cid, oid)
            wait_for_obj_dropped(wallet, cid, oid, get_object)
            wait_for_obj_dropped(wallet, cid, oid, head_object)


@pytest.mark.node_mgmt
@pytest.mark.skip(reason='Need to clarify scenario')
@allure.title('Control Operations with storage nodes')
def test_shards(prepare_wallet_and_deposit, create_remote_connection, crate_container_and_pick_node):
    """
    This test checks base control operations with storage nodes (healthcheck, netmap-snapshot, set-status).
    """
    wallet = prepare_wallet_and_deposit
    file_path = generate_file()

    cid, node_name = crate_container_and_pick_node
    original_oid = put_object(wallet, file_path, cid)

    # for mode in ('read-only', 'degraded'):
    for mode in ('degraded',):
        shards = node_shard_list(create_remote_connection, node_name)
        assert shards

        for shard in shards:
            node_shard_set_mode(create_remote_connection, node_name, shard, mode)

        shards = node_shard_list(create_remote_connection, node_name)
        assert shards

        with pytest.raises(RuntimeError):
            put_object(wallet, file_path, cid)

        with pytest.raises(RuntimeError):
            delete_object(wallet, cid, original_oid)

        # head_object(wallet, cid, original_oid)
        get_object(wallet, cid, original_oid)

        for shard in shards:
            node_shard_set_mode(create_remote_connection, node_name, shard, 'read-write')

        shards = node_shard_list(create_remote_connection, node_name)
        assert shards

        oid = put_object(wallet, file_path, cid)
        delete_object(wallet, cid, oid)


@allure.step('Validate object has {expected_copies} copies')
def validate_object_copies(wallet: str, placement_rule: str, file_path: str, expected_copies: int):
    cid = create_container(wallet, rule=placement_rule, basic_acl=PUBLIC_ACL)
    got_policy = placement_policy_from_container(get_container(wallet, cid, flag=''))
    assert got_policy == placement_rule.replace('\'', ''), \
        f'Expected \n{placement_rule} and got policy \n{got_policy} are the same'
    oid = put_object(wallet, file_path, cid)
    nodes = get_nodes_with_object(wallet, cid, oid)
    assert len(nodes) == expected_copies, f'Expected {expected_copies} copies, got {len(nodes)}'
    return cid, oid, nodes


@allure.step('Wait for node {node_name} goes online')
def wait_for_node_go_online(create_remote_connection, node_name: str):
    timeout, attempts = 5, 20
    for _ in range(attempts):
        try:
            health_check = node_healthcheck(create_remote_connection, node_name)
            assert health_check.health_status == 'READY' and health_check.network_status == 'ONLINE'
            return
        except Exception as err:
            logger.warning(f'Node {node_name} is not online:\n{err}')
            sleep(timeout)
            continue
    raise AssertionError(f'Node {node_name} does not go online during timeout {timeout * attempts}')


@allure.step('Wait for {expected_copies} object copies in the wallet')
def wait_for_expected_object_copies(wallet: str, cid: str, oid: str, expected_copies: int = 2):
    for i in range(2):
        copies = get_simple_object_copies(wallet, cid, oid)
        if copies == expected_copies:
            break
        tick_epoch()
        sleep(robot_time_to_int(NEOFS_CONTRACT_CACHE_TIMEOUT))
    else:
        raise AssertionError(f'There are no {expected_copies} copies during time')


@allure.step('Wait for object to be dropped')
def wait_for_obj_dropped(wallet: str, cid: str, oid: str, checker):
    for _ in range(3):
        try:
            checker(wallet, cid, oid)
            sleep(robot_time_to_int(SHARD_0_GC_SLEEP))
        except Exception as err:
            if 'object not found' in str(err):
                break
    else:
        raise AssertionError(f'Object {oid} is not dropped from node')
@@ -4,13 +4,15 @@ from time import sleep
import allure
import pytest
from container import create_container
from epoch import tick_epoch
from epoch import get_epoch, tick_epoch
from tombstone import verify_head_tombstone
from python_keywords.neofs_verbs import (delete_object, get_object, get_range,
                                         get_range_hash, head_object,
                                         put_object, search_object)
from python_keywords.storage_policy import get_simple_object_copies
from python_keywords.utility_keywords import generate_file, get_file_hash
from common import SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE
from utility import get_file_content

logger = logging.getLogger('NeoLogger')


@@ -20,16 +22,23 @@ CLEANUP_TIMEOUT = 10
@allure.title('Test native object API')
@pytest.mark.sanity
@pytest.mark.grpc_api
def test_object_api(prepare_wallet_and_deposit):
@pytest.mark.parametrize('object_size', [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=['simple object', 'complex object'])
def test_object_api(prepare_wallet_and_deposit, request, object_size):
    """
    Test common gRPC API for object (put/get/head/get_range_hash/get_range/search/delete).
    """
    wallet = prepare_wallet_and_deposit
    cid = create_container(wallet)
    wallet_cid = {'wallet': wallet, 'cid': cid}
    file_usr_header = {'key1': 1, 'key2': 'abc'}
    file_usr_header_oth = {'key1': 2}
    range_cut = '0:10'
    file_usr_header = {'key1': 1, 'key2': 'abc', 'common_key': 'common_value'}
    file_usr_header_oth = {'key1': 2, 'common_key': 'common_value'}
    common_header = {'common_key': 'common_value'}
    range_len = 10
    range_cut = f'0:{range_len}'
    oids = []

    file_path = generate_file()
    allure.dynamic.title(f'Test native object API for {request.node.callspec.id}')
    file_path = generate_file(object_size)
    file_hash = get_file_hash(file_path)

    search_object(**wallet_cid, expected_objects_list=oids)

@@ -50,14 +59,21 @@ def test_object_api(prepare_wallet_and_deposit):
        assert file_hash == got_file_hash

    with allure.step('Get range/range hash'):
        get_range_hash(**wallet_cid, oid=oids[0], bearer_token='', range_cut=range_cut)
        get_range_hash(**wallet_cid, oid=oids[1], bearer_token='', range_cut=range_cut)
        get_range(**wallet_cid, oid=oids[1], bearer='', range_cut=range_cut)
        range_hash = get_range_hash(**wallet_cid, oid=oids[0], bearer_token='', range_cut=range_cut)
        assert get_file_hash(file_path, range_len) == range_hash, 'Expected range hash is correct'

        range_hash = get_range_hash(**wallet_cid, oid=oids[1], bearer_token='', range_cut=range_cut)
        assert get_file_hash(file_path, range_len) == range_hash, 'Expected range hash is correct'

        _, got_content = get_range(**wallet_cid, oid=oids[1], bearer='', range_cut=range_cut)
        assert get_file_content(file_path, content_len=range_len, mode='rb') == got_content, \
            'Expected range content is correct'

    with allure.step('Search objects'):
        search_object(**wallet_cid, expected_objects_list=oids)
        search_object(**wallet_cid, filters=file_usr_header, expected_objects_list=oids[1:2])
        search_object(**wallet_cid, filters=file_usr_header_oth, expected_objects_list=oids[2:3])
        search_object(**wallet_cid, filters=common_header, expected_objects_list=oids[1:3])

    with allure.step('Head object and validate'):
        head_object(**wallet_cid, oid=oids[0])

@@ -79,6 +95,35 @@ def test_object_api(prepare_wallet_and_deposit):
    get_object_and_check_error(**wallet_cid, oid=oids[1], err_msg='object already removed')


@allure.title('Test object life time')
@pytest.mark.sanity
@pytest.mark.grpc_api
@pytest.mark.parametrize('object_size', [SIMPLE_OBJ_SIZE, COMPLEX_OBJ_SIZE], ids=['simple object', 'complex object'])
def test_object_life_time(prepare_container, request, object_size):
    """
    Test object deleted after expiration epoch.
    """
    cid, wallet = prepare_container

    allure.dynamic.title(f'Test object life time for {request.node.callspec.id}')

    file_path = generate_file(object_size)
    file_hash = get_file_hash(file_path)
    epoch = get_epoch()

    oid = put_object(wallet, file_path, cid, options=f'--expires-on {epoch + 1}')
    got_file = get_object(wallet, cid, oid)
    assert get_file_hash(got_file) == file_hash

    with allure.step('Tick two epochs'):
        for _ in range(3):
            tick_epoch()

    with allure.step('Check object deleted because of expires-on epoch'):
        with pytest.raises(Exception, match='.*object not found.*'):
            get_object(wallet, cid, oid)


def get_object_and_check_error(wallet: str, cid: str, oid: str, err_msg: str):
    try:
        get_object(wallet=wallet, cid=cid, oid=oid)
@@ -221,7 +221,6 @@ class TestHttpGate:
    @staticmethod
    @allure.step('Verify object can be get using HTTP header attribute')
    def get_object_by_attr_and_verify_hashes(oid: str, file_name: str, cid: str, attrs: dict):

        got_file_path_http = get_via_http_gate(cid=cid, oid=oid)
        got_file_path_http_attr = get_via_http_gate_by_attribute(cid=cid, attribute=attrs)
@@ -23,6 +23,8 @@ def _cmd_run(cmd: str, timeout: int = 30) -> str:
    Runs given shell command <cmd>, in case of success returns its stdout,
    in case of failure returns error message.
    """
    compl_proc = None
    start_time = datetime.now()
    try:
        logger.info(f"Executing command: {cmd}")
        start_time = datetime.utcnow()

@@ -38,12 +40,20 @@ def _cmd_run(cmd: str, timeout: int = 30) -> str:

        return output
    except subprocess.CalledProcessError as exc:
        logger.info(f"Error:\nreturn code: {exc.returncode} "
                    f"\nOutput: {exc.output}")
        end_time = datetime.now()
        return_code, cmd_output = subprocess.getstatusoutput(cmd)
        _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)

        raise RuntimeError(f"Error:\nreturn code: {exc.returncode} "
                           f"\nOutput: {exc.output}") from exc
    except OSError as exc:
        raise RuntimeError(f"Output: {exc.strerror}") from exc
    except Exception as exc:
        return_code, _ = subprocess.getstatusoutput(cmd)
        return_code, cmd_output = subprocess.getstatusoutput(cmd)
        end_time = datetime.now()
        _attach_allure_log(cmd, cmd_output, return_code, start_time, end_time)
        logger.info(f"Error:\nreturn code: {return_code}\nOutput: "
                    f"{exc.output.decode('utf-8') if type(exc.output) is bytes else exc.output}")
        raise
@@ -91,21 +91,24 @@ def list_containers(wallet: str) -> list[str]:


@keyword('Get Container')
def get_container(wallet: str, cid: str) -> dict:
def get_container(wallet: str, cid: str, flag: str = '--json') -> dict:
    """
    A wrapper for `neofs-cli container get` call. It extracts container's
    attributes and rearranges them into a more compact view.
    Args:
        wallet (str): path to a wallet on whose behalf we get the container
        cid (str): ID of the container to get
        flag (str): output as json or plain text
    Returns:
        (dict): dict of container attributes
        (dict, str): dict of container attributes
    """
    cmd = (
        f'{NEOFS_CLI_EXEC} --rpc-endpoint {NEOFS_ENDPOINT} --wallet {wallet} '
        f'--config {WALLET_CONFIG} --cid {cid} container get --json'
    )
    output = _cmd_run(cmd)
    if flag != '--json':
        return output
    container_info = json.loads(output)
    attributes = dict()
    for attr in container_info['attributes']:
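Note: a short sketch of how the new flag parameter is consumed by the tests; wallet_path and cid are placeholders, and the default call still returns the parsed JSON attributes:

attrs = get_container(wallet_path, cid)               # dict built from the --json output
raw_text = get_container(wallet_path, cid, flag='')   # plain-text output, e.g. fed into placement_policy_from_container(raw_text)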
@@ -110,6 +110,8 @@ def put_object(wallet: str, path: str, cid: str, bearer: str = "", user_headers:
    """
    if not endpoint:
        endpoint = random.sample(NEOFS_NETMAP, 1)[0]
    if not endpoint:
        logger.info(f'---DEB:\n{NEOFS_NETMAP}')
    cmd = (
        f'{NEOFS_CLI_EXEC} --rpc-endpoint {endpoint} --wallet {wallet} '
        f'object put --file {path} --cid {cid} {options} --config {wallet_config} '
@@ -6,15 +6,35 @@
"""

import random
import re
from dataclasses import dataclass
from typing import List, Tuple

import docker
from common import NEOFS_NETMAP_DICT
from common import DEPLOY_PATH, NEOFS_NETMAP_DICT
from robot.api import logger
from robot.api.deco import keyword
from ssh_helper import HostClient

ROBOT_AUTO_KEYWORDS = False


@dataclass
class HealthStatus:
    network_status: str = None
    health_status: str = None

    @staticmethod
    def from_stdout(output: str) -> 'HealthStatus':
        network, health = None, None
        for line in output.split('\n'):
            if 'Network status' in line:
                network = line.split(':')[-1].strip()
            if 'Health status' in line:
                health = line.split(':')[-1].strip()
        return HealthStatus(network, health)


@keyword('Stop Nodes')
def stop_nodes(number: int, nodes: list):
    """

@@ -83,3 +103,178 @@ def get_locode():
    logger.info(f'Random locode chosen: {locode}')

    return locode


@keyword('Stop Nodes Remote')
def stop_nodes_remote(client: HostClient, number: int, nodes: list):
    """
    The function shuts down the given number of randomly
    selected nodes in docker.
    Args:
        client (HostClient): client that implements exec command
        number (int): the number of nodes to shut down
        nodes (list): the list of nodes for possible shut down
    Returns:
        (list): the list of nodes which have been shut down
    """
    nodes = random.sample(nodes, number)
    for node in nodes:
        node = node.split('.')[0]
        client.exec(f'docker stop {node}')
    return nodes


@keyword('Start Nodes Remote')
def start_nodes_remote(client: HostClient, nodes: list):
    """
    The function starts nodes in docker.
    Args:
        client (HostClient): client that implements exec command
        nodes (list): the list of nodes for possible shut down
    """
    for node in nodes:
        node = node.split('.')[0]
        client.exec(f'docker start {node}')


@keyword('Healthcheck for node')
def node_healthcheck(client: HostClient, node_name: str) -> HealthStatus:
    """
    The function returns node's health status.
    Args:
        client HostClient: client that implements exec command.
        node_name str: node name to use for netmap snapshot operation
    Returns:
        health status as HealthStatus object.
    """
    if node_name not in NEOFS_NETMAP_DICT:
        raise AssertionError(f'Node {node_name} is not found!')

    node_config = NEOFS_NETMAP_DICT.get(node_name)
    control_url = node_config.get('control')
    host, port = control_url.split(':')
    cmd = f'{DEPLOY_PATH}/vendor/neofs-cli control healthcheck --endpoint {control_url} ' \
          f'--wallet {DEPLOY_PATH}/services/storage/wallet0{port[-1]}.json ' \
          f'--config {DEPLOY_PATH}/services/storage/cli-cfg.yml'
    output = client.exec_with_confirmation(cmd, [''])
    return HealthStatus.from_stdout(output.stdout)


@keyword('Set status for node')
def node_set_status(client: HostClient, node_name: str, status: str):
    """
    The function sets particular status for given node.
    Args:
        client HostClient: client that implements exec command.
        node_name str: node name to use for netmap snapshot operation
        status str: online or offline.
    Returns:
        (void)
    """
    if node_name not in NEOFS_NETMAP_DICT:
        raise AssertionError(f'Node {node_name} is not found!')

    node_config = NEOFS_NETMAP_DICT.get(node_name)
    control_url = node_config.get('control')
    host, port = control_url.split(':')
    cmd = f'{DEPLOY_PATH}/vendor/neofs-cli control set-status --endpoint {control_url} ' \
          f'--wallet {DEPLOY_PATH}/services/storage/wallet0{port[-1]}.json ' \
          f'--config {DEPLOY_PATH}/services/storage/cli-cfg.yml --status {status}'
    client.exec_with_confirmation(cmd, [''])


@keyword('Get netmap snapshot')
def get_netmap_snapshot(client: HostClient, node_name: str = None) -> str:
    """
    The function returns string representation of netmap-snapshot.
    Args:
        client HostClient: client that implements exec command.
        node_name str: node name to use for netmap snapshot operation
    Returns:
        string representation of netmap-snapshot
    """
    node_name = node_name or list(NEOFS_NETMAP_DICT)[0]

    if node_name not in NEOFS_NETMAP_DICT:
        raise AssertionError(f'Node {node_name} is not found!')

    node_config = NEOFS_NETMAP_DICT.get(node_name)
    control_url = node_config.get('control')
    host, port = control_url.split(':')
    cmd = f'{DEPLOY_PATH}/vendor/neofs-cli control netmap-snapshot --endpoint {control_url} ' \
          f'--wallet {DEPLOY_PATH}/services/storage/wallet0{port[-1]}.json ' \
          f'--config {DEPLOY_PATH}/services/storage/cli-cfg.yml'
    output = client.exec_with_confirmation(cmd, [''])
    return output.stdout


@keyword('Shard list for node')
def node_shard_list(client: HostClient, node_name: str) -> List[str]:
    """
    The function returns list of shards for particular node.
    Args:
        client HostClient: client that implements exec command.
        node_name str: node name to use for netmap snapshot operation
    Returns:
        list of shards.
    """
    control_url, port = _url_port_for_node(node_name)
    cmd = f'{DEPLOY_PATH}/vendor/neofs-cli control shards list --endpoint {control_url} ' \
          f'--wallet {DEPLOY_PATH}/services/storage/wallet0{port[-1]}.json ' \
          f'--config {DEPLOY_PATH}/services/storage/cli-cfg.yml'
    output = client.exec_with_confirmation(cmd, [''])
    return re.findall(r'Shard (.*):', output.stdout)


@keyword('Shard list for node')
def node_shard_set_mode(client: HostClient, node_name: str, shard: str, mode: str) -> str:
    """
    The function sets mode for node's particular shard.
    Args:
        client HostClient: client that implements exec command.
        node_name str: node name to use for netmap snapshot operation
    Returns:
        health status as HealthStatus object.
    """
    control_url, port = _url_port_for_node(node_name)
    cmd = f'{DEPLOY_PATH}/vendor/neofs-cli control shards set-mode --endpoint {control_url} ' \
          f'--wallet {DEPLOY_PATH}/services/storage/wallet0{port[-1]}.json ' \
          f'--config {DEPLOY_PATH}/services/storage/cli-cfg.yml --id {shard} --mode {mode}'
    output = client.exec_with_confirmation(cmd, [''])
    return output.stdout


@keyword('Drop object from node {node_name}')
def drop_object(client: HostClient, node_name: str, cid: str, oid: str) -> str:
    """
    The function drops object from particular node.
    Args:
        client HostClient: client that implements exec command.
        node_name str: node name to use for netmap snapshot operation
    Returns:
        health status as HealthStatus object.
    """
    control_url, port = _url_port_for_node(node_name)
    cmd = f'{DEPLOY_PATH}/vendor/neofs-cli control drop-objects --endpoint {control_url} ' \
          f'--wallet {DEPLOY_PATH}/services/storage/wallet0{port[-1]}.json ' \
          f'--config {DEPLOY_PATH}/services/storage/cli-cfg.yml -o {cid}/{oid}'
    output = client.exec_with_confirmation(cmd, [''])
    return output.stdout


def _url_port_for_node(node_name: str) -> Tuple[str, str]:
    """
    Returns control url and port for particular storage node.
    Args:
        node_name: str node name from NEOFS_NETMAP_DICT

    Returns:
        control url and port as a tuple.
    """
    if node_name not in NEOFS_NETMAP_DICT:
        raise AssertionError(f'Node {node_name} is not found!')

    node_config = NEOFS_NETMAP_DICT.get(node_name)
    control_url = node_config.get('control')
    port = control_url.split(':')[-1]
    return control_url, port
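Note: a minimal sketch of how these keywords are combined in the new node management tests; client is assumed to be an already established ssh_helper.HostClient connection to the dev-env host:

# check one node is ready and online
status = node_healthcheck(client, 's01')
assert status.health_status == 'READY' and status.network_status == 'ONLINE'

# reset all shards of the node back to read-write mode
for shard in node_shard_list(client, 's01'):
    node_shard_set_mode(client, 's01', shard, 'read-write')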
@@ -52,7 +52,10 @@ def init_s3_credentials(wallet_path, s3_bearer_rules_file: str = None):
    # first five strings are log output, cutting them off and parse
    # the rest of the output as JSON
    output = '\n'.join(output.split('\n')[5:])
    output_dict = json.loads(output)
    try:
        output_dict = json.loads(output)
    except json.JSONDecodeError:
        raise AssertionError(f'Could not parse info from output\n{output}')

    return (output_dict['container_id'],
            bucket,
@@ -1,9 +1,9 @@
#!/usr/bin/python3

'''
"""
This module contains keywords which are used for asserting
that storage policies are kept.
'''
"""

from robot.api import logger
from robot.api.deco import keyword

@@ -57,8 +57,9 @@ def get_simple_object_copies(wallet: str, cid: str, oid: str):
                                   endpoint=node,
                                   is_direct=True)
            if response:
                logger.info(f"Found object {oid} on node {node}")
                copies += 1
        except Exception as exc:
        except Exception:
            logger.info(f"No {oid} object copy found on {node}, continue")
            continue
    return copies

@@ -104,8 +105,9 @@ def get_nodes_with_object(wallet: str, cid: str, oid: str):
                               endpoint=node,
                               is_direct=True)
            if res is not None:
                logger.info(f"Found object {oid} on node {node}")
                nodes_list.append(node)
        except Exception as exc:
        except Exception:
            logger.info(f"No {oid} object copy found on {node}, continue")
            continue
    return nodes_list
@@ -51,17 +51,21 @@ def generate_file_and_file_hash(size: int) -> Tuple[str, str]:


@keyword('Get File Hash')
def get_file_hash(filename: str):
def get_file_hash(filename: str, len: int = None):
    """
    This function generates hash for the specified file.
    Args:
        filename (str): the path to the file to generate hash for
        len (int): how many bytes to read
    Returns:
        (str): the hash of the file
    """
    file_hash = hashlib.sha256()
    with open(filename, "rb") as out:
        file_hash.update(out.read())
        if len:
            file_hash.update(out.read(len))
        else:
            file_hash.update(out.read())
    return file_hash.hexdigest()
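Note: a small self-contained sketch of the new len parameter; the file name and content are invented for illustration:

import hashlib

with open('/tmp/sample.bin', 'wb') as f:
    f.write(b'0123456789abcdef')

# hash of the first 10 bytes only, as used for the range-hash checks in test_object_api
assert get_file_hash('/tmp/sample.bin', 10) == hashlib.sha256(b'0123456789').hexdigest()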
@@ -37,21 +37,26 @@ STORAGE_NODE_2 = os.getenv('DATA_NODE_2', 's02.neofs.devenv:8080')
STORAGE_NODE_3 = os.getenv('DATA_NODE_3', 's03.neofs.devenv:8080')
STORAGE_NODE_4 = os.getenv('DATA_NODE_4', 's04.neofs.devenv:8080')

CONTROL_NODE_1 = os.getenv('NEOFS_CONTROL_NODE_1', 's01.neofs.devenv:8081')
CONTROL_NODE_2 = os.getenv('NEOFS_CONTROL_NODE_2', 's02.neofs.devenv:8081')
CONTROL_NODE_3 = os.getenv('NEOFS_CONTROL_NODE_3', 's03.neofs.devenv:8081')
CONTROL_NODE_4 = os.getenv('NEOFS_CONTROL_NODE_4', 's04.neofs.devenv:8081')

DEVENV_SERVICES_PATH = f"{os.getenv('DEVENV_PATH')}/services"
NEOFS_NETMAP_DICT = {'s01': {'rpc': STORAGE_NODE_1,
                             'control': 's01.neofs.devenv:8081',
                             'control': CONTROL_NODE_1,
                             'wallet_path': f"{DEVENV_SERVICES_PATH}/storage/wallet01.json",
                             'UN-LOCODE': 'RU MOW'},
                     's02': {'rpc': STORAGE_NODE_2,
                             'control': 's02.neofs.devenv:8081',
                             'control': CONTROL_NODE_2,
                             'wallet_path': f"{DEVENV_SERVICES_PATH}/storage/wallet02.json",
                             'UN-LOCODE': 'RU LED'},
                     's03': {'rpc': STORAGE_NODE_3,
                             'control': 's03.neofs.devenv:8081',
                             'control': CONTROL_NODE_3,
                             'wallet_path': f"{DEVENV_SERVICES_PATH}/storage/wallet03.json",
                             'UN-LOCODE': 'SE STO'},
                     's04': {'rpc': STORAGE_NODE_4,
                             'control': 's04.neofs.devenv:8081',
                             'control': CONTROL_NODE_4,
                             'wallet_path': f"{DEVENV_SERVICES_PATH}/storage/wallet04.json",
                             'UN-LOCODE': 'FI HEL'}
                     }

@@ -73,5 +78,6 @@ S3_GATE_WALLET_PASS = 's3'

CONTROL_NODE_USER = os.getenv('CONTROL_NODE_USER', 'root')
CONTROL_NODE_PWD = os.getenv('CONTROL_NODE_PWD')
DEPLOY_PATH = os.getenv('DEPLOY_PATH', '/opt/dev-env')

FREE_STORAGE = os.getenv('FREE_STORAGE', "false").lower() == "true"
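Note: a brief sketch of how the tests resolve per-node endpoints from this map; the values shown in comments are the dev-env defaults above:

node_config = NEOFS_NETMAP_DICT['s02']
rpc_endpoint = node_config.get('rpc')          # 's02.neofs.devenv:8080' by default
control_endpoint = node_config.get('control')  # CONTROL_NODE_2, 's02.neofs.devenv:8081' by default
wallet_path = node_config.get('wallet_path')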