[#314] Fix tools config
Signed-off-by: Vladimir Domnich <v.domnich@yadro.com>
parent 2452cccba0
commit 588292dfb5
18 changed files with 239 additions and 844 deletions
@@ -1,6 +1,6 @@
 [tool.isort]
 profile = "black"
-src_paths = ["reporter", "shell", "tests"]
+src_paths = ["pytest_tests", "robot"]
 line_length = 100
 
 [tool.black]
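These isort settings pin import sorting to Black-compatible output for the renamed source roots. A minimal sketch of the layout they enforce, using an import that appears later in this commit (the exact wrap point depends on line_length = 100):

    # isort with profile = "black" wraps long imports into a parenthesized,
    # trailing-comma block, one name per line -- the same shape Black emits:
    from python_keywords.node_management import (
        check_node_in_map,
        delete_node_data,
        drop_object,
    )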
@@ -3,15 +3,14 @@ import time
 import uuid
 
 import allure
 
 from common import ASSETS_DIR, SIMPLE_OBJ_SIZE, STORAGE_GC_TIME
 
 
 def create_file_with_content(file_path: str = None, content: str = None) -> str:
-    mode = 'w+'
+    mode = "w+"
     if not content:
         content = os.urandom(SIMPLE_OBJ_SIZE)
-        mode = 'wb'
+        mode = "wb"
 
     if not file_path:
         file_path = f"{os.getcwd()}/{ASSETS_DIR}/{str(uuid.uuid4())}"
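create_file_with_content writes the given text with mode "w+", or SIMPLE_OBJ_SIZE random bytes with mode "wb" when no content is passed, and returns the path it wrote. A minimal usage sketch (the literal content is illustrative):

    path_a = create_file_with_content(content="hello")  # text file, mode "w+"
    path_b = create_file_with_content()  # random binary payload under ASSETS_DIR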
@@ -25,7 +24,7 @@ def create_file_with_content(file_path: str = None, content: str = None) -> str:
     return file_path
 
 
-def get_file_content(file_path: str, content_len: int = None, mode='r') -> str:
+def get_file_content(file_path: str, content_len: int = None, mode="r") -> str:
     with open(file_path, mode) as out_file:
         if content_len:
             content = out_file.read(content_len)
@@ -37,7 +36,7 @@ def get_file_content(file_path: str, content_len: int = None, mode='r') -> str:
 
 def split_file(file_path: str, parts: int) -> list[str]:
     files = []
-    with open(file_path, 'rb') as in_file:
+    with open(file_path, "rb") as in_file:
         data = in_file.read()
 
     content_size = len(data)
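split_file picks a chunk size so that range() yields exactly `parts` start positions, as the next hunk shows. A worked example of that arithmetic:

    content_size, parts = 10, 3
    chunk_size = int((content_size + parts) / parts)       # int(13 / 3) == 4
    starts = list(range(0, content_size + 1, chunk_size))  # [0, 4, 8] -> chunks of 4, 4, 2 bytes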
@@ -45,20 +44,20 @@ def split_file(file_path: str, parts: int) -> list[str]:
     chunk_size = int((content_size + parts) / parts)
     part_id = 1
     for start_position in range(0, content_size + 1, chunk_size):
-        part_file_name = f'{file_path}_part_{part_id}'
+        part_file_name = f"{file_path}_part_{part_id}"
         files.append(part_file_name)
-        with open(part_file_name, 'wb') as out_file:
-            out_file.write(data[start_position:start_position + chunk_size])
+        with open(part_file_name, "wb") as out_file:
+            out_file.write(data[start_position : start_position + chunk_size])
         part_id += 1
 
     return files
 
 
-def robot_time_to_int(value: str) -> int:
-    if value.endswith('s'):
+def parse_time(value: str) -> int:
+    if value.endswith("s"):
         return int(value[:-1])
 
-    if value.endswith('m'):
+    if value.endswith("m"):
         return int(value[:-1]) * 60
 
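The commit renames robot_time_to_int to parse_time everywhere it is used. A self-contained sketch of the renamed helper; the bare-number fallback is an assumption, since this hunk only shows the "s" and "m" branches:

    def parse_time(value: str) -> int:
        # Convert a Robot-style duration such as "30s" or "1m" to seconds.
        if value.endswith("s"):
            return int(value[:-1])
        if value.endswith("m"):
            return int(value[:-1]) * 60
        return int(value)  # assumed fallback, not shown in the diff

    assert parse_time("30s") == 30 and parse_time("1m") == 60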
@@ -84,12 +83,12 @@ def placement_policy_from_container(container_info: str) -> str:
     Returns:
         placement policy as a string
     """
-    assert ':' in container_info, f'Could not find placement rule in the output {container_info}'
-    return container_info.split(':')[-1].replace('\n', ' ').strip()
+    assert ":" in container_info, f"Could not find placement rule in the output {container_info}"
+    return container_info.split(":")[-1].replace("\n", " ").strip()
 
 
 def wait_for_gc_pass_on_storage_nodes() -> None:
     # We add 15 seconds to allow some time for GC process itself
-    wait_time = robot_time_to_int(STORAGE_GC_TIME)
-    with allure.step(f'Wait {wait_time}s until GC completes on storage nodes'):
+    wait_time = parse_time(STORAGE_GC_TIME)
+    with allure.step(f"Wait {wait_time}s until GC completes on storage nodes"):
         time.sleep(wait_time)
@@ -20,7 +20,6 @@ from env_properties import save_env_properties
 from payment_neogo import neofs_deposit, transfer_mainnet_gas
 from python_keywords.node_management import node_healthcheck
 from service_helper import get_storage_service_helper
-from wallet import init_wallet
 
 logger = logging.getLogger("NeoLogger")
 
@@ -4,38 +4,52 @@ from time import sleep
 
 import allure
 import pytest
+from common import (
+    COMPLEX_OBJ_SIZE,
+    MAINNET_BLOCK_TIME,
+    MORPH_BLOCK_TIME,
+    NEOFS_CONTRACT_CACHE_TIMEOUT,
+    NEOFS_NETMAP_DICT,
+    STORAGE_RPC_ENDPOINT_1,
+    STORAGE_WALLET_PASS,
+)
 from data_formatters import get_wallet_public_key
-from common import (COMPLEX_OBJ_SIZE, MAINNET_BLOCK_TIME, MORPH_BLOCK_TIME,
-                    NEOFS_CONTRACT_CACHE_TIMEOUT,
-                    NEOFS_NETMAP_DICT, STORAGE_RPC_ENDPOINT_1, STORAGE_WALLET_PASS)
 from epoch import tick_epoch
 from grpc_responses import OBJECT_NOT_FOUND, error_matches_status
 from python_keywords.container import create_container, get_container
 from python_keywords.failover_utils import wait_object_replication_on_nodes
 from python_keywords.neofs_verbs import delete_object, get_object, head_object, put_object
-from python_keywords.node_management import (check_node_in_map, delete_node_data, drop_object,
-                                             exclude_node_from_network_map, get_netmap_snapshot,
-                                             get_locode, include_node_to_network_map,
-                                             node_healthcheck, node_set_status,
-                                             node_shard_list, node_shard_set_mode,
-                                             start_nodes, stop_nodes)
+from python_keywords.node_management import (
+    check_node_in_map,
+    delete_node_data,
+    drop_object,
+    exclude_node_from_network_map,
+    get_locode,
+    get_netmap_snapshot,
+    include_node_to_network_map,
+    node_healthcheck,
+    node_set_status,
+    node_shard_list,
+    node_shard_set_mode,
+    start_nodes,
+    stop_nodes,
+)
 from service_helper import get_storage_service_helper
 from storage_policy import get_nodes_with_object, get_simple_object_copies
-from utility import (placement_policy_from_container, robot_time_to_int,
-                     wait_for_gc_pass_on_storage_nodes)
+from utility import parse_time, placement_policy_from_container, wait_for_gc_pass_on_storage_nodes
 from utility_keywords import generate_file
 from wellknown_acl import PUBLIC_ACL
 
-logger = logging.getLogger('NeoLogger')
+logger = logging.getLogger("NeoLogger")
 check_nodes = []
 
 
 @pytest.fixture
-@allure.title('Create container and pick the node with data')
+@allure.title("Create container and pick the node with data")
 def create_container_and_pick_node(prepare_wallet_and_deposit):
     wallet = prepare_wallet_and_deposit
     file_path = generate_file()
-    placement_rule = 'REP 1 IN X CBF 1 SELECT 1 FROM * AS X'
+    placement_rule = "REP 1 IN X CBF 1 SELECT 1 FROM * AS X"
 
     cid = create_container(wallet, rule=placement_rule, basic_acl=PUBLIC_ACL)
     oid = put_object(wallet, file_path, cid)
@@ -45,7 +59,8 @@ def create_container_and_pick_node(prepare_wallet_and_deposit):
     node = nodes[0]
 
     node_name = choice(
-        [node_name for node_name, params in NEOFS_NETMAP_DICT.items() if params.get('rpc') == node])
+        [node_name for node_name, params in NEOFS_NETMAP_DICT.items() if params.get("rpc") == node]
+    )
 
     yield cid, node_name
 
@@ -53,7 +68,7 @@ def create_container_and_pick_node(prepare_wallet_and_deposit):
     assert shards
 
     for shard in shards:
-        node_shard_set_mode(node_name, shard, 'read-write')
+        node_shard_set_mode(node_name, shard, "read-write")
 
     node_shard_list(node_name)
 
@@ -64,7 +79,7 @@ def after_run_start_all_nodes():
     try:
         start_nodes(list(NEOFS_NETMAP_DICT.keys()))
     except Exception as err:
-        logger.error(f'Node start fails with error:\n{err}')
+        logger.error(f"Node start fails with error:\n{err}")
 
 
 @pytest.fixture
@@ -73,24 +88,24 @@ def return_nodes_after_test_run():
     return_nodes()
 
 
-@allure.step('Return node to cluster')
+@allure.step("Return node to cluster")
 def return_nodes(alive_node: str = None):
     helper = get_storage_service_helper()
     for node in list(check_nodes):
-        with allure.step(f'Start node {node}'):
+        with allure.step(f"Start node {node}"):
             helper.start_node(node)
-        with allure.step(f'Waiting status ready for node {node}'):
+        with allure.step(f"Waiting status ready for node {node}"):
             wait_for_node_to_be_ready(node)
 
         # We need to wait for node to establish notifications from morph-chain
         # Otherwise it will hang up when we will try to set status
-        sleep(robot_time_to_int(MORPH_BLOCK_TIME))
+        sleep(parse_time(MORPH_BLOCK_TIME))
 
-        with allure.step(f'Move node {node} to online state'):
-            node_set_status(node, status='online', retries=2)
+        with allure.step(f"Move node {node} to online state"):
+            node_set_status(node, status="online", retries=2)
 
         check_nodes.remove(node)
-        sleep(robot_time_to_int(MORPH_BLOCK_TIME))
+        sleep(parse_time(MORPH_BLOCK_TIME))
         for __attempt in range(3):
             try:
                 tick_epoch()
@@ -101,19 +116,22 @@ def return_nodes(alive_node: str = None):
         check_node_in_map(node, alive_node)
 
 
-@allure.title('Add one node to cluster')
+@allure.title("Add one node to cluster")
 @pytest.mark.add_nodes
 @pytest.mark.node_mgmt
 def test_add_nodes(prepare_tmp_dir, prepare_wallet_and_deposit, return_nodes_after_test_run):
     wallet = prepare_wallet_and_deposit
-    placement_rule_3 = 'REP 3 IN X CBF 1 SELECT 3 FROM * AS X'
-    placement_rule_4 = 'REP 4 IN X CBF 1 SELECT 4 FROM * AS X'
+    placement_rule_3 = "REP 3 IN X CBF 1 SELECT 3 FROM * AS X"
+    placement_rule_4 = "REP 4 IN X CBF 1 SELECT 4 FROM * AS X"
     source_file_path = generate_file()
 
-    additional_node = choice([
-        node for node, node_config in NEOFS_NETMAP_DICT.items()
-        if node_config.get('rpc') != STORAGE_RPC_ENDPOINT_1
-    ])
+    additional_node = choice(
+        [
+            node
+            for node, node_config in NEOFS_NETMAP_DICT.items()
+            if node_config.get("rpc") != STORAGE_RPC_ENDPOINT_1
+        ]
+    )
     alive_node = choice([node for node in NEOFS_NETMAP_DICT if node != additional_node])
 
     check_node_in_map(additional_node, alive_node)
@@ -124,29 +142,32 @@ def test_add_nodes(prepare_tmp_dir, prepare_wallet_and_deposit, return_nodes_aft
     delete_node_data(additional_node)
 
     cid = create_container(wallet, rule=placement_rule_3, basic_acl=PUBLIC_ACL)
-    oid = put_object(wallet, source_file_path, cid,
-                     endpoint=NEOFS_NETMAP_DICT[alive_node].get('rpc'))
+    oid = put_object(
+        wallet, source_file_path, cid, endpoint=NEOFS_NETMAP_DICT[alive_node].get("rpc")
+    )
     wait_object_replication_on_nodes(wallet, cid, oid, 3)
 
     return_nodes(alive_node)
 
-    with allure.step('Check data could be replicated to new node'):
+    with allure.step("Check data could be replicated to new node"):
         random_node = choice(
-            [node for node in NEOFS_NETMAP_DICT if node not in (additional_node, alive_node)])
+            [node for node in NEOFS_NETMAP_DICT if node not in (additional_node, alive_node)]
+        )
         exclude_node_from_network_map(random_node, alive_node)
 
         wait_object_replication_on_nodes(wallet, cid, oid, 3, excluded_nodes=[random_node])
         include_node_to_network_map(random_node, alive_node)
         wait_object_replication_on_nodes(wallet, cid, oid, 3)
 
-    with allure.step('Check container could be created with new node'):
+    with allure.step("Check container could be created with new node"):
         cid = create_container(wallet, rule=placement_rule_4, basic_acl=PUBLIC_ACL)
-        oid = put_object(wallet, source_file_path, cid,
-                         endpoint=NEOFS_NETMAP_DICT[alive_node].get('rpc'))
+        oid = put_object(
+            wallet, source_file_path, cid, endpoint=NEOFS_NETMAP_DICT[alive_node].get("rpc")
+        )
         wait_object_replication_on_nodes(wallet, cid, oid, 4)
 
 
-@allure.title('Control Operations with storage nodes')
+@allure.title("Control Operations with storage nodes")
 @pytest.mark.node_mgmt
 def test_nodes_management(prepare_tmp_dir):
     """
@@ -157,58 +178,59 @@ def test_nodes_management(prepare_tmp_dir):
 
     # Calculate public key that identifies node in netmap (we need base58-formatted key
     # because keys of storage nodes are base58-encoded in netmap)
-    random_node_wallet_path = NEOFS_NETMAP_DICT[random_node]['wallet_path']
+    random_node_wallet_path = NEOFS_NETMAP_DICT[random_node]["wallet_path"]
     random_node_netmap_key = get_wallet_public_key(
-        random_node_wallet_path,
-        STORAGE_WALLET_PASS,
-        format="base58"
+        random_node_wallet_path, STORAGE_WALLET_PASS, format="base58"
     )
 
-    with allure.step('Check node {random_node} is in netmap'):
+    with allure.step("Check node {random_node} is in netmap"):
         snapshot = get_netmap_snapshot(node_name=alive_node)
-        assert random_node_netmap_key in snapshot, f'Expected node {random_node} in netmap'
+        assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap"
 
-    with allure.step('Run health check for all storage nodes'):
+    with allure.step("Run health check for all storage nodes"):
         for node_name in NEOFS_NETMAP_DICT.keys():
             health_check = node_healthcheck(node_name)
-            assert health_check.health_status == 'READY' and health_check.network_status == 'ONLINE'
+            assert health_check.health_status == "READY" and health_check.network_status == "ONLINE"
 
-    with allure.step(f'Move node {random_node} to offline state'):
-        node_set_status(random_node, status='offline')
+    with allure.step(f"Move node {random_node} to offline state"):
+        node_set_status(random_node, status="offline")
 
-    sleep(robot_time_to_int(MORPH_BLOCK_TIME))
+    sleep(parse_time(MORPH_BLOCK_TIME))
     tick_epoch()
 
-    with allure.step(f'Check node {random_node} went to offline'):
+    with allure.step(f"Check node {random_node} went to offline"):
         health_check = node_healthcheck(random_node)
-        assert health_check.health_status == 'READY' and health_check.network_status == 'OFFLINE'
+        assert health_check.health_status == "READY" and health_check.network_status == "OFFLINE"
         snapshot = get_netmap_snapshot(node_name=alive_node)
-        assert random_node_netmap_key not in snapshot, f'Expected node {random_node} not in netmap'
+        assert random_node_netmap_key not in snapshot, f"Expected node {random_node} not in netmap"
 
-    with allure.step(f'Check node {random_node} went to online'):
-        node_set_status(random_node, status='online')
+    with allure.step(f"Check node {random_node} went to online"):
+        node_set_status(random_node, status="online")
 
-    sleep(robot_time_to_int(MORPH_BLOCK_TIME))
+    sleep(parse_time(MORPH_BLOCK_TIME))
     tick_epoch()
 
-    with allure.step(f'Check node {random_node} went to online'):
+    with allure.step(f"Check node {random_node} went to online"):
         health_check = node_healthcheck(random_node)
-        assert health_check.health_status == 'READY' and health_check.network_status == 'ONLINE'
+        assert health_check.health_status == "READY" and health_check.network_status == "ONLINE"
         snapshot = get_netmap_snapshot(node_name=alive_node)
-        assert random_node_netmap_key in snapshot, f'Expected node {random_node} in netmap'
+        assert random_node_netmap_key in snapshot, f"Expected node {random_node} in netmap"
 
 
-@pytest.mark.parametrize('placement_rule,expected_copies', [
-    ('REP 2 IN X CBF 2 SELECT 2 FROM * AS X', 2),
-    ('REP 2 IN X CBF 1 SELECT 2 FROM * AS X', 2),
-    ('REP 3 IN X CBF 1 SELECT 3 FROM * AS X', 3),
-    ('REP 1 IN X CBF 1 SELECT 1 FROM * AS X', 1),
-    ('REP 1 IN X CBF 2 SELECT 1 FROM * AS X', 1),
-    ('REP 4 IN X CBF 1 SELECT 4 FROM * AS X', 4),
-    ('REP 2 IN X CBF 1 SELECT 4 FROM * AS X', 2),
-])
+@pytest.mark.parametrize(
+    "placement_rule,expected_copies",
+    [
+        ("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", 2),
+        ("REP 2 IN X CBF 1 SELECT 2 FROM * AS X", 2),
+        ("REP 3 IN X CBF 1 SELECT 3 FROM * AS X", 3),
+        ("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", 1),
+        ("REP 1 IN X CBF 2 SELECT 1 FROM * AS X", 1),
+        ("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4),
+        ("REP 2 IN X CBF 1 SELECT 4 FROM * AS X", 2),
+    ],
+)
 @pytest.mark.node_mgmt
-@allure.title('Test object copies based on placement policy')
+@allure.title("Test object copies based on placement policy")
 def test_placement_policy(prepare_wallet_and_deposit, placement_rule, expected_copies):
     """
     This test checks object's copies based on container's placement policy.
@@ -218,55 +240,86 @@ def test_placement_policy(prepare_wallet_and_deposit, placement_rule, expected_c
     validate_object_copies(wallet, placement_rule, file_path, expected_copies)
 
 
-@pytest.mark.parametrize('placement_rule,expected_copies,nodes', [
-    ('REP 4 IN X CBF 1 SELECT 4 FROM * AS X', 4, ['s01', 's02', 's03', 's04']),
-    (
-        'REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW',
-        1, ['s03']),
-    ("REP 1 CBF 1 SELECT 1 FROM LOC_SPB FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB", 1, ['s02']),
-    ("REP 1 IN LOC_SPB_PLACE REP 1 IN LOC_MSK_PLACE CBF 1 SELECT 1 FROM LOC_SPB AS LOC_SPB_PLACE "
-     "SELECT 1 FROM LOC_MSK AS LOC_MSK_PLACE "
-     "FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB FILTER 'UN-LOCODE' EQ 'RU MOW' AS LOC_MSK", 2,
-     ['s01', 's02']),
-    ('REP 4 CBF 1 SELECT 4 FROM LOC_EU FILTER Continent EQ Europe AS LOC_EU', 4,
-     ['s01', 's02', 's03', 's04']),
-    ("REP 1 CBF 1 SELECT 1 FROM LOC_SPB "
-     "FILTER 'UN-LOCODE' NE 'RU MOW' AND 'UN-LOCODE' NE 'SE STO' AND 'UN-LOCODE' NE 'FI HEL' AS LOC_SPB",
-     1, ['s02']),
-    ("REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER SubDivCode NE 'AB' AND SubDivCode NE '18' AS LOC_RU",
-     2, ['s01', 's02']),
-    ("REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER Country EQ 'Russia' AS LOC_RU", 2, ['s01', 's02']),
-    ("REP 2 CBF 1 SELECT 2 FROM LOC_EU FILTER Country NE 'Russia' AS LOC_EU", 2, ['s03', 's04']),
-])
+@pytest.mark.parametrize(
+    "placement_rule,expected_copies,nodes",
+    [
+        ("REP 4 IN X CBF 1 SELECT 4 FROM * AS X", 4, ["s01", "s02", "s03", "s04"]),
+        (
+            "REP 1 IN LOC_PLACE CBF 1 SELECT 1 FROM LOC_SW AS LOC_PLACE FILTER Country EQ Sweden AS LOC_SW",
+            1,
+            ["s03"],
+        ),
+        ("REP 1 CBF 1 SELECT 1 FROM LOC_SPB FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB", 1, ["s02"]),
+        (
+            "REP 1 IN LOC_SPB_PLACE REP 1 IN LOC_MSK_PLACE CBF 1 SELECT 1 FROM LOC_SPB AS LOC_SPB_PLACE "
+            "SELECT 1 FROM LOC_MSK AS LOC_MSK_PLACE "
+            "FILTER 'UN-LOCODE' EQ 'RU LED' AS LOC_SPB FILTER 'UN-LOCODE' EQ 'RU MOW' AS LOC_MSK",
+            2,
+            ["s01", "s02"],
+        ),
+        (
+            "REP 4 CBF 1 SELECT 4 FROM LOC_EU FILTER Continent EQ Europe AS LOC_EU",
+            4,
+            ["s01", "s02", "s03", "s04"],
+        ),
+        (
+            "REP 1 CBF 1 SELECT 1 FROM LOC_SPB "
+            "FILTER 'UN-LOCODE' NE 'RU MOW' AND 'UN-LOCODE' NE 'SE STO' AND 'UN-LOCODE' NE 'FI HEL' AS LOC_SPB",
+            1,
+            ["s02"],
+        ),
+        (
+            "REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER SubDivCode NE 'AB' AND SubDivCode NE '18' AS LOC_RU",
+            2,
+            ["s01", "s02"],
+        ),
+        (
+            "REP 2 CBF 1 SELECT 2 FROM LOC_RU FILTER Country EQ 'Russia' AS LOC_RU",
+            2,
+            ["s01", "s02"],
+        ),
+        (
+            "REP 2 CBF 1 SELECT 2 FROM LOC_EU FILTER Country NE 'Russia' AS LOC_EU",
+            2,
+            ["s03", "s04"],
+        ),
+    ],
+)
 @pytest.mark.node_mgmt
-@allure.title('Test object copies and storage nodes based on placement policy')
-def test_placement_policy_with_nodes(prepare_wallet_and_deposit, placement_rule, expected_copies,
-                                     nodes):
+@allure.title("Test object copies and storage nodes based on placement policy")
+def test_placement_policy_with_nodes(
+    prepare_wallet_and_deposit, placement_rule, expected_copies, nodes
+):
     """
     Based on container's placement policy check that storage nodes are piked correctly and object has
     correct copies amount.
     """
     wallet = prepare_wallet_and_deposit
     file_path = generate_file()
-    cid, oid, found_nodes = validate_object_copies(wallet, placement_rule, file_path,
-                                                   expected_copies)
-    expected_nodes = [NEOFS_NETMAP_DICT[node_name].get('rpc') for node_name in nodes]
+    cid, oid, found_nodes = validate_object_copies(
+        wallet, placement_rule, file_path, expected_copies
+    )
+    expected_nodes = [NEOFS_NETMAP_DICT[node_name].get("rpc") for node_name in nodes]
     assert set(found_nodes) == set(
-        expected_nodes), f'Expected nodes {expected_nodes}, got {found_nodes}'
+        expected_nodes
+    ), f"Expected nodes {expected_nodes}, got {found_nodes}"
 
 
-@pytest.mark.parametrize('placement_rule,expected_copies', [
-    ('REP 2 IN X CBF 2 SELECT 6 FROM * AS X', 2),
-])
+@pytest.mark.parametrize(
+    "placement_rule,expected_copies",
+    [
+        ("REP 2 IN X CBF 2 SELECT 6 FROM * AS X", 2),
+    ],
+)
 @pytest.mark.node_mgmt
-@allure.title('Negative cases for placement policy')
+@allure.title("Negative cases for placement policy")
 def test_placement_policy_negative(prepare_wallet_and_deposit, placement_rule, expected_copies):
     """
     Negative test for placement policy.
     """
     wallet = prepare_wallet_and_deposit
     file_path = generate_file()
-    with pytest.raises(RuntimeError, match='.*not enough nodes to SELECT from.*'):
+    with pytest.raises(RuntimeError, match=".*not enough nodes to SELECT from.*"):
         validate_object_copies(wallet, placement_rule, file_path, expected_copies)
 
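The reflowed @pytest.mark.parametrize calls keep pytest's two-part contract: a comma-separated string of argument names plus a list of value tuples, one tuple per generated test case. A minimal sketch of the pattern:

    import pytest

    @pytest.mark.parametrize(
        "placement_rule,expected_copies",
        [
            ("REP 2 IN X CBF 2 SELECT 2 FROM * AS X", 2),
            ("REP 1 IN X CBF 1 SELECT 1 FROM * AS X", 1),
        ],
    )
    def test_copies(placement_rule, expected_copies):
        ...  # runs once per tuple above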
@@ -285,10 +338,11 @@ def test_replication(prepare_wallet_and_deposit, after_run_start_all_nodes):
     oid = put_object(wallet, file_path, cid)
 
     nodes = get_nodes_with_object(wallet, cid, oid)
-    assert len(
-        nodes) == expected_nodes_count, f'Expected {expected_nodes_count} copies, got {len(nodes)}'
+    assert (
+        len(nodes) == expected_nodes_count
+    ), f"Expected {expected_nodes_count} copies, got {len(nodes)}"
 
-    node_names = [name for name, config in NEOFS_NETMAP_DICT.items() if config.get('rpc') in nodes]
+    node_names = [name for name, config in NEOFS_NETMAP_DICT.items() if config.get("rpc") in nodes]
     stopped_nodes = stop_nodes(1, node_names)
 
     wait_for_expected_object_copies(wallet, cid, oid)
@@ -303,7 +357,7 @@ def test_replication(prepare_wallet_and_deposit, after_run_start_all_nodes):
 
 
 @pytest.mark.node_mgmt
-@allure.title('NeoFS object could be dropped using control command')
+@allure.title("NeoFS object could be dropped using control command")
 def test_drop_object(prepare_wallet_and_deposit):
     """
     Test checks object could be dropped using `neofs-cli control drop-objects` command.
@@ -323,10 +377,11 @@ def test_drop_object(prepare_wallet_and_deposit):
 
     nodes = get_nodes_with_object(wallet, cid, oid_simple)
     node_name = choice(
-        [name for name, config in NEOFS_NETMAP_DICT.items() if config.get('rpc') in nodes])
+        [name for name, config in NEOFS_NETMAP_DICT.items() if config.get("rpc") in nodes]
+    )
 
     for oid in (oid_simple, oid_complex):
-        with allure.step(f'Drop object {oid}'):
+        with allure.step(f"Drop object {oid}"):
             get_object(wallet, cid, oid)
             head_object(wallet, cid, oid)
             drop_object(node_name, cid, oid)
@@ -335,8 +390,8 @@ def test_drop_object(prepare_wallet_and_deposit):
 
 
 @pytest.mark.node_mgmt
-@pytest.mark.skip(reason='Need to clarify scenario')
-@allure.title('Control Operations with storage nodes')
+@pytest.mark.skip(reason="Need to clarify scenario")
+@allure.title("Control Operations with storage nodes")
 def test_shards(prepare_wallet_and_deposit, create_container_and_pick_node):
     wallet = prepare_wallet_and_deposit
     file_path = generate_file()
@@ -345,7 +400,7 @@ def test_shards(prepare_wallet_and_deposit, create_container_and_pick_node):
     original_oid = put_object(wallet, file_path, cid)
 
     # for mode in ('read-only', 'degraded'):
-    for mode in ('degraded',):
+    for mode in ("degraded",):
         shards = node_shard_list(node_name)
         assert shards
 
@@ -365,7 +420,7 @@ def test_shards(prepare_wallet_and_deposit, create_container_and_pick_node):
         get_object(wallet, cid, original_oid)
 
         for shard in shards:
-            node_shard_set_mode(node_name, shard, 'read-write')
+            node_shard_set_mode(node_name, shard, "read-write")
 
         shards = node_shard_list(node_name)
         assert shards
@@ -374,62 +429,66 @@ def test_shards(prepare_wallet_and_deposit, create_container_and_pick_node):
         delete_object(wallet, cid, oid)
 
 
-@allure.step('Validate object has {expected_copies} copies')
+@allure.step("Validate object has {expected_copies} copies")
 def validate_object_copies(wallet: str, placement_rule: str, file_path: str, expected_copies: int):
     cid = create_container(wallet, rule=placement_rule, basic_acl=PUBLIC_ACL)
     got_policy = placement_policy_from_container(get_container(wallet, cid, json_mode=False))
-    assert got_policy == placement_rule.replace('\'', ''), \
-        f'Expected \n{placement_rule} and got policy \n{got_policy} are the same'
+    assert got_policy == placement_rule.replace(
+        "'", ""
+    ), f"Expected \n{placement_rule} and got policy \n{got_policy} are the same"
     oid = put_object(wallet, file_path, cid)
     nodes = get_nodes_with_object(wallet, cid, oid)
-    assert len(nodes) == expected_copies, f'Expected {expected_copies} copies, got {len(nodes)}'
+    assert len(nodes) == expected_copies, f"Expected {expected_copies} copies, got {len(nodes)}"
     return cid, oid, nodes
 
 
-@allure.step('Wait for node {node_name} goes online')
+@allure.step("Wait for node {node_name} goes online")
 def wait_for_node_go_online(node_name: str) -> None:
     timeout, attempts = 5, 20
     for _ in range(attempts):
         try:
             health_check = node_healthcheck(node_name)
-            assert health_check.health_status == 'READY' and health_check.network_status == 'ONLINE'
+            assert health_check.health_status == "READY" and health_check.network_status == "ONLINE"
             return
         except Exception as err:
-            logger.warning(f'Node {node_name} is not online:\n{err}')
+            logger.warning(f"Node {node_name} is not online:\n{err}")
         sleep(timeout)
     raise AssertionError(
-        f"Node {node_name} hasn't gone to the READY and ONLINE state after {timeout * attempts} second")
+        f"Node {node_name} hasn't gone to the READY and ONLINE state after {timeout * attempts} second"
+    )
 
 
-@allure.step('Wait for node {node_name} is ready')
+@allure.step("Wait for node {node_name} is ready")
 def wait_for_node_to_be_ready(node_name: str) -> None:
     timeout, attempts = 30, 6
     for _ in range(attempts):
         try:
             health_check = node_healthcheck(node_name)
-            if health_check.health_status == 'READY':
+            if health_check.health_status == "READY":
                 return
         except Exception as err:
-            logger.warning(f'Node {node_name} is not ready:\n{err}')
+            logger.warning(f"Node {node_name} is not ready:\n{err}")
         sleep(timeout)
     raise AssertionError(
-        f"Node {node_name} hasn't gone to the READY state after {timeout * attempts} seconds")
+        f"Node {node_name} hasn't gone to the READY state after {timeout * attempts} seconds"
+    )
 
 
-@allure.step('Wait for {expected_copies} object copies in the wallet')
-def wait_for_expected_object_copies(wallet: str, cid: str, oid: str,
-                                    expected_copies: int = 2) -> None:
+@allure.step("Wait for {expected_copies} object copies in the wallet")
+def wait_for_expected_object_copies(
+    wallet: str, cid: str, oid: str, expected_copies: int = 2
+) -> None:
     for i in range(2):
         copies = get_simple_object_copies(wallet, cid, oid)
         if copies == expected_copies:
             break
         tick_epoch()
-        sleep(robot_time_to_int(NEOFS_CONTRACT_CACHE_TIMEOUT))
+        sleep(parse_time(NEOFS_CONTRACT_CACHE_TIMEOUT))
     else:
-        raise AssertionError(f'There are no {expected_copies} copies during time')
+        raise AssertionError(f"There are no {expected_copies} copies during time")
 
 
-@allure.step('Wait for object to be dropped')
+@allure.step("Wait for object to be dropped")
 def wait_for_obj_dropped(wallet: str, cid: str, oid: str, checker) -> None:
     for _ in range(3):
         try:
@@ -440,4 +499,4 @@ def wait_for_obj_dropped(wallet: str, cid: str, oid: str, checker) -> None:
                 return
             raise AssertionError(f'Expected "{OBJECT_NOT_FOUND}" error, got\n{err}')
 
-    raise AssertionError(f'Object {oid} was not dropped from node')
+    raise AssertionError(f"Object {oid} was not dropped from node")
@@ -16,8 +16,6 @@ import allure
 import pexpect
 
 logger = logging.getLogger("NeoLogger")
 
-ROBOT_AUTO_KEYWORDS = False
-
 COLOR_GREEN = "\033[92m"
 COLOR_OFF = "\033[0m"
 
@@ -17,7 +17,6 @@ import neofs_verbs
 from common import NEOFS_NETMAP, WALLET_CONFIG
 
 logger = logging.getLogger("NeoLogger")
-ROBOT_AUTO_KEYWORDS = False
 
 
 @allure.step("Get Link Object")
@@ -35,7 +34,7 @@ def get_link_object(
     Returns:
         (str): Link Object ID
         When no Link Object ID is found after all Storage Nodes polling,
-        the function throws a native robot error.
+        the function throws an error.
     """
     for node in NEOFS_NETMAP:
         try:
@@ -68,7 +67,7 @@ def get_last_object(wallet: str, cid: str, oid: str):
     Returns:
         (str): Last Object ID
         When no Last Object ID is found after all Storage Nodes polling,
-        the function throws a native robot error.
+        the function throws an error.
     """
     for node in NEOFS_NETMAP:
         try:
@@ -4,7 +4,6 @@
 This module contains keywords that utilize `neofs-cli container` commands.
 """
 
-import allure
 import json
 import logging
 from time import sleep
@@ -17,7 +16,6 @@ from common import NEOFS_ENDPOINT, WALLET_CONFIG
 
 logger = logging.getLogger("NeoLogger")
 
-ROBOT_AUTO_KEYWORDS = False
 DEFAULT_PLACEMENT_RULE = "REP 2 IN X CBF 1 SELECT 4 FROM * AS X"
 
 
@@ -105,7 +103,7 @@ def wait_for_container_deletion(wallet: str, cid: str, attempts: int = 30, sleep
     raise AssertionError(f"Expected container deleted during {attempts * sleep_interval} sec.")
 
 
-@allure.step('List Containers')
+@allure.step("List Containers")
 def list_containers(wallet: str) -> list[str]:
     """
     A wrapper for `neofs-cli container list` call. It returns all the
|
||||||
return output.split()
|
return output.split()
|
||||||
|
|
||||||
|
|
||||||
@allure.step('Get Container')
|
@allure.step("Get Container")
|
||||||
def get_container(wallet: str, cid: str, json_mode: bool = True) -> Union[dict, str]:
|
def get_container(wallet: str, cid: str, json_mode: bool = True) -> Union[dict, str]:
|
||||||
"""
|
"""
|
||||||
A wrapper for `neofs-cli container get` call. It extracts container's
|
A wrapper for `neofs-cli container get` call. It extracts container's
|
||||||
|
@ -150,7 +148,7 @@ def get_container(wallet: str, cid: str, json_mode: bool = True) -> Union[dict,
|
||||||
return container_info
|
return container_info
|
||||||
|
|
||||||
|
|
||||||
@allure.step('Delete Container')
|
@allure.step("Delete Container")
|
||||||
# TODO: make the error message about a non-found container more user-friendly
|
# TODO: make the error message about a non-found container more user-friendly
|
||||||
# https://github.com/nspcc-dev/neofs-contract/issues/121
|
# https://github.com/nspcc-dev/neofs-contract/issues/121
|
||||||
def delete_container(wallet: str, cid: str, force: bool = False) -> None:
|
def delete_container(wallet: str, cid: str, force: bool = False) -> None:
|
||||||
|
|
|
@@ -13,7 +13,6 @@ from common import (
 )
 
 logger = logging.getLogger("NeoLogger")
-ROBOT_AUTO_KEYWORDS = False
 
 
 @allure.step("Get Epoch")
@@ -14,7 +14,6 @@ from cli_helpers import _cmd_run
 from common import HTTP_GATE
 
 logger = logging.getLogger("NeoLogger")
-ROBOT_AUTO_KEYWORDS = False
 
 ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")
 
@@ -13,8 +13,6 @@ import base64
 
 import base58
 
-ROBOT_AUTO_KEYWORDS = False
-
 
 def decode_simple_header(data: dict):
     """
@@ -43,14 +41,8 @@ def decode_split_header(data: dict):
     """
     try:
         data["splitId"] = json_reencode(data["splitId"])
-        data["lastPart"] = (
-            json_reencode(data["lastPart"]["value"])
-            if data["lastPart"] else None
-        )
-        data["link"] = (
-            json_reencode(data["link"]["value"])
-            if data["link"] else None
-        )
+        data["lastPart"] = json_reencode(data["lastPart"]["value"]) if data["lastPart"] else None
+        data["link"] = json_reencode(data["link"]["value"]) if data["link"] else None
     except Exception as exc:
         raise ValueError(f"failed to decode JSON output: {exc}") from exc
 
@@ -66,16 +58,18 @@ def decode_linking_object(data: dict):
         data = decode_simple_header(data)
         # reencoding Child Object IDs
         # { 'value': <Base58 encoded OID> } -> <Base64 encoded OID>
-        for ind, val in enumerate(data['header']['split']['children']):
-            data['header']['split']['children'][ind] = json_reencode(val['value'])
-        data['header']['split']['splitID'] = json_reencode(data['header']['split']['splitID'])
-        data['header']['split']['previous'] = (
-            json_reencode(data['header']['split']['previous']['value'])
-            if data['header']['split']['previous'] else None
+        for ind, val in enumerate(data["header"]["split"]["children"]):
+            data["header"]["split"]["children"][ind] = json_reencode(val["value"])
+        data["header"]["split"]["splitID"] = json_reencode(data["header"]["split"]["splitID"])
+        data["header"]["split"]["previous"] = (
+            json_reencode(data["header"]["split"]["previous"]["value"])
+            if data["header"]["split"]["previous"]
+            else None
         )
-        data['header']['split']['parent'] = (
-            json_reencode(data['header']['split']['parent']['value'])
-            if data['header']['split']['parent'] else None
+        data["header"]["split"]["parent"] = (
+            json_reencode(data["header"]["split"]["parent"]["value"])
+            if data["header"]["split"]["parent"]
+            else None
         )
     except Exception as exc:
         raise ValueError(f"failed to decode JSON output: {exc}") from exc
@@ -101,8 +95,7 @@ def decode_tombstone(data: dict):
     """
     try:
         data = decode_simple_header(data)
-        data['header']['sessionToken'] = decode_session_token(
-            data['header']['sessionToken'])
+        data["header"]["sessionToken"] = decode_session_token(data["header"]["sessionToken"])
     except Exception as exc:
         raise ValueError(f"failed to decode JSON output: {exc}") from exc
     return data
|
@ -113,10 +106,12 @@ def decode_session_token(data: dict):
|
||||||
This function reencodes a fragment of header which contains
|
This function reencodes a fragment of header which contains
|
||||||
information about session token.
|
information about session token.
|
||||||
"""
|
"""
|
||||||
data['body']['object']['address']['containerID'] = json_reencode(
|
data["body"]["object"]["address"]["containerID"] = json_reencode(
|
||||||
data['body']['object']['address']['containerID']['value'])
|
data["body"]["object"]["address"]["containerID"]["value"]
|
||||||
data['body']['object']['address']['objectID'] = json_reencode(
|
)
|
||||||
data['body']['object']['address']['objectID']['value'])
|
data["body"]["object"]["address"]["objectID"] = json_reencode(
|
||||||
|
data["body"]["object"]["address"]["objectID"]["value"]
|
||||||
|
)
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
@ -135,7 +130,7 @@ def encode_for_json(data: str):
|
||||||
This function encodes binary data for sending them as protobuf
|
This function encodes binary data for sending them as protobuf
|
||||||
structures.
|
structures.
|
||||||
"""
|
"""
|
||||||
return base64.b64encode(base58.b58decode(data)).decode('utf-8')
|
return base64.b64encode(base58.b58decode(data)).decode("utf-8")
|
||||||
|
|
||||||
|
|
||||||
def decode_common_fields(data: dict):
|
def decode_common_fields(data: dict):
|
||||||
|
|
|
@@ -4,7 +4,6 @@
 This module contains wrappers for NeoFS verbs executed via neofs-cli.
 """
 
-import allure
 import json
 import logging
 import random
@@ -19,8 +18,6 @@ from common import ASSETS_DIR, NEOFS_ENDPOINT, NEOFS_NETMAP, WALLET_CONFIG
 
 logger = logging.getLogger("NeoLogger")
 
-ROBOT_AUTO_KEYWORDS = False
-
 
 @allure.step("Get object")
 def get_object(
|
||||||
if expected_objects_list:
|
if expected_objects_list:
|
||||||
if sorted(found_objects) == sorted(expected_objects_list):
|
if sorted(found_objects) == sorted(expected_objects_list):
|
||||||
logger.info(
|
logger.info(
|
||||||
f"Found objects list '{found_objects}' ",
|
f"Found objects list '{found_objects}' "
|
||||||
f"is equal for expected list '{expected_objects_list}'",
|
f"is equal for expected list '{expected_objects_list}'"
|
||||||
)
|
)
|
||||||
else:
|
else:
|
||||||
logger.warning(
|
logger.warning(
|
||||||
f"Found object list {found_objects} ",
|
f"Found object list {found_objects} "
|
||||||
f"is not equal to expected list '{expected_objects_list}'",
|
f"is not equal to expected list '{expected_objects_list}'"
|
||||||
)
|
)
|
||||||
|
|
||||||
return found_objects
|
return found_objects
|
||||||
|
|
|
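Dropping the trailing commas inside logger.info(...) and logger.warning(...) is a behavior fix, not just formatting: with the commas, the two f-strings were passed as separate positional arguments, and logging treats extra positional arguments as %-format arguments for the first one, so emitting the record raises "not all arguments converted during string formatting" instead of printing the full message. Without the commas, adjacent f-strings are concatenated into one argument. A minimal sketch:

    import logging

    logger = logging.getLogger("NeoLogger")
    logger.info(f"part one ", f"part two")   # two args: logging error at emit time
    logger.info(f"part one " f"part two")    # one arg: "part one part two"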
@@ -12,14 +12,13 @@ from dataclasses import dataclass
 from typing import Optional
 
 import allure
-from common import MAINNET_BLOCK_TIME, MORPH_BLOCK_TIME, NEOFS_NETMAP_DICT, STORAGE_WALLET_PASS
+from common import MORPH_BLOCK_TIME, NEOFS_NETMAP_DICT, STORAGE_WALLET_PASS
 from data_formatters import get_wallet_public_key
 from epoch import tick_epoch
 from service_helper import get_storage_service_helper
-from utility import robot_time_to_int
+from utility import parse_time
 
 logger = logging.getLogger("NeoLogger")
-ROBOT_AUTO_KEYWORDS = False
 
 
 @dataclass
|
||||||
helper = get_storage_service_helper()
|
helper = get_storage_service_helper()
|
||||||
helper.stop_node(node_name)
|
helper.stop_node(node_name)
|
||||||
helper.delete_node_data(node_name)
|
helper.delete_node_data(node_name)
|
||||||
time.sleep(robot_time_to_int(MORPH_BLOCK_TIME))
|
time.sleep(parse_time(MORPH_BLOCK_TIME))
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Exclude node {node_to_exclude} from network map")
|
@allure.step("Exclude node {node_to_exclude} from network map")
|
||||||
|
@@ -204,7 +203,7 @@ def exclude_node_from_network_map(node_to_exclude, alive_node):
 
     node_set_status(node_to_exclude, status="offline")
 
-    time.sleep(robot_time_to_int(MORPH_BLOCK_TIME))
+    time.sleep(parse_time(MORPH_BLOCK_TIME))
     tick_epoch()
 
     snapshot = get_netmap_snapshot(node_name=alive_node)
@@ -217,7 +216,7 @@ def exclude_node_from_network_map(node_to_exclude, alive_node):
 def include_node_to_network_map(node_to_include: str, alive_node: str) -> None:
     node_set_status(node_to_include, status="online")
 
-    time.sleep(robot_time_to_int(MORPH_BLOCK_TIME))
+    time.sleep(parse_time(MORPH_BLOCK_TIME))
     tick_epoch()
 
     check_node_in_map(node_to_include, alive_node)
|
@ -24,7 +24,6 @@ from wallet import nep17_transfer
|
||||||
from wrappers import run_sh_with_passwd_contract
|
from wrappers import run_sh_with_passwd_contract
|
||||||
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
logger = logging.getLogger("NeoLogger")
|
||||||
ROBOT_AUTO_KEYWORDS = False
|
|
||||||
|
|
||||||
EMPTY_PASSWORD = ""
|
EMPTY_PASSWORD = ""
|
||||||
TX_PERSIST_TIMEOUT = 15 # seconds
|
TX_PERSIST_TIMEOUT = 15 # seconds
|
||||||
|
|
|
@ -1,226 +0,0 @@
|
||||||
#!/usr/bin/python3
|
|
||||||
|
|
||||||
import allure
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import uuid
|
|
||||||
from enum import Enum
|
|
||||||
from time import sleep
|
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
import allure
|
|
||||||
import boto3
|
|
||||||
import urllib3
|
|
||||||
from botocore.exceptions import ClientError
|
|
||||||
from cli_helpers import _run_with_passwd, log_command_execution
|
|
||||||
from common import NEOFS_ENDPOINT, S3_GATE, S3_GATE_WALLET_PASS, S3_GATE_WALLET_PATH
|
|
||||||
from data_formatters import get_wallet_public_key
|
|
||||||
|
|
||||||
##########################################################
|
|
||||||
# Disabling warnings on self-signed certificate which the
|
|
||||||
# boto library produces on requests to S3-gate in dev-env.
|
|
||||||
urllib3.disable_warnings()
|
|
||||||
##########################################################
|
|
||||||
logger = logging.getLogger("NeoLogger")
|
|
||||||
ROBOT_AUTO_KEYWORDS = False
|
|
||||||
CREDENTIALS_CREATE_TIMEOUT = "30s"
|
|
||||||
NEOFS_EXEC = os.getenv("NEOFS_EXEC", "neofs-authmate")
|
|
||||||
|
|
||||||
# Artificial delay that we add after object deletion and container creation
|
|
||||||
# Delay is added because sometimes immediately after deletion object still appears
|
|
||||||
# to be existing (probably because tombstone object takes some time to replicate)
|
|
||||||
# TODO: remove after https://github.com/nspcc-dev/neofs-s3-gw/issues/610 is fixed
|
|
||||||
S3_SYNC_WAIT_TIME = 5
|
|
||||||
|
|
||||||
|
|
||||||
class VersioningStatus(Enum):
|
|
||||||
ENABLED = "Enabled"
|
|
||||||
SUSPENDED = "Suspended"
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Init S3 Credentials")
|
|
||||||
def init_s3_credentials(wallet_path, s3_bearer_rules_file: Optional[str] = None):
|
|
||||||
bucket = str(uuid.uuid4())
|
|
||||||
s3_bearer_rules = s3_bearer_rules_file or "robot/resources/files/s3_bearer_rules.json"
|
|
||||||
gate_public_key = get_wallet_public_key(S3_GATE_WALLET_PATH, S3_GATE_WALLET_PASS)
|
|
||||||
cmd = (
|
|
||||||
f"{NEOFS_EXEC} --debug --with-log --timeout {CREDENTIALS_CREATE_TIMEOUT} "
|
|
||||||
f"issue-secret --wallet {wallet_path} --gate-public-key={gate_public_key} "
|
|
||||||
f"--peer {NEOFS_ENDPOINT} --container-friendly-name {bucket} "
|
|
||||||
f"--bearer-rules {s3_bearer_rules}"
|
|
||||||
)
|
|
||||||
logger.info(f"Executing command: {cmd}")
|
|
||||||
|
|
||||||
try:
|
|
||||||
output = _run_with_passwd(cmd)
|
|
||||||
logger.info(f"Command completed with output: {output}")
|
|
||||||
|
|
||||||
# output contains some debug info and then several JSON structures, so we find each
|
|
||||||
# JSON structure by curly brackets (naive approach, but works while JSON is not nested)
|
|
||||||
# and then we take JSON containing secret_access_key
|
|
||||||
json_blocks = re.findall(r"\{.*?\}", output, re.DOTALL)
|
|
||||||
for json_block in json_blocks:
|
|
||||||
try:
|
|
||||||
parsed_json_block = json.loads(json_block)
|
|
||||||
if "secret_access_key" in parsed_json_block:
|
|
||||||
return (
|
|
||||||
parsed_json_block["container_id"],
|
|
||||||
bucket,
|
|
||||||
parsed_json_block["access_key_id"],
|
|
||||||
parsed_json_block["secret_access_key"],
|
|
||||||
parsed_json_block["owner_private_key"],
|
|
||||||
)
|
|
||||||
except json.JSONDecodeError:
|
|
||||||
raise AssertionError(f"Could not parse info from output\n{output}")
|
|
||||||
raise AssertionError(f"Could not find AWS credentials in output:\n{output}")
|
|
||||||
|
|
||||||
except Exception as exc:
|
|
||||||
raise RuntimeError(f"Failed to init s3 credentials because of error\n{exc}") from exc
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Config S3 client")
|
|
||||||
def config_s3_client(access_key_id: str, secret_access_key: str):
|
|
||||||
try:
|
|
||||||
|
|
||||||
session = boto3.session.Session()
|
|
||||||
|
|
||||||
s3_client = session.client(
|
|
||||||
service_name="s3",
|
|
||||||
aws_access_key_id=access_key_id,
|
|
||||||
aws_secret_access_key=secret_access_key,
|
|
||||||
endpoint_url=S3_GATE,
|
|
||||||
verify=False,
|
|
||||||
)
|
|
||||||
|
|
||||||
return s3_client
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
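# Usage sketch (illustrative, not part of the test keywords): the two helpers
# above are meant to be chained; issue-secret produces the credentials that
# the boto3 client is then configured with. The wallet path is a placeholder.
#
#     cid, bucket, access_key_id, secret_access_key, owner_key = init_s3_credentials(
#         "/path/to/wallet.json"
#     )
#     s3_client = config_s3_client(access_key_id, secret_access_key)

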
@allure.step("Create bucket S3")
|
|
||||||
def create_bucket_s3(s3_client):
|
|
||||||
bucket_name = str(uuid.uuid4())
|
|
||||||
|
|
||||||
try:
|
|
||||||
s3_bucket = s3_client.create_bucket(Bucket=bucket_name)
|
|
||||||
log_command_execution(f"Created S3 bucket {bucket_name}", s3_bucket)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME)
|
|
||||||
return bucket_name
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("List buckets S3")
|
|
||||||
def list_buckets_s3(s3_client):
|
|
||||||
found_buckets = []
|
|
||||||
try:
|
|
||||||
response = s3_client.list_buckets()
|
|
||||||
log_command_execution("S3 List buckets result", response)
|
|
||||||
|
|
||||||
for bucket in response["Buckets"]:
|
|
||||||
found_buckets.append(bucket["Name"])
|
|
||||||
|
|
||||||
return found_buckets
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Delete bucket S3")
|
|
||||||
def delete_bucket_s3(s3_client, bucket: str):
|
|
||||||
try:
|
|
||||||
response = s3_client.delete_bucket(Bucket=bucket)
|
|
||||||
log_command_execution("S3 Delete bucket result", response)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME)
|
|
||||||
return response
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
log_command_execution("S3 Delete bucket error", str(err))
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Head bucket S3")
|
|
||||||
def head_bucket(s3_client, bucket: str):
|
|
||||||
try:
|
|
||||||
response = s3_client.head_bucket(Bucket=bucket)
|
|
||||||
log_command_execution("S3 Head bucket result", response)
|
|
||||||
return response
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
log_command_execution("S3 Head bucket error", str(err))
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Set bucket versioning status")
|
|
||||||
def set_bucket_versioning(s3_client, bucket_name: str, status: VersioningStatus) -> None:
|
|
||||||
try:
|
|
||||||
response = s3_client.put_bucket_versioning(
|
|
||||||
Bucket=bucket_name, VersioningConfiguration={"Status": status.value}
|
|
||||||
)
|
|
||||||
log_command_execution("S3 Set bucket versioning to", response)
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(f"Got error during set bucket versioning: {err}") from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Get bucket versioning status")
|
|
||||||
def get_bucket_versioning_status(s3_client, bucket_name: str) -> str:
|
|
||||||
try:
|
|
||||||
response = s3_client.get_bucket_versioning(Bucket=bucket_name)
|
|
||||||
status = response.get("Status")
|
|
||||||
log_command_execution("S3 Got bucket versioning status", response)
|
|
||||||
return status
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(f"Got error during get bucket versioning status: {err}") from err
|
|
||||||
|
|
||||||
|
|
||||||
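# Usage sketch (illustrative): a versioning round-trip over a fresh bucket,
# assuming a client configured via config_s3_client.
#
#     bucket = create_bucket_s3(s3_client)
#     set_bucket_versioning(s3_client, bucket, VersioningStatus.ENABLED)
#     assert get_bucket_versioning_status(s3_client, bucket) == "Enabled"

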
@allure.step("Put bucket tagging")
|
|
||||||
def put_bucket_tagging(s3_client, bucket_name: str, tags: list):
|
|
||||||
try:
|
|
||||||
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
|
|
||||||
tagging = {"TagSet": tags}
|
|
||||||
response = s3_client.put_bucket_tagging(Bucket=bucket_name, Tagging=tagging)
|
|
||||||
log_command_execution("S3 Put bucket tagging", response)
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(f"Got error during put bucket tagging: {err}") from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Get bucket tagging")
|
|
||||||
def get_bucket_tagging(s3_client, bucket_name: str) -> list:
|
|
||||||
try:
|
|
||||||
response = s3_client.get_bucket_tagging(Bucket=bucket_name)
|
|
||||||
log_command_execution("S3 Get bucket tagging", response)
|
|
||||||
return response.get("TagSet")
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(f"Got error during get bucket tagging: {err}") from err
|
|
||||||
|
|
||||||
|
|
||||||
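# Usage sketch (illustrative): the tagging helpers take tags as (key, value)
# pairs and convert them to the S3 TagSet format internally.
#
#     put_bucket_tagging(s3_client, bucket, [("env", "dev"), ("owner", "qa")])
#     tag_set = get_bucket_tagging(s3_client, bucket)
#     # tag_set == [{"Key": "env", "Value": "dev"}, {"Key": "owner", "Value": "qa"}]

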
@allure.step("Delete bucket tagging")
|
|
||||||
def delete_bucket_tagging(s3_client, bucket_name: str) -> None:
|
|
||||||
try:
|
|
||||||
response = s3_client.delete_bucket_tagging(Bucket=bucket_name)
|
|
||||||
log_command_execution("S3 Delete bucket tagging", response)
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(f"Got error during delete bucket tagging: {err}") from err
|
|
|
@@ -1,412 +0,0 @@
#!/usr/bin/python3.9

import logging
import os
import uuid
from enum import Enum
from time import sleep
from typing import Optional

import allure
import urllib3
from botocore.exceptions import ClientError
from cli_helpers import log_command_execution
from python_keywords.aws_cli_client import AwsCliClient
from python_keywords.s3_gate_bucket import S3_SYNC_WAIT_TIME

##########################################################
# Disabling warnings on self-signed certificate which the
# boto library produces on requests to S3-gate in dev-env.
urllib3.disable_warnings()
##########################################################
logger = logging.getLogger("NeoLogger")
ROBOT_AUTO_KEYWORDS = False
CREDENTIALS_CREATE_TIMEOUT = "30s"

ASSETS_DIR = os.getenv("ASSETS_DIR", "TemporaryDir/")


class VersioningStatus(Enum):
    ENABLED = "Enabled"
    SUSPENDED = "Suspended"


@allure.step("List objects S3 v2")
|
|
||||||
def list_objects_s3_v2(s3_client, bucket: str) -> list:
|
|
||||||
try:
|
|
||||||
response = s3_client.list_objects_v2(Bucket=bucket)
|
|
||||||
content = response.get("Contents", [])
|
|
||||||
log_command_execution("S3 v2 List objects result", response)
|
|
||||||
obj_list = []
|
|
||||||
for obj in content:
|
|
||||||
obj_list.append(obj["Key"])
|
|
||||||
logger.info(f"Found s3 objects: {obj_list}")
|
|
||||||
return obj_list
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("List objects S3")
|
|
||||||
def list_objects_s3(s3_client, bucket: str) -> list:
|
|
||||||
try:
|
|
||||||
response = s3_client.list_objects(Bucket=bucket)
|
|
||||||
content = response.get("Contents", [])
|
|
||||||
log_command_execution("S3 List objects result", response)
|
|
||||||
obj_list = []
|
|
||||||
for obj in content:
|
|
||||||
obj_list.append(obj["Key"])
|
|
||||||
logger.info(f"Found s3 objects: {obj_list}")
|
|
||||||
return obj_list
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("List objects versions S3")
|
|
||||||
def list_objects_versions_s3(s3_client, bucket: str) -> list:
|
|
||||||
try:
|
|
||||||
response = s3_client.list_object_versions(Bucket=bucket)
|
|
||||||
versions = response.get("Versions", [])
|
|
||||||
log_command_execution("S3 List objects versions result", response)
|
|
||||||
return versions
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Put object S3")
|
|
||||||
def put_object_s3(s3_client, bucket: str, filepath: str):
|
|
||||||
filename = os.path.basename(filepath)
|
|
||||||
|
|
||||||
if isinstance(s3_client, AwsCliClient):
|
|
||||||
file_content = filepath
|
|
||||||
else:
|
|
||||||
with open(filepath, "rb") as put_file:
|
|
||||||
file_content = put_file.read()
|
|
||||||
|
|
||||||
try:
|
|
||||||
response = s3_client.put_object(Body=file_content, Bucket=bucket, Key=filename)
|
|
||||||
log_command_execution("S3 Put object result", response)
|
|
||||||
return response.get("VersionId")
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Head object S3")
|
|
||||||
def head_object_s3(s3_client, bucket: str, object_key: str, version_id: str = None):
|
|
||||||
try:
|
|
||||||
params = {"Bucket": bucket, "Key": object_key}
|
|
||||||
if version_id:
|
|
||||||
params["VersionId"] = version_id
|
|
||||||
response = s3_client.head_object(**params)
|
|
||||||
log_command_execution("S3 Head object result", response)
|
|
||||||
return response
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Delete object S3")
|
|
||||||
def delete_object_s3(s3_client, bucket, object_key, version_id: str = None):
|
|
||||||
try:
|
|
||||||
params = {"Bucket": bucket, "Key": object_key}
|
|
||||||
if version_id:
|
|
||||||
params["VersionId"] = version_id
|
|
||||||
response = s3_client.delete_object(**params)
|
|
||||||
log_command_execution("S3 Delete object result", response)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME)
|
|
||||||
return response
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Delete objects S3")
|
|
||||||
def delete_objects_s3(s3_client, bucket: str, object_keys: list):
|
|
||||||
try:
|
|
||||||
response = s3_client.delete_objects(Bucket=bucket, Delete=_make_objs_dict(object_keys))
|
|
||||||
log_command_execution("S3 Delete objects result", response)
|
|
||||||
sleep(S3_SYNC_WAIT_TIME)
|
|
||||||
return response
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Delete object versions S3")
|
|
||||||
def delete_object_versions_s3(s3_client, bucket: str, object_versions: list):
|
|
||||||
try:
|
|
||||||
# Build deletion list in S3 format
|
|
||||||
delete_list = {
|
|
||||||
"Objects": [
|
|
||||||
{
|
|
||||||
"Key": object_version["Key"],
|
|
||||||
"VersionId": object_version["VersionId"],
|
|
||||||
}
|
|
||||||
for object_version in object_versions
|
|
||||||
]
|
|
||||||
}
|
|
||||||
response = s3_client.delete_objects(Bucket=bucket, Delete=delete_list)
|
|
||||||
log_command_execution("S3 Delete objects result", response)
|
|
||||||
return response
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
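# Usage sketch (illustrative): delete_object_versions_s3 expects entries
# shaped like the "Versions" items returned by list_objects_versions_s3,
# so the two helpers can be chained directly.
#
#     versions = list_objects_versions_s3(s3_client, bucket)
#     delete_object_versions_s3(s3_client, bucket, versions)

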
@allure.step("Copy object S3")
|
|
||||||
def copy_object_s3(s3_client, bucket, object_key, bucket_dst=None):
|
|
||||||
filename = f"{os.getcwd()}/{uuid.uuid4()}"
|
|
||||||
try:
|
|
||||||
response = s3_client.copy_object(
|
|
||||||
Bucket=bucket_dst or bucket, CopySource=f"{bucket}/{object_key}", Key=filename
|
|
||||||
)
|
|
||||||
log_command_execution("S3 Copy objects result", response)
|
|
||||||
return filename
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Get object S3")
|
|
||||||
def get_object_s3(s3_client, bucket: str, object_key: str, version_id: str = None):
|
|
||||||
filename = f"{ASSETS_DIR}/{uuid.uuid4()}"
|
|
||||||
try:
|
|
||||||
params = {"Bucket": bucket, "Key": object_key}
|
|
||||||
if version_id:
|
|
||||||
params["VersionId"] = version_id
|
|
||||||
|
|
||||||
if isinstance(s3_client, AwsCliClient):
|
|
||||||
params["file_path"] = filename
|
|
||||||
|
|
||||||
response = s3_client.get_object(**params)
|
|
||||||
log_command_execution("S3 Get objects result", response)
|
|
||||||
|
|
||||||
if not isinstance(s3_client, AwsCliClient):
|
|
||||||
with open(f"{filename}", "wb") as get_file:
|
|
||||||
chunk = response["Body"].read(1024)
|
|
||||||
while chunk:
|
|
||||||
get_file.write(chunk)
|
|
||||||
chunk = response["Body"].read(1024)
|
|
||||||
|
|
||||||
return filename
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
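# Usage sketch (illustrative): a put/head/get round-trip pinned to one
# version; the object key is the basename of the uploaded file.
#
#     version_id = put_object_s3(s3_client, bucket, "/tmp/payload.bin")
#     head_object_s3(s3_client, bucket, "payload.bin", version_id=version_id)
#     local_copy = get_object_s3(s3_client, bucket, "payload.bin", version_id=version_id)

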
@allure.step("Create multipart upload S3")
|
|
||||||
def create_multipart_upload_s3(s3_client, bucket_name: str, object_key: str) -> str:
|
|
||||||
try:
|
|
||||||
response = s3_client.create_multipart_upload(Bucket=bucket_name, Key=object_key)
|
|
||||||
log_command_execution("S3 Created multipart upload", response)
|
|
||||||
assert response.get("UploadId"), f"Expected UploadId in response:\n{response}"
|
|
||||||
|
|
||||||
return response.get("UploadId")
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("List multipart uploads S3")
|
|
||||||
def list_multipart_uploads_s3(s3_client, bucket_name: str) -> Optional[list[dict]]:
|
|
||||||
try:
|
|
||||||
response = s3_client.list_multipart_uploads(Bucket=bucket_name)
|
|
||||||
log_command_execution("S3 List multipart upload", response)
|
|
||||||
|
|
||||||
return response.get("Uploads")
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Abort multipart upload S3")
|
|
||||||
def abort_multipart_uploads_s3(s3_client, bucket_name: str, object_key: str, upload_id: str):
|
|
||||||
try:
|
|
||||||
response = s3_client.abort_multipart_upload(
|
|
||||||
Bucket=bucket_name, Key=object_key, UploadId=upload_id
|
|
||||||
)
|
|
||||||
log_command_execution("S3 Abort multipart upload", response)
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Upload part S3")
|
|
||||||
def upload_part_s3(
|
|
||||||
s3_client, bucket_name: str, object_key: str, upload_id: str, part_num: int, filepath: str
|
|
||||||
) -> str:
|
|
||||||
if isinstance(s3_client, AwsCliClient):
|
|
||||||
file_content = filepath
|
|
||||||
else:
|
|
||||||
with open(filepath, "rb") as put_file:
|
|
||||||
file_content = put_file.read()
|
|
||||||
|
|
||||||
try:
|
|
||||||
response = s3_client.upload_part(
|
|
||||||
UploadId=upload_id,
|
|
||||||
Bucket=bucket_name,
|
|
||||||
Key=object_key,
|
|
||||||
PartNumber=part_num,
|
|
||||||
Body=file_content,
|
|
||||||
)
|
|
||||||
log_command_execution("S3 Upload part", response)
|
|
||||||
assert response.get("ETag"), f"Expected ETag in response:\n{response}"
|
|
||||||
|
|
||||||
return response.get("ETag")
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("List parts S3")
|
|
||||||
def list_parts_s3(s3_client, bucket_name: str, object_key: str, upload_id: str) -> list[dict]:
|
|
||||||
try:
|
|
||||||
response = s3_client.list_parts(UploadId=upload_id, Bucket=bucket_name, Key=object_key)
|
|
||||||
log_command_execution("S3 List part", response)
|
|
||||||
assert response.get("Parts"), f"Expected Parts in response:\n{response}"
|
|
||||||
|
|
||||||
return response.get("Parts")
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Complete multipart upload S3")
|
|
||||||
def complete_multipart_upload_s3(
|
|
||||||
s3_client, bucket_name: str, object_key: str, upload_id: str, parts: list
|
|
||||||
):
|
|
||||||
try:
|
|
||||||
parts = [{"ETag": etag, "PartNumber": part_num} for part_num, etag in parts]
|
|
||||||
response = s3_client.complete_multipart_upload(
|
|
||||||
Bucket=bucket_name, Key=object_key, UploadId=upload_id, MultipartUpload={"Parts": parts}
|
|
||||||
)
|
|
||||||
log_command_execution("S3 Complete multipart upload", response)
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(
|
|
||||||
f'Error Message: {err.response["Error"]["Message"]}\n'
|
|
||||||
f'Http status code: {err.response["ResponseMetadata"]["HTTPStatusCode"]}'
|
|
||||||
) from err
|
|
||||||
|
|
||||||
|
|
||||||
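# Usage sketch (illustrative): the full multipart flow. Note that
# complete_multipart_upload_s3 expects (part_number, etag) tuples, which is
# exactly what upload_part_s3 returns per part.
#
#     upload_id = create_multipart_upload_s3(s3_client, bucket, "big-object")
#     parts = []
#     for part_num, path in enumerate(["/tmp/part1", "/tmp/part2"], start=1):
#         etag = upload_part_s3(s3_client, bucket, "big-object", upload_id, part_num, path)
#         parts.append((part_num, etag))
#     complete_multipart_upload_s3(s3_client, bucket, "big-object", upload_id, parts)

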
@allure.step("Put object tagging")
|
|
||||||
def put_object_tagging(s3_client, bucket_name: str, object_key: str, tags: list):
|
|
||||||
try:
|
|
||||||
tags = [{"Key": tag_key, "Value": tag_value} for tag_key, tag_value in tags]
|
|
||||||
tagging = {"TagSet": tags}
|
|
||||||
s3_client.put_object_tagging(Bucket=bucket_name, Key=object_key, Tagging=tagging)
|
|
||||||
log_command_execution("S3 Put object tagging", str(tags))
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(f"Got error during put object tagging: {err}") from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Get object tagging")
|
|
||||||
def get_object_tagging(s3_client, bucket_name: str, object_key: str) -> list:
|
|
||||||
try:
|
|
||||||
response = s3_client.get_object_tagging(Bucket=bucket_name, Key=object_key)
|
|
||||||
log_command_execution("S3 Get object tagging", response)
|
|
||||||
return response.get("TagSet")
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(f"Got error during get object tagging: {err}") from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Delete object tagging")
|
|
||||||
def delete_object_tagging(s3_client, bucket_name: str, object_key: str):
|
|
||||||
try:
|
|
||||||
response = s3_client.delete_object_tagging(Bucket=bucket_name, Key=object_key)
|
|
||||||
log_command_execution("S3 Delete object tagging", response)
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(f"Got error during delete object tagging: {err}") from err
|
|
||||||
|
|
||||||
|
|
||||||
@allure.step("Get object tagging")
|
|
||||||
def get_object_attributes(
|
|
||||||
s3_client,
|
|
||||||
bucket_name: str,
|
|
||||||
object_key: str,
|
|
||||||
*attributes: str,
|
|
||||||
version_id: str = None,
|
|
||||||
max_parts: int = None,
|
|
||||||
part_number: int = None,
|
|
||||||
get_full_resp=True,
|
|
||||||
) -> dict:
|
|
||||||
try:
|
|
||||||
if not isinstance(s3_client, AwsCliClient):
|
|
||||||
logger.warning("Method get_object_attributes is not supported by boto3 client")
|
|
||||||
return {}
|
|
||||||
response = s3_client.get_object_attributes(
|
|
||||||
bucket_name,
|
|
||||||
object_key,
|
|
||||||
*attributes,
|
|
||||||
version_id=version_id,
|
|
||||||
max_parts=max_parts,
|
|
||||||
part_number=part_number,
|
|
||||||
)
|
|
||||||
log_command_execution("S3 Get object attributes", response)
|
|
||||||
for attr in attributes:
|
|
||||||
assert attr in response, f"Expected attribute {attr} in {response}"
|
|
||||||
|
|
||||||
if get_full_resp:
|
|
||||||
return response
|
|
||||||
else:
|
|
||||||
return response.get(attributes[0])
|
|
||||||
|
|
||||||
except ClientError as err:
|
|
||||||
raise Exception(f"Got error during get object attributes: {err}") from err
|
|
||||||
|
|
||||||
|
|
||||||
def _make_objs_dict(key_names):
    objs_list = []
    for key in key_names:
        obj_dict = {"Key": key}
        objs_list.append(obj_dict)
    objs_dict = {"Objects": objs_list}
    return objs_dict

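# Example (illustrative): _make_objs_dict(["a", "b"]) returns
# {"Objects": [{"Key": "a"}, {"Key": "b"}]}, which is the Delete payload
# format that delete_objects_s3 passes to the S3 API.
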
@@ -4,7 +4,6 @@
 This module contains keywords for work with session token.
 """
 
-import allure
 import base64
 import json
 import logging
@@ -18,7 +17,6 @@ from common import ASSETS_DIR, NEOFS_ENDPOINT, WALLET_CONFIG
 from neo3 import wallet
 
 logger = logging.getLogger("NeoLogger")
-ROBOT_AUTO_KEYWORDS = False
 
 # path to neofs-cli executable
 NEOFS_CLI_EXEC = os.getenv("NEOFS_CLI_EXEC", "neofs-cli")
@@ -16,8 +16,6 @@ from grpc_responses import OBJECT_NOT_FOUND, error_matches_status
 
 logger = logging.getLogger("NeoLogger")
 
-ROBOT_AUTO_KEYWORDS = False
-
 
 @allure.step("Get Object Copies")
 def get_object_copies(complexity: str, wallet: str, cid: str, oid: str):
@@ -1,14 +1,11 @@
 #!/usr/bin/python3
 
-import allure
 import json
 
 import allure
 import neofs_verbs
 from neo3 import wallet
 
-ROBOT_AUTO_KEYWORDS = False
-
 
 @allure.step("Verify Head Tombstone")
 def verify_head_tombstone(wallet_path: str, cid: str, oid_ts: str, oid: str):