Compare commits

..

10 commits

Author SHA1 Message Date
ceb713b2e7 [#203] Check binaries versions at setup
Signed-off-by: Liza <e.chichindaeva@yadro.com>
2024-03-07 14:13:44 +03:00
6af5ad9de5 [#202] Use creds provider for s3 client
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2024-02-29 23:28:12 +00:00
9068b96d69 [#199] Update shards tests
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2024-02-20 08:38:55 +00:00
8164d35fc8 [#197] Add switch bucket node endpoint v1.5
2024-02-19 18:43:23 +03:00
c433fe2264 [#196] Update curl related function usages
Signed-off-by: Andrey Berezin <a.berezin@yadro.com>
2024-02-15 00:23:21 +03:00
251a7881c9 [#194] Add except bucket node
2024-02-14 13:36:09 +03:00
e453614381 [#192] Fix parse name CID
Signed-off-by: Dmitriy Zayakin <d.zayakin@yadro.com>
2024-02-09 11:08:16 +03:00
fe4341893b Fixed S3 policy test case
2024-02-02 08:24:04 +00:00
f7475f9841 [OBJECT-6537] Components versions check
Signed-off-by: Mikhail Kadilov <m.kadilov@yadro.com>
2024-02-01 09:13:02 +00:00
566f1a425f [#189] parallel get remote binaries versions
2024-01-31 14:19:17 +00:00
13 changed files with 85 additions and 136 deletions

View file

@ -8,10 +8,10 @@ hosts:
skip_readiness_check: True
force_transactions: True
services:
- name: s01
- name: frostfs-storage_01
attributes:
container_name: s01
config_path: ../frostfs-dev-env/services/storage/.storage.env
config_path: /etc/frostfs/storage/config.yml
wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
local_wallet_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../frostfs-dev-env/services/storage/wallet01.json
@ -22,10 +22,10 @@ hosts:
un_locode: "RU MOW"
http_hostname: ["no_hostname"]
s3_hostname: ["no_hostname"]
- name: s02
- name: frostfs-storage_02
attributes:
container_name: s02
config_path: ../frostfs-dev-env/services/storage/.storage.env
config_path: /etc/frostfs/storage/config.yml
wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
local_wallet_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../frostfs-dev-env/services/storage/wallet02.json
@ -36,10 +36,10 @@ hosts:
un_locode: "RU LED"
http_hostname: ["no_hostname"]
s3_hostname: ["no_hostname"]
- name: s03
- name: frostfs-storage_03
attributes:
container_name: s03
config_path: ../frostfs-dev-env/services/storage/.storage.env
config_path: /etc/frostfs/storage/config.yml
wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
local_wallet_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../frostfs-dev-env/services/storage/wallet03.json
@ -50,10 +50,10 @@ hosts:
un_locode: "SE STO"
http_hostname: ["no_hostname"]
s3_hostname: ["no_hostname"]
- name: s04
- name: frostfs-storage_04
attributes:
container_name: s04
config_path: ../frostfs-dev-env/services/storage/.storage.env
config_path: /etc/frostfs/storage/config.yml
wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
local_wallet_config_path: ./TemporaryDir/empty-password.yml
local_wallet_path: ../frostfs-dev-env/services/storage/wallet04.json
@ -64,7 +64,7 @@ hosts:
un_locode: "FI HEL"
http_hostname: ["no_hostname"]
s3_hostname: ["no_hostname"]
- name: s3-gate01
- name: frostfs-s3_01
attributes:
container_name: s3_gate
config_path: ../frostfs-dev-env/services/s3_gate/.s3.env
@ -73,7 +73,7 @@ hosts:
local_wallet_path: ../frostfs-dev-env/services/s3_gate/wallet.json
wallet_password: "s3"
endpoint_data0: https://s3.frostfs.devenv:8080
- name: http-gate01
- name: frostfs-http_01
attributes:
container_name: http_gate
config_path: ../frostfs-dev-env/services/http_gate/.http.env
@ -82,7 +82,7 @@ hosts:
local_wallet_path: ../frostfs-dev-env/services/http_gate/wallet.json
wallet_password: "one"
endpoint_data0: http://http.frostfs.devenv
- name: ir01
- name: frostfs-ir_01
attributes:
container_name: ir01
config_path: ../frostfs-dev-env/services/ir/.ir.env
@ -90,7 +90,7 @@ hosts:
local_wallet_config_path: ./TemporaryDir/password-other.yml
local_wallet_path: ../frostfs-dev-env/services/ir/az.json
wallet_password: "one"
- name: morph-chain01
- name: neo-go_01
attributes:
container_name: morph_chain
config_path: ../frostfs-dev-env/services/morph_chain/protocol.privnet.yml
@ -99,7 +99,7 @@ hosts:
local_wallet_path: ../frostfs-dev-env/services/morph_chain/node-wallet.json
wallet_password: "one"
endpoint_internal0: http://morph-chain.frostfs.devenv:30333
- name: main-chain01
- name: main-chain_01
attributes:
container_name: main_chain
config_path: ../frostfs-dev-env/services/chain/protocol.privnet.yml
@ -108,7 +108,7 @@ hosts:
local_wallet_path: ../frostfs-dev-env/services/chain/node-wallet.json
wallet_password: "one"
endpoint_internal0: http://main-chain.frostfs.devenv:30333
- name: coredns01
- name: coredns_01
attributes:
container_name: coredns
clis:
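
The hosting config diff above renames the devenv services from short aliases (s01…s04, s3-gate01, http-gate01, ir01, morph-chain01) to plugin-style names (frostfs-storage_01…04, frostfs-s3_01, frostfs-http_01, frostfs-ir_01, neo-go_01) and points the storage config_path at /etc/frostfs/storage/config.yml instead of the .storage.env file. A minimal sketch, assuming a hypothetical file path and the hosts/services layout shown above, that lists the renamed storage services:

```python
# Minimal sketch: list storage services by their new names from the hosting
# config. The file path is hypothetical; the hosts/services layout follows
# the diff above.
import yaml

with open("configs/devenv_hosting.yaml") as f:
    hosting_config = yaml.safe_load(f)

storage_services = [
    service["name"]
    for host in hosting_config["hosts"]
    for service in host.get("services", [])
    if service["name"].startswith("frostfs-storage")
]
print(storage_services)  # expected: frostfs-storage_01 .. frostfs-storage_04
```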

View file

@ -11,6 +11,7 @@ import pytest
import yaml
from dateutil import parser
from frostfs_testlib import plugins, reporter
from frostfs_testlib.credentials.interfaces import CredentialsProvider
from frostfs_testlib.healthcheck.interfaces import Healthcheck
from frostfs_testlib.hosting import Hosting
from frostfs_testlib.reporter import AllureHandler, StepsLogger
@ -23,7 +24,6 @@ from frostfs_testlib.resources.common import (
)
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper, VersioningStatus
from frostfs_testlib.shell import LocalShell, Shell
from frostfs_testlib.steps.cli.container import list_containers
from frostfs_testlib.steps.cli.object import get_netmap_netinfo
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage import get_service_registry
@ -266,25 +266,22 @@ def s3_client(
client_shell: Shell,
s3_policy: Optional[str],
cluster: Cluster,
auth_container_placement_policy: str,
request: pytest.FixtureRequest,
) -> S3ClientWrapper:
wallet = WalletInfo(path=default_wallet, password=DEFAULT_WALLET_PASS)
node = cluster.cluster_nodes[0]
(cid, access_key_id, secret_access_key) = s3_helper.init_s3_credentials(
wallet,
client_shell,
cluster,
s3gates=[cluster_node.s3_gate for cluster_node in cluster.cluster_nodes],
policy=s3_policy,
container_placement_policy=auth_container_placement_policy,
)
containers_list = list_containers(wallet.path, shell=client_shell, endpoint=cluster.default_rpc_endpoint)
assert cid in containers_list, f"Expected cid {cid} in {containers_list}"
credentials_provider = CredentialsProvider(node.host.config.s3_creds_plugin_name)
credentials_provider.stash["cluster"] = cluster
credentials_provider.stash["wallet"] = wallet
credentials_provider.stash["shell"] = client_shell
credentials_provider.stash["location_constraints"] = s3_policy
access_key_id, secret_access_key = credentials_provider.S3.provide(node)
s3_client_cls = request.param
client = s3_client_cls(access_key_id, secret_access_key, cluster.default_s3_gate_endpoint)
yield client
return client
@pytest.fixture
@ -322,11 +319,11 @@ def two_buckets(s3_client: S3ClientWrapper, request: pytest.FixtureRequest):
s3_helper.delete_bucket_with_objects(s3_client, bucket_name)
@reporter.step("[Autouse/Session] Check binary versions")
@allure.title("[Autouse/Session] Check binary versions")
@pytest.fixture(scope="session", autouse=True)
def check_binary_versions(hosting: Hosting, client_shell: Shell, request: pytest.FixtureRequest):
local_versions = version_utils.get_local_binaries_versions(client_shell)
remote_versions = version_utils.get_remote_binaries_versions(hosting)
remote_versions, exсeptions_remote_binaries_versions = version_utils.get_remote_binaries_versions(hosting)
all_versions = {
**local_versions,
@ -416,21 +413,6 @@ def default_wallet(wallet_factory: WalletFactory) -> str:
return wallet.path
@reporter.step("[Class]: Container placement policy for keys")
@pytest.fixture(scope="class")
def auth_container_placement_policy(cluster: Cluster, request: pytest.FixtureRequest):
placeholders = {
"$ALPHABET_NODE_COUNT$": 4 if len(cluster.cluster_nodes) < 8 else 8,
"$NODE_COUNT$": len(cluster.cluster_nodes),
}
placement_policy = None
if "param" in request.__dict__:
placement_policy = request.param
for key, value in placeholders.items():
placement_policy = placement_policy.replace(key, str(value))
return placement_policy
@pytest.fixture()
@allure.title("Select random node for testing")
def node_under_test(cluster: Cluster) -> ClusterNode:
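
The conftest.py changes above replace the manual s3_helper.init_s3_credentials flow with the testlib CredentialsProvider (selected per host via s3_creds_plugin_name) and drop the auth_container_placement_policy fixture. A hedged sketch of how a test might request the resulting fixture, parametrized over the two S3 wrappers imported above; the bucket helper methods are assumptions, not taken from this diff:

```python
# Hedged usage sketch of the updated s3_client fixture. The fixture reads
# request.param (see the diff above), so indirect parametrization selects
# the wrapper class. create_bucket/list_buckets are assumed wrapper methods.
import pytest
from frostfs_testlib.s3 import AwsCliClient, Boto3ClientWrapper, S3ClientWrapper


@pytest.mark.parametrize("s3_client", [AwsCliClient, Boto3ClientWrapper], indirect=True)
def test_bucket_roundtrip(s3_client: S3ClientWrapper):
    bucket = s3_client.create_bucket()
    assert bucket in s3_client.list_buckets()
```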

View file

@ -54,7 +54,7 @@ class TestContainer(ClusterTestBase):
info_to_check = {
f"basic ACL: {PRIVATE_ACL_F} (private)",
f"owner ID: {json_wallet.get('accounts')[0].get('address')}",
f"container ID: {cid}",
f"CID: {cid}",
}
if name:
info_to_check.add(f"Name={name}")
@ -115,6 +115,10 @@ class TestContainer(ClusterTestBase):
with reporter.step("Delete containers and check they were deleted"):
for cid in cids:
delete_container(wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True)
containers_list = list_containers(wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
delete_container(
wallet, cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint, await_mode=True
)
containers_list = list_containers(
wallet, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint
)
assert cid not in containers_list, "Container not deleted"
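
The container tests now expect the CLI to label the identifier as "CID:" rather than "container ID:", and the delete loop is reflowed across several lines. A hedged sketch of the kind of substring check the info_to_check set feeds; the helper name and assertion wording are illustrative, not from the source:

```python
# Illustrative helper: every expected entry, including the renamed "CID:"
# label, must appear in the container info output text.
def assert_container_info_present(output: str, info_to_check: set[str]) -> None:
    missing = {item for item in info_to_check if item not in output}
    assert not missing, f"Missing entries in container info: {missing}"
```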

View file

@ -20,6 +20,7 @@ from frostfs_testlib.steps.node_management import (
wait_for_node_to_be_ready,
)
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.steps.s3.s3_helper import search_nodes_with_bucket
from frostfs_testlib.storage.cluster import Cluster, ClusterNode, S3Gate, StorageNode
from frostfs_testlib.storage.controllers import ClusterStateController, ShardsWatcher
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
@ -126,6 +127,7 @@ class TestFailoverStorage(ClusterTestBase):
def test_unhealthy_tree(
self,
s3_client: S3ClientWrapper,
default_wallet: str,
simple_object_size: ObjectSize,
cluster_state_controller: ClusterStateController,
after_run_return_all_stopped_services,
@ -155,11 +157,22 @@ class TestFailoverStorage(ClusterTestBase):
put_object = s3_client.put_object(bucket, file_path)
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
with reporter.step("Turn off all storage nodes except default"):
for node in self.cluster.cluster_nodes[1:]:
node_bucket = search_nodes_with_bucket(
cluster=self.cluster,
bucket_name=bucket,
wallet=default_wallet,
shell=self.shell,
endpoint=self.cluster.storage_nodes[0].get_rpc_endpoint(),
)[0]
with reporter.step("Turn off all storage nodes except bucket node"):
for node in [node_to_stop for node_to_stop in self.cluster.cluster_nodes if node_to_stop != node_bucket]:
with reporter.step(f"Stop storage service on node: {node}"):
cluster_state_controller.stop_service_of_type(node, StorageNode)
with reporter.step(f"Change s3 endpoint to bucket node"):
s3_client.set_endpoint(node_bucket.s3_gate.get_endpoint())
with reporter.step("Check that object is available"):
s3_helper.check_objects_in_bucket(s3_client, bucket, expected_objects=[file_name])
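
test_unhealthy_tree now locates the node that actually holds the bucket's container via search_nodes_with_bucket, stops every other storage node, and repoints the S3 client at that node's gateway. A hedged sketch of the lookup, with the call signature taken from the diff and the wrapper function itself being illustrative:

```python
# Illustrative wrapper around the lookup used above: returns the first
# cluster node whose storage service holds the bucket's container.
from frostfs_testlib.steps.s3.s3_helper import search_nodes_with_bucket


def bucket_holder_node(cluster, bucket: str, wallet: str, shell):
    nodes = search_nodes_with_bucket(
        cluster=cluster,
        bucket_name=bucket,
        wallet=wallet,
        shell=shell,
        endpoint=cluster.storage_nodes[0].get_rpc_endpoint(),
    )
    return nodes[0]
```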

View file

@ -114,5 +114,5 @@ class Test_http_bearer(ClusterTestBase):
cid=user_container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
request_node=self.cluster.cluster_nodes[0],
)
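
This file and the HTTP gate tests below all follow the same signature change: the helpers no longer take an explicit endpoint plus http_hostname pair and instead accept a ClusterNode (node= or request_node=), from which the gate endpoint and Host header are derived. A hedged before/after sketch; the module path in the import is an assumption:

```python
# Hedged sketch of the signature change; the module path below is assumed.
from frostfs_testlib.steps.http.http_gate import get_via_http_gate


def download_via_gate(cluster, cid: str, oid: str) -> str:
    # old style (removed in this change):
    #   get_via_http_gate(cid=cid, oid=oid,
    #                     endpoint=cluster.default_http_gate_endpoint,
    #                     http_hostname=cluster.default_http_hostname[0])
    # new style: pass the cluster node and let the helper resolve the endpoint
    return get_via_http_gate(cid=cid, oid=oid, node=cluster.cluster_nodes[0])
```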

View file

@ -92,8 +92,7 @@ class TestHttpGate(ClusterTestBase):
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
request_node=self.cluster.cluster_nodes[0],
)
@ -150,8 +149,7 @@ class TestHttpPut(ClusterTestBase):
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
request_node=self.cluster.cluster_nodes[0],
)
@allure.link(
@ -204,8 +202,7 @@ class TestHttpPut(ClusterTestBase):
file_name=file_path,
cid=cid,
attrs=attributes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
node=self.cluster.cluster_nodes[0],
)
@allure.title("Expiration-Epoch in HTTP header (epoch_gap={epoch_gap})")
@ -243,12 +240,7 @@ class TestHttpPut(ClusterTestBase):
else:
oids_to_be_expired.append(oid)
with reporter.step("This object can be got"):
get_via_http_gate(
cid=cid,
oid=oid,
endpoint=http_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
get_via_http_gate(cid=cid, oid=oid, node=self.cluster.cluster_nodes[0])
self.tick_epoch()
@ -260,18 +252,12 @@ class TestHttpPut(ClusterTestBase):
try_to_get_object_and_expect_error(
cid=cid,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern=OBJECT_NOT_FOUND_ERROR,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
for oid in oids_to_be_valid:
with reporter.step(f"{oid} shall be valid and can be got"):
get_via_http_gate(
cid=cid,
oid=oid,
endpoint=http_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
get_via_http_gate(cid=cid, oid=oid, node=self.cluster.cluster_nodes[0])
@allure.title("Zip in HTTP header")
def test_zip_in_http(self, complex_object_size: ObjectSize, simple_object_size: ObjectSize):
@ -302,12 +288,7 @@ class TestHttpPut(ClusterTestBase):
endpoint=self.cluster.default_http_gate_endpoint,
)
dir_path = get_via_zip_http_gate(
cid=cid,
prefix=common_prefix,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
dir_path = get_via_zip_http_gate(cid=cid, prefix=common_prefix, node=self.cluster.cluster_nodes[0])
with reporter.step("Verify hashes"):
assert get_file_hash(f"{dir_path}/file1") == get_file_hash(file_path_simple)
@ -345,8 +326,7 @@ class TestHttpPut(ClusterTestBase):
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
request_node=self.cluster.cluster_nodes[0],
)
verify_object_hash(
oid=oid_curl,
@ -355,8 +335,7 @@ class TestHttpPut(ClusterTestBase):
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
request_node=self.cluster.cluster_nodes[0],
object_getter=get_via_http_curl,
)
@ -393,7 +372,6 @@ class TestHttpPut(ClusterTestBase):
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
request_node=self.cluster.cluster_nodes[0],
object_getter=get_via_http_curl,
)

View file

@ -97,8 +97,7 @@ class Test_http_headers(ClusterTestBase):
file_name=storage_object_1.file_path,
cid=storage_object_1.cid,
attrs={"Chapter2": storage_object_1.attributes["Chapter2"]},
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
node=self.cluster.cluster_nodes[0],
)
@allure.title("Get object2 with different attributes, then delete object2 and get object1")
@ -129,8 +128,7 @@ class Test_http_headers(ClusterTestBase):
file_name=storage_object_2.file_path,
cid=storage_object_2.cid,
attrs=attributes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
node=self.cluster.cluster_nodes[0],
)
with reporter.step("Delete object#2 and verify is the container deleted"):
delete_object(
@ -143,9 +141,8 @@ class Test_http_headers(ClusterTestBase):
try_to_get_object_and_expect_error(
cid=storage_object_2.cid,
oid=storage_object_2.oid,
node=self.cluster.cluster_nodes[0],
error_pattern=OBJECT_ALREADY_REMOVED_ERROR,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
storage_objects_with_attributes.remove(storage_object_2)
@ -158,8 +155,7 @@ class Test_http_headers(ClusterTestBase):
file_name=storage_object_1.file_path,
cid=storage_object_1.cid,
attrs=key_value_pair,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
node=self.cluster.cluster_nodes[0],
)
@allure.title("[NEGATIVE] Put object and get right after container is deleted")
@ -215,9 +211,8 @@ class Test_http_headers(ClusterTestBase):
try_to_get_object_via_passed_request_and_expect_error(
cid=storage_object_1.cid,
oid="",
node=self.cluster.cluster_nodes[0],
error_pattern=error_pattern,
attrs=attrs_obj3,
http_request_path=request,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)

View file

@ -91,8 +91,7 @@ class Test_http_object(ClusterTestBase):
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
request_node=self.cluster.cluster_nodes[0],
)
with reporter.step("[Negative] try to get object: [get/$CID/chapter1/peace]"):
@ -102,11 +101,10 @@ class Test_http_object(ClusterTestBase):
try_to_get_object_via_passed_request_and_expect_error(
cid=cid,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern=expected_err_msg,
http_request_path=request,
attrs=attrs,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
with reporter.step("Download the object with attribute [get_by_attribute/$CID/chapter1/peace]"):
@ -115,18 +113,16 @@ class Test_http_object(ClusterTestBase):
file_name=file_path,
cid=cid,
attrs=attrs,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
node=self.cluster.cluster_nodes[0],
)
with reporter.step("[Negative] try to get object: get_by_attribute/$CID/$OID"):
request = f"/get_by_attribute/{cid}/{oid}"
try_to_get_object_via_passed_request_and_expect_error(
cid=cid,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern=expected_err_msg,
http_request_path=request,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
@allure.title("Put over s3, Get over HTTP with bucket name and key")
@ -156,8 +152,7 @@ class Test_http_object(ClusterTestBase):
obj_http = get_via_http_gate(
cid=None,
oid=None,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
node=self.cluster.cluster_nodes[0],
request_path=request,
)
with reporter.step("Verify hashes"):

View file

@ -61,6 +61,5 @@ class Test_http_streaming(ClusterTestBase):
cid=cid,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
request_node=self.cluster.cluster_nodes[0],
)

View file

@ -124,8 +124,7 @@ class Test_http_system_header(ClusterTestBase):
cid=user_container,
shell=self.shell,
nodes=self.cluster.storage_nodes,
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
request_node=self.cluster.cluster_nodes[0],
)
head = head_object(
wallet=self.wallet,
@ -222,9 +221,8 @@ class Test_http_system_header(ClusterTestBase):
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
@ -262,9 +260,8 @@ class Test_http_system_header(ClusterTestBase):
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
@ -304,9 +301,8 @@ class Test_http_system_header(ClusterTestBase):
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):
@ -356,9 +352,8 @@ class Test_http_system_header(ClusterTestBase):
try_to_get_object_and_expect_error(
cid=user_container,
oid=oid,
node=self.cluster.cluster_nodes[0],
error_pattern="404 Not Found",
endpoint=self.cluster.default_http_gate_endpoint,
http_hostname=self.cluster.default_http_hostname[0],
)
# check that object is not available via grpc
with pytest.raises(Exception, match=OBJECT_NOT_FOUND):

View file

@ -64,7 +64,7 @@ class TestS3GatePolicy(ClusterTestBase):
)
assert copies_1 == 1
for cluster_node in self.cluster.cluster_nodes:
cid_2 = search_container_by_name(name=bucket_1, node=cluster_node)
cid_2 = search_container_by_name(name=bucket_2, node=cluster_node)
if cid_2:
break
copies_2 = get_simple_object_copies(
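
The policy test fix above resolves a copy-paste error: the second bucket's container must be looked up by bucket_2, not bucket_1, before counting its object copies. A hedged sketch of that lookup as a standalone helper; the module path mirrors the list_containers import earlier in this diff:

```python
# Illustrative helper: resolve the container behind a bucket name by asking
# each cluster node until one returns a CID; returns None if nothing matches.
from typing import Optional

from frostfs_testlib.steps.cli.container import search_container_by_name


def resolve_bucket_cid(cluster, bucket_name: str) -> Optional[str]:
    for cluster_node in cluster.cluster_nodes:
        cid = search_container_by_name(name=bucket_name, node=cluster_node)
        if cid:
            return cid
    return None
```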

View file

@ -23,21 +23,21 @@ def test_binaries_versions(request: FixtureRequest, hosting: Hosting):
Compare binaries versions from external source (url) and deployed on servers.
"""
with reporter.step("Get binaries versions from servers"):
got_versions = get_remote_binaries_versions(hosting)
got_versions, exсeptions_remote_binaries_versions = get_remote_binaries_versions(hosting)
environment_dir = request.config.getoption("--alluredir") or ASSETS_DIR
env_file = os.path.join(environment_dir, "environment.properties")
env_properties = read_env_properties(env_file)
# compare versions from servers and file
exeptions = []
exсeptions = []
additional_env_properties = {}
for binary_name, binary in got_versions.items():
version = binary["version"]
requires_check = binary["check"]
if requires_check and not fullmatch(r"^\d+\.\d+\.\d+(-.*)?(?<!dirty)", version):
exeptions.append(f"{binary_name}: Actual version doesn't conform to format '0.0.0-000-aaaaaaa': {version}")
exсeptions.append(f"{binary_name}: Actual version doesn't conform to format '0.0.0-000-aaaaaaa': {version}")
# If some binary was not listed in the env properties file, let's add it
# so that we have full information about versions in allure report
@ -47,9 +47,11 @@ def test_binaries_versions(request: FixtureRequest, hosting: Hosting):
if env_properties and additional_env_properties:
save_env_properties(env_file, additional_env_properties)
exсeptions.extend(exсeptions_remote_binaries_versions)
# create clear beautiful error with aggregation info
if exeptions:
msg = "\n".join(exeptions)
if exсeptions:
msg = "\n".join(exсeptions)
raise AssertionError(f"Found binaries with unexpected versions:\n{msg}")
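
test_binaries_versions now also receives the collection failures from get_remote_binaries_versions and folds them into the same aggregated assertion. The regex in this hunk accepts release-style versions and rejects anything ending in "dirty"; a small worked example, with sample version strings that are illustrative only:

```python
# Worked example of the format check used above; the sample versions are
# illustrative, not taken from any real node.
from re import fullmatch

VERSION_PATTERN = r"^\d+\.\d+\.\d+(-.*)?(?<!dirty)"

for version in ["0.38.3", "0.38.3-14-g1a2b3c4", "0.38.3-dirty"]:
    ok = fullmatch(VERSION_PATTERN, version) is not None
    print(f"{version}: {'conforms' if ok else 'does not conform'}")
# 0.38.3 and 0.38.3-14-g1a2b3c4 conform; 0.38.3-dirty is rejected by the
# (?<!dirty) lookbehind.
```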

View file

@ -75,20 +75,6 @@ class TestControlShard(ClusterTestBase):
)
delete_container(wallet=default_wallet, cid=cid, shell=self.shell, endpoint=self.cluster.default_rpc_endpoint)
@staticmethod
def get_shards_from_config(node: StorageNode) -> list[Shard]:
config_file = node.get_shard_config_path()
file_type = pathlib.Path(config_file).suffix
parser_method = {
".env": node.get_shards_from_env,
".yaml": node.get_shards,
".yml": node.get_shards,
}
shards = parser_method[file_type]()
return shards
@staticmethod
def get_shards_from_cli(node: StorageNode) -> list[Shard]:
wallet_path = node.get_remote_wallet_path()
@ -120,7 +106,7 @@ class TestControlShard(ClusterTestBase):
@allure.title("All shards are available")
def test_control_shard(self, cluster: Cluster):
for storage_node in cluster.storage_nodes:
shards_from_config = self.get_shards_from_config(storage_node)
shards_from_config = storage_node.get_shards()
shards_from_cli = self.get_shards_from_cli(storage_node)
assert set(shards_from_config) == set(shards_from_cli)
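
test_control_shard drops its local get_shards_from_config helper (which dispatched on the shard config file suffix) and relies on StorageNode.get_shards() directly, then compares that set with the CLI view. An illustrative variant of the same comparison that also reports which shard entries differ; the helper name and message are assumptions, not part of the change:

```python
# Illustrative variant of the equality check above: report exactly which
# shard entries differ between the node config and the CLI output.
def assert_same_shards(shards_from_config, shards_from_cli) -> None:
    config_set, cli_set = set(shards_from_config), set(shards_from_cli)
    diff = config_set.symmetric_difference(cli_set)
    assert not diff, f"Shards differ between config and CLI: {diff}"
```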