Compare commits

...
Sign in to create a new pull request.

5 commits

Author SHA1 Message Date
cf950f6313 WIP: Integrate component test fixtures into testlib
Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2025-05-07 16:56:55 +03:00
211f9a0abd Implement fixtures for deploying FrostFS components
Exported from a private playground repo @ commit
ba8c88d7e11e8e8c17e54ca1317bc2dbf8b52204

Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2025-05-07 15:35:07 +03:00
1fbd7b7de1 Allow ClusterNode.__init__ with missing services
Not all component test environments will provide the full set of services.
It's ok for gateways and even storage nodes to be completely missing.

Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2025-05-07 15:35:07 +03:00
9261d46ed5 Upgrade docker API bindings
Version in pyproject.toml was not strictly bound anyway.
Now requirements.txt uses the same version spec.

Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2025-05-07 15:35:07 +03:00
58b14717aa [#379] Add missing dependencies: configobj, httpx
All checks were successful
DCO action / DCO (pull_request) Successful in 24s
Some packages were missing from the dependency list even though they were
being imported from our code.

Signed-off-by: Vitaliy Potyarkin <v.potyarkin@yadro.com>
2025-05-07 15:33:53 +03:00
15 changed files with 1694 additions and 9 deletions

View file

@ -29,6 +29,10 @@ dependencies = [
"tenacity==8.0.1",
"boto3==1.35.30",
"boto3-stubs[s3,iam,sts]==1.35.30",
"pydantic==2.10.6",
"configobj==5.0.6",
"httpx==0.28.1",
"testcontainers==4.10.0",
]
requires-python = ">=3.10"
@ -43,6 +47,7 @@ allure = "frostfs_testlib.reporter.allure_handler:AllureHandler"
[project.entry-points."frostfs.testlib.hosting"]
docker = "frostfs_testlib.hosting.docker_host:DockerHost"
component_tests = "frostfs_testlib.component_tests.hosting:ContainerHost"
[project.entry-points."frostfs.testlib.healthcheck"]
basic = "frostfs_testlib.healthcheck.basic_healthcheck:BasicHealthcheck"
@ -60,6 +65,7 @@ frostfs-ir = "frostfs_testlib.storage.dataclasses.frostfs_services:InnerRing"
[project.entry-points."frostfs.testlib.credentials_providers"]
authmate = "frostfs_testlib.credentials.authmate_s3_provider:AuthmateS3CredentialsProvider"
wallet_factory = "frostfs_testlib.credentials.wallet_factory_provider:WalletFactoryProvider"
component_tests = "frostfs_testlib.component_tests.hosting:ClientWalletFactory"
[project.entry-points."frostfs.testlib.bucket_cid_resolver"]
frostfs = "frostfs_testlib.clients.s3.curl_bucket_resolver:CurlBucketContainerResolver"
@ -92,4 +98,5 @@ filterwarnings = [
testpaths = ["tests"]
[project.entry-points.pytest11]
component_tests = "frostfs_testlib.component_tests.fixtures"
testlib = "frostfs_testlib"

View file

@ -1,5 +1,5 @@
allure-python-commons==2.13.2
docker==4.4.0
docker>=4.4.0
neo-mamba==1.0.0
paramiko==2.10.3
pexpect==4.8.0
@ -11,6 +11,9 @@ pytest==7.1.2
boto3==1.35.30
boto3-stubs[s3,iam,sts]==1.35.30
pydantic==2.10.6
configobj==5.0.6
httpx==0.28.1
testcontainers==4.10.0
# Dev dependencies
black==22.8.0

View file

@ -0,0 +1 @@

View file

@ -0,0 +1,267 @@
import codecs
import io
import os
import re
import tarfile
import threading
from collections.abc import Mapping
from pathlib import Path
from textwrap import dedent
from docker.models.containers import Container, ExecResult
from testcontainers.core.container import DockerContainer
from testcontainers.core.network import Network
class ContainerizedService:
    """
    Testcontainers wrapper specialized for our use case.
    Extra features that testcontainers do not provide:
    - Copy files into container prior to executing the entrypoint.
    - Stop and restart the service running in container.

    Implementation note: the container runs an interactive `sh` as PID1
    (see create()); the actual service command is launched as a background
    job of that shell, which is what makes stop()/start() possible without
    recreating the container.
    """

    _testcontainer: DockerContainer = None  # testcontainers wrapper, None until create()
    _container: Container = None  # underlying docker-py container object
    _network: Network = None  # docker network the container is attached to
    name: str
    image: str
    command: str
    _default_cmd_template: str = ""  # template for __call__, e.g. 'frostfs-adm ... {command}'
    _default_cmd_rewrite = None  # optional callable rewriting commands before templating

    def __init__(self, image: str, command: str, name: str = None, network: Network = None, default_command: str = "{command}") -> None:
        self.name = name
        self.image = image
        self.command = command
        self._network = network
        self._default_cmd_template = default_command
        # Cache: container id -> IP address (see the `ip` property).
        self._ip = {}

    def start(self):
        """Start (or restart) the service inside the container."""
        # Lazily create the container on first start.
        if self._testcontainer is None:
            self.create()
        # Make sure no previous instance of the service is still running.
        self.stop()
        # Run the service as a background job of the PID1 shell.
        self._pid1(f"{{ {self.command} ; }} &")

    def stop(self):
        """Stop the service by killing all background jobs of the PID1 shell."""
        self._pid1(
            """
            kill -9 $(jobs -p)
            wait
            """
        )

    @property
    def ip(self) -> str:
        """
        IP address of the container on the fixture network.

        Cached per container id; raises RuntimeError if the container
        is not attached to the expected network.
        """
        if self._container.id in self._ip:
            return self._ip[self._container.id]
        inspect = self._container.client.api.inspect_container(self._container.id)
        for network, options in inspect["NetworkSettings"]["Networks"].items():
            if network == self._network.name:
                self._ip[self._container.id] = options["IPAddress"]
                return self._ip[self._container.id]
        # for/else: loop finished without finding the expected network.
        else:
            raise RuntimeError(f"container not attached to {self._network.name}: {self._container.name}")

    def signal(self, signal):
        """Send a signal (name or number) to the service processes."""
        self._pid1(f"kill -{signal} $(jobs -p)")

    def create(self) -> None:
        """Create and start the container with an interactive shell as PID1."""
        c = DockerContainer(
            self.image,
            entrypoint="",
            tty=True,
            stdin_open=True,
            user=0,
            working_dir="/",
        )
        c.with_network(self._network)
        c.with_name(self.name)
        # sh flags: -i interactive, -m enable job control, -s read commands from stdin.
        c.with_command(["sh", "-ims"])
        c.start()
        self._testcontainer = c
        self._container = c._container
        # Docker may have generated a name if none was given; record the real one.
        self.name = self._container.name
        # Smoke test: prove the PID1 shell accepts commands.
        self._pid1("uname -a; whoami; date")

    def destroy(self) -> None:
        """Stop and remove the container."""
        self._testcontainer.stop()
        self._testcontainer = None
        self._container = None

    def _pid1(self, command: str) -> None:
        """
        Execute a shell command in PID1. No feedback is provided, use with extreme caution!
        """
        # Commands are typed into the PID1 shell's stdin via the attach socket.
        command = f"{dedent(command).strip()}\n"
        socket = self._container.attach_socket(params=dict(stdin=True, stream=True))
        socket._sock.send(command.encode())
        socket._sock.close()
        socket.close()

    def add_file(self, src: Path, dest: Path) -> None:
        """
        Add file from local filesystem into a running container.
        Keeps a copy of the whole file in memory (TODO: stream directly from disk).
        """
        file = tarfile.TarInfo(str(dest))
        file.size = Path(src).stat().st_size
        # Guard against exhausting memory: the tar archive is built in RAM.
        if file.size > (64 << 20):
            raise ValueError(f"file too large for current add_file implementation: {src} ({file.size >> 20}MB)")
        if self._container is None:
            self.create()
        archive = io.BytesIO()
        with tarfile.open(fileobj=archive, mode="w|") as tar:
            with open(src, "rb") as f:
                tar.addfile(file, f)
        archive.seek(0)
        # put_archive unpacks the tar at "/"; dest is an absolute member name.
        self._container.put_archive("/", archive)

    def add_directory(self, src: Path, dest: Path) -> None:
        """Add all files from directory (one by one)."""
        dest = Path(dest)
        for root, _, files in os.walk(src):
            root = Path(root)
            subdir = root.relative_to(src)
            for file in files:
                file = Path(file)
                self.add_file(root / file, dest / subdir / file)

    def fetch(self, src: Path, dest: Path) -> None:
        """
        Fetch file or directory from a running container.
        """
        src = Path(src)
        dest = Path(dest)
        # get_archive streams a tar of `src`; rename members from src to dest on extraction.
        stream, stat = self._container.get_archive(str(src))
        with tarfile.open(fileobj=generator_to_stream(stream), mode="r|*") as tar:
            tar.extractall(dest.parent, filter=_tar_rename(src, dest))

    def logs(self, stdout=True, stderr=True, tail="all", since=None, timeout=None):
        """
        Log stream from ContainerizedService.
        You should probably call close() on received object after you're done.
        """
        return LogStream(
            self._container.logs(
                stream=True,
                stdout=stdout,
                stderr=stderr,
                tail=tail,
                since=since,
            ),
            timeout=timeout,
        )

    def wait(self, regex, **kwargs):
        """
        Wait until a line appears in container logs that matches provided regex.

        Extra kwargs are forwarded to logs(); pass timeout=... to bound the wait.
        """
        if isinstance(regex, str):
            regex = re.compile(regex)
        for line in self.logs(**kwargs):
            if regex.search(line):
                return
        raise TimeoutError("log stream was closed before a matching line appeared")

    def exec(self, command: str, env: Mapping = None) -> ExecResult:
        """
        Execute a command in container shell.
        """
        return self._container.exec_run(["sh", "-c", command], detach=False, environment=env)

    def __call__(self, command: str, env: Mapping = None) -> str:
        """
        Execute default templated command in container shell.

        Raises ValueError if no default command template was configured
        or if the command exits with a non-zero status.
        """
        if not self._default_cmd_template:
            raise ValueError(f"default command was not specified during container initialization")
        # Optional hook, e.g. injecting --rpc-endpoint into frostfs-adm calls.
        if self._default_cmd_rewrite:
            command = self._default_cmd_rewrite(command)
        result = self.exec(
            command=self._default_cmd_template.format(command=command),
            env=env,
        )
        if result.exit_code != 0:
            raise ValueError(f"exit code {result.exit_code}")
        return result.output.decode()
class LogStream:
    """
    Line-oriented iterator over a raw Docker log byte stream.

    Iterating yields decoded text lines. When ``timeout`` is given, a
    watchdog thread closes the underlying stream after that many seconds
    unless close() is called first.
    """

    def __init__(self, stream, timeout=None):
        self._stream = stream
        self._buffer = bytearray()
        # Held while the stream is open; released by close() to cancel the watchdog.
        self._cancel = threading.Lock()
        if timeout:

            def _cancel():
                "Close the stream either when timeout is reached or when the lock is released."
                self._cancel.acquire(timeout=timeout)
                self.close()

            self._cancel.acquire()
            # BUG FIX: Thread.run() executed the watchdog synchronously in the
            # calling thread, blocking the constructor for the full timeout and
            # closing the stream before it could ever be read. start() spawns a
            # real background thread; daemon=True so it cannot block interpreter exit.
            threading.Thread(target=_cancel, daemon=True).start()

    def __iter__(self):
        return self

    def __next__(self) -> str:
        self._buffer.clear()
        for chunk in self._stream:
            self._buffer += chunk
            # Docker may stream byte-by-byte; a newline byte or any larger
            # chunk is treated as the end of a line.
            if chunk == b"\n" or len(chunk) > 1:
                break
        if len(self._buffer) == 0:
            raise StopIteration
        return self._buffer.decode()

    def close(self):
        try:
            self._cancel.release()
        except RuntimeError:  # release unlocked lock (no watchdog, or already released)
            pass
        return self._stream.close()
def _tar_rename(src: Path, dest: Path) -> tarfile.TarInfo | None:
def _filter(member: tarfile.TarInfo, path: str) -> tarfile.TarInfo | None:
archive_path = Path(member.name)
if archive_path.is_absolute():
subpath = archive_path.relative_to(src).name
member = member.replace(name=subpath)
if member.name == src.name:
member = member.replace(name=dest.name)
elif member.name.startswith(f"{src.name}/"):
member = member.replace(name=member.name.replace(src.name, dest.name, 1))
return tarfile.data_filter(member, path)
return _filter
def generator_to_stream(generator, buffer_size=io.DEFAULT_BUFFER_SIZE):
    """
    Wrap a generator of byte chunks into a buffered, readable file object.

    Based on https://stackoverflow.com/a/51546783

    Fix: ``buffer_size`` was accepted but never passed to ``io.BufferedReader``,
    so the parameter silently had no effect.
    """

    class GeneratorStream(io.RawIOBase):
        def __init__(self):
            # Tail of the last chunk that did not fit into the caller's buffer.
            self.leftover = None

        def readable(self):
            return True

        def readinto(self, b):
            capacity = len(b)  # we may return at most this many bytes
            try:
                chunk = self.leftover or next(generator)
            except StopIteration:
                return 0  # EOF
            output, self.leftover = chunk[:capacity], chunk[capacity:]
            b[: len(output)] = output
            return len(output)

    return io.BufferedReader(GeneratorStream(), buffer_size)

View file

@ -0,0 +1,707 @@
"""
Reusable fixtures for deploying FrostFS components with all the dependencies.
"""
# TODO: This file is larger that desirable.
# TODO: If anyone knows how to break it into fixtures/base.py, fixtures/alphabet.py, fixtures/... - be my guest
import gzip
import importlib.resources
import json
import random
import re
import shlex
import shutil
import string
import subprocess
import tarfile
import tempfile
from base64 import b64decode
from collections.abc import Mapping
from enum import Enum
from itertools import chain
from pathlib import Path
from types import SimpleNamespace
from typing import List
from urllib.request import urlopen
import pytest
import yaml
from neo3.wallet.account import Account
from neo3.wallet.wallet import Wallet
from testcontainers.core.network import Network
from .container import ContainerizedService, ExecResult
_SCOPE = "session"
_PREFIX = "frostfs-"
glagolic = [
"az",
"buky",
"vedi",
"glagoli",
"dobro",
"yest",
"zhivete",
"dzelo",
"zemlja",
"izhe",
"izhei",
"gerv",
"kako",
"ljudi",
"mislete",
"nash",
"on",
"pokoj",
"rtsi",
"slovo",
"tverdo",
"uk",
]
class Component(Enum):
    """
    Known FrostFS deployment components.

    Values double as config-key prefixes (e.g. "storage_image"),
    directory names and template file names ("<component>.yml").
    """

    ADM = "adm"
    ALPHABET = "alphabet"
    CONTRACT = "contract"
    HTTPGW = "httpgw"
    INNERRING = "innerring"
    LOCODE = "locode"
    NEOGO = "neogo"
    S3GW = "s3gw"
    STORAGE = "storage"

    def __str__(self):
        # Render as the bare component name so f"{service}_dir" style keys work.
        return self.value

    def __len__(self):
        # Length of the name; used when stripping prefixes from config keys.
        return len(self.value)
@pytest.fixture(scope=_SCOPE)
def _deployment(_deployment_dir) -> dict:
    """
    Read deployment options from environment.
    DO NOT REFERENCE THIS FIXTURE DIRECTLY FROM TESTS!
    This fixture is to be referenced only from other *_deployment fixtures.
    Runtime overrides will not be applied to _deployment() -
    only to specific service fixtures, e.g. alphabet_deployment().
    """
    # Config templates ship with the package under component_tests/templates.
    with importlib.resources.path("frostfs_testlib.component_tests.templates", ".") as template_dir:
        default = {
            "dir": _deployment_dir,
            "template": template_dir,
        }
        # Per-component working directory and config template,
        # e.g. "storage_dir" and "storage_template".
        for service in Component:
            default[f"{service}_dir"] = _deployment_dir / str(service)
            default[f"{service}_template"] = template_dir / f"{service}.yml"
        config = {  # TODO: replace hardcoded values with reading from a config file
            "adm_image": "git.frostfs.info/truecloudlab/frostfs-adm",
            "adm_version": "0.44.9",
            "alphabet_foo": "bar",  # FIXME
            "alphabet_node_count": 4,
            "contract_archive_url": "https://git.frostfs.info/TrueCloudLab/frostfs-contract/releases/download/v{version}/frostfs-contract-v{version}.tar.gz",
            "contract_version": "0.21.1",
            "httpgw_image": "git.frostfs.info/truecloudlab/frostfs-http-gw",
            "httpgw_node_count": 3,
            "httpgw_version": "0.32.1-debian",  # TODO: none of the published images work: either POSIX shell is missing or CORS container is required
            "innerring_image": "git.frostfs.info/truecloudlab/frostfs-ir",
            "innerring_version": "0.44.9",
            "locode_archive_url": "https://git.frostfs.info/TrueCloudLab/frostfs-locode-db/releases/download/v{version}/locode_db.gz",
            "locode_version": "0.5.2",
            "neogo_image": "nspccdev/neo-go",
            "neogo_min_peers": 3,
            "neogo_version": "0.106.3",
            "storage_image": "git.frostfs.info/truecloudlab/frostfs-storage",
            "storage_node_count": 2,
            "storage_version": "0.44.9",
        }
        default.update(config)
        # yield (not return) keeps template_dir's context manager open for
        # the whole session so the template paths remain valid.
        yield default
def _customizable_deployment(service: Component, _deployment, request):
"""Test fixture builder that allows overriding some deployment parameters later."""
config = {}
for key, value in _deployment.items():
if not key.startswith(f"{service}_"):
continue
config[key[len(service) + 1 :]] = value
override = getattr(request, "param", {})
config.update(override)
directory = config.get("dir")
if directory:
directory = Path(directory)
basename = directory.name
index = 0
while directory.exists():
index += 1
directory = directory.with_name(basename + f"-{index}")
config["dir"] = directory
config["prefix"] = f"{directory.parent.name}-{directory.name}-"
for key in ["dir", "template"]:
if isinstance(config[key], str):
config[key] = Path(config[key])
return SimpleNamespace(**config)
def _customize_decorator(service: Component, options):
    """
    Test decorator that overrides deployment options for the specific service.
    Docs: https://docs.pytest.org/en/latest/example/parametrize.html#indirect-parametrization
    """
    fixture_name = f"{service}_deployment"
    return pytest.mark.parametrize(fixture_name, [options], indirect=[fixture_name], ids=[f"custom_{service}"])
@pytest.fixture(scope=_SCOPE)
def alphabet_deployment(_deployment, request):
    """Alphabet node parameters. Override per test via alphabet_customize()."""
    return _customizable_deployment(Component.ALPHABET, _deployment, request)
def alphabet_customize(**options):
    """Test decorator that overrides deployment options for alphabet nodes."""
    # Thin wrapper over _customize_decorator for indirect parametrization.
    return _customize_decorator(Component.ALPHABET, options)
@pytest.fixture(scope=_SCOPE)
def contract_deployment(_deployment, request):
    """Contract deployment parameters. Override per test via contract_customize()."""
    return _customizable_deployment(Component.CONTRACT, _deployment, request)
def contract_customize(**options):
    """Test decorator that overrides deployment options for frostfs-contracts."""
    # Thin wrapper over _customize_decorator for indirect parametrization.
    return _customize_decorator(Component.CONTRACT, options)
@pytest.fixture(scope=_SCOPE)
def neogo_deployment(_deployment, request, alphabet_deployment):
    """neo-go deployment parameters. Override per test via neogo_customize()."""
    deployment = _customizable_deployment(Component.NEOGO, _deployment, request)
    # One neo-go node per alphabet wallet: consensus requires matching counts.
    deployment.node_count = alphabet_deployment.node_count
    return deployment
def neogo_customize(**options):
    """Test decorator that overrides deployment options for neo-go nodes."""
    # Thin wrapper over _customize_decorator for indirect parametrization.
    return _customize_decorator(Component.NEOGO, options)
@pytest.fixture(scope=_SCOPE)
def adm_deployment(_deployment, request):
    """Frostfs-adm container parameters. Override per test via adm_customize()."""
    return _customizable_deployment(Component.ADM, _deployment, request)
def adm_customize(**options):
    """Test decorator that overrides deployment options for frostfs-adm container."""
    # Thin wrapper over _customize_decorator for indirect parametrization.
    return _customize_decorator(Component.ADM, options)
@pytest.fixture(scope=_SCOPE)
def locode_deployment(_deployment, request):
    """Frostfs locode database parameters. Override per test via locode_customize()."""
    return _customizable_deployment(Component.LOCODE, _deployment, request)
def locode_customize(**options):
    """Test decorator that overrides deployment options for frostfs-locode-db archive."""
    # Thin wrapper over _customize_decorator for indirect parametrization.
    return _customize_decorator(Component.LOCODE, options)
@pytest.fixture(scope=_SCOPE)
def innerring_deployment(_deployment, request):
    """Innerring node parameters. Override per test via innerring_customize()."""
    return _customizable_deployment(Component.INNERRING, _deployment, request)
def innerring_customize(**options):
    """Test decorator that overrides deployment options for innerring nodes."""
    # Thin wrapper over _customize_decorator for indirect parametrization.
    return _customize_decorator(Component.INNERRING, options)
@pytest.fixture(scope=_SCOPE)
def storage_deployment(_deployment, request):
    """Storage node parameters. Override per test via storage_customize()."""
    return _customizable_deployment(Component.STORAGE, _deployment, request)
def storage_customize(**options):
    """Test decorator that overrides deployment options for storage nodes."""
    # Thin wrapper over _customize_decorator for indirect parametrization.
    return _customize_decorator(Component.STORAGE, options)
@pytest.fixture(scope=_SCOPE)
def httpgw_deployment(_deployment, request):
    """HTTP gateway deployment parameters. Override per test via httpgw_customize()."""
    return _customizable_deployment(Component.HTTPGW, _deployment, request)
def httpgw_customize(**options):
    """Test decorator that overrides deployment options for HTTP gateways."""
    # Thin wrapper over _customize_decorator for indirect parametrization.
    return _customize_decorator(Component.HTTPGW, options)
@pytest.fixture(scope=_SCOPE)
def _network():
    """Docker container network fixture. Should not be referenced directly from tests."""
    network = Network()
    # Prefix the autogenerated name so stale test networks are easy to identify.
    network.name = f"{_PREFIX}{network.name}"
    network.create()
    yield network
    network.remove()
@pytest.fixture(scope=_SCOPE)
def _deployment_dir() -> Path:
    """Temporary directory for a dynamic deployment. Should not be referenced directly from tests."""
    tmp = Path(tempfile.mkdtemp(prefix=f"{_PREFIX}test-")).absolute()
    yield tmp
    # Teardown: remove everything the deployment fixtures left behind.
    shutil.rmtree(tmp)
@pytest.fixture(scope=_SCOPE)
def adm_config(alphabet_deployment):
    """Write a frostfs-adm config with random wallet passwords; yields (tree, file)."""
    alphabet_deployment.dir.mkdir(mode=0o700, exist_ok=False)
    file = alphabet_deployment.dir / "_frostfs_adm.json"
    tree = {
        "alphabet-wallets": str(alphabet_deployment.dir),
        "credentials": {},
    }
    # SystemRandom: draw wallet passwords from OS entropy.
    rnd = random.SystemRandom()
    # One password per alphabet letter plus one for the contract wallet.
    for key in chain(["contract"], glagolic[: alphabet_deployment.node_count]):
        tree["credentials"][key] = "".join(rnd.choice(string.ascii_letters) for _ in range(12))
    with open(file, "w") as f:
        json.dump(tree, f, indent=True, sort_keys=True, ensure_ascii=False)
    yield tree, file
    # Teardown removes the whole directory, including wallets generated later.
    shutil.rmtree(alphabet_deployment.dir)
@pytest.fixture(scope=_SCOPE)
def alphabet_wallets(alphabet_deployment, frostfs_adm):
    """Generate alphabet wallets in the adm container; returns (public keys, local dir)."""
    dest = alphabet_deployment.dir
    count = alphabet_deployment.node_count
    frostfs_adm(f"morph generate-alphabet --size {count}")
    # Copy the generated wallets out of the container to the same path on the host.
    frostfs_adm.fetch(dest, dest)
    pubkeys = _read_alphabet_public_keys(dest, count)
    return pubkeys, dest
def _read_alphabet_public_keys(directory, count) -> List[str]:
    """Collect public keys from the first `count` alphabet wallets in `directory`."""
    # Wallet files are named after glagolic letters: az.json, buky.json, ...
    return [_wallet_public_key(directory / f"{glagolic[index]}.json") for index in range(count)]
def _wallet_address(path: Path, account=0) -> str:
"""Read account address from Neo NEP-6 wallet."""
with open(path) as f:
wallet = json.load(f)
account = wallet["accounts"][account]
return account["address"]
def _wallet_public_key(path: Path, account=0) -> str:
    """Read public key from Neo NEP-6 wallet."""
    with open(path) as wallet_file:
        wallet = json.load(wallet_file)
    account = wallet["accounts"][account]
    script = b64decode(account["contract"]["script"])
    # Only single-signature contracts are supported: the pubkey sits at bytes 2..34.
    if not _is_signature_contract(script):
        raise ValueError(f"not a signature contract: {account['contract']['script']}")
    return script[2:35].hex()
def _is_signature_contract(script: bytes) -> bool:
"""
Test if the provided script is a (single) signature contract.
Args:
script: contract script.
Copied from neo-mamba (neo3.contracts.utils.is_signature_contract).
"""
PUSHDATA1 = 0x0C
SYSCALL = 0x41
SYSTEM_CRYPTO_CHECK_STANDARD_ACCOUNT = bytes((0x56, 0xE7, 0xB3, 0x27))
if len(script) != 40:
return False
if script[0] != PUSHDATA1 or script[1] != 33 or script[35] != SYSCALL or script[36:40] != SYSTEM_CRYPTO_CHECK_STANDARD_ACCOUNT:
return False
return True
@pytest.fixture(scope=_SCOPE)
def neogo_config(neogo_deployment, adm_config, alphabet_wallets):
    """Render one neo-go config per alphabet node; yields (configs by letter, dir)."""
    neogo_deployment.dir.mkdir(mode=0o700, exist_ok=False)
    with open(neogo_deployment.template) as f:
        template = f.read()
    alphabet, alphabet_dir = alphabet_wallets
    adm, _ = adm_config
    credentials = adm["credentials"]
    # NOTE(review): seed names here are 0-based, but containers in the neogo
    # fixture are named f"{prefix}{index+1}" (1-based) - possible off-by-one; confirm.
    seedlist = [f"{neogo_deployment.prefix}{index}:20333" for index in range(len(alphabet))]
    override = getattr(neogo_deployment, "override", {})
    fields = vars(neogo_deployment)
    configs = {}
    for index in range(len(alphabet)):
        letter = glagolic[index]
        fields.update(
            dict(
                letter=letter,
                index=index,
                password=credentials[letter],
            )
        )
        # Template is str.format()-ed, then parsed as YAML.
        config = yaml.load(template.format(**fields), yaml.SafeLoader)
        config["ProtocolConfiguration"]["Hardforks"] = {}  # kludge: templating collision for {}
        config["ProtocolConfiguration"]["StandbyCommittee"] = alphabet
        # Exclude the node itself from its own seed list.
        config["ProtocolConfiguration"]["SeedList"] = seedlist[:index] + seedlist[index + 1 :]
        _update(config, override)
        with open(neogo_deployment.dir / f"{letter}.json", "w") as c:
            json.dump(config, c, ensure_ascii=False, indent=True, sort_keys=True)
        configs[letter] = config
    yield configs, neogo_deployment.dir
    shutil.rmtree(neogo_deployment.dir)
def _update(old: Mapping, new: Mapping) -> None:
"""Recursive version of dict.update."""
for key in new:
if key in old and isinstance(old[key], Mapping) and isinstance(new[key], Mapping):
_update(old[key], new[key])
continue
old[key] = new[key]
@pytest.fixture(scope=_SCOPE)
def neogo(neogo_deployment, neogo_config, alphabet_deployment, frostfs_adm, _network):
    """Start neo-go consensus nodes and point frostfs-adm at a random one of them."""
    wallet_dir = alphabet_deployment.dir
    _, config_dir = neogo_config
    nodes = []
    for index in range(neogo_deployment.node_count):
        letter = glagolic[index]
        node = ContainerizedService(
            command=f"neo-go node --config-file /neogo/{letter}.json --privnet --debug",
            image=f"{neogo_deployment.image}:{neogo_deployment.version}",
            # NOTE(review): 1-based names, while SeedList in neogo_config is 0-based - confirm.
            name=f"{neogo_deployment.prefix}{index+1}",
            network=_network,
        )
        node.add_file(wallet_dir / f"{letter}.json", f"/wallet/{letter}.json")
        node.add_file(config_dir / f"{letter}.json", f"/neogo/{letter}.json")
        node.start()
        nodes.append(node)

    def add_rpc_endpoint(command):
        # Prepend --rpc-endpoint to adm subcommands, but not to bare flag
        # invocations where it would be rejected.
        for arg in shlex.split(command):
            # Check that there is at least one non-flag argument
            # (--version does not work with --rpc-endpoint)
            if not arg.startswith("-"):
                break
        else:
            return command
        return f"--rpc-endpoint 'http://{random.choice(nodes).name}:30333' {command}"

    # Once the chain is up, every frostfs-adm call needs an RPC endpoint.
    frostfs_adm._default_cmd_rewrite = add_rpc_endpoint
    yield nodes
    for node in nodes:
        node.destroy()
@pytest.fixture(scope=_SCOPE)
def frostfs_adm(adm_deployment, adm_config, alphabet_deployment, _network):
    """Long-lived frostfs-adm container; call the returned object to run adm commands."""
    _, config_file = adm_config
    adm = ContainerizedService(
        command="sleep infinity",  # keep the container alive between exec calls
        # __call__ expands {command}, producing: frostfs-adm --config <file> <command>
        default_command=f'frostfs-adm --config "{config_file}" ' "{command}",
        image=f"{adm_deployment.image}:{adm_deployment.version}",
        name=f"{adm_deployment.prefix.strip('-')}",
        network=_network,
    )
    wallet_dir = alphabet_deployment.dir
    # Mirror host paths inside the container so paths in the config stay valid.
    adm.add_directory(wallet_dir, wallet_dir)
    adm.add_file(config_file, config_file)
    yield adm
    adm.destroy()
@pytest.fixture(scope=_SCOPE)
def frostfs_bootstrap(frostfs_contract, frostfs_adm, neogo) -> Mapping[str, str]:
    """Initialize the sidechain: deploy contracts and apply baseline configuration."""
    output = {}

    def morph(command: str) -> str:
        # Keep each command's output so tests can inspect bootstrap results.
        output[command] = frostfs_adm(f"morph {command}")

    frostfs_adm.add_directory(frostfs_contract, frostfs_contract)
    morph(f"init --contracts '{frostfs_contract}'")
    # Allow all container operations in the root namespace.
    morph(
        "ape add-rule-chain "
        "--target-type namespace "
        "--target-name '' "
        "--rule 'allow Container.* *' "
        "--chain-id 'allow_container_ops'"
    )
    # Make container operations free; keep non-zero candidate/withdraw fees.
    morph("set-config ContainerFee=0")
    morph("set-config ContainerAliasFee=0")
    morph("set-config InnerRingCandidateFee=13")
    morph("set-config WithdrawFee=17")
    return output
@pytest.fixture(scope=_SCOPE)
def frostfs_contract(contract_deployment):
    """Download and unpack the frostfs-contract release archive (cached per directory)."""
    # Reuse a previous download if the directory already exists.
    if contract_deployment.dir.exists():
        return contract_deployment.dir
    contract_deployment.dir.mkdir(mode=0o700, exist_ok=False)
    with urlopen(
        contract_deployment.archive_url.format(
            version=contract_deployment.version,
        )
    ) as request:
        # Stream-extract; strip the top-level "frostfs-contract-vX.Y.Z/" directory.
        with tarfile.open(fileobj=request, mode="r|*") as tar:
            tar.extractall(path=contract_deployment.dir, filter=_tar_strip_components(1))
    return contract_deployment.dir
@pytest.fixture(scope=_SCOPE)
def frostfs_locode(locode_deployment):
    """Download and decompress the frostfs locode database (cached per directory)."""
    locode = locode_deployment.dir / "locode_db"
    # Reuse a previous download if present.
    if locode.exists():
        return locode
    locode_deployment.dir.mkdir(mode=0o700, exist_ok=False)
    with urlopen(
        locode_deployment.archive_url.format(
            version=locode_deployment.version,
        )
    ) as request:
        # The release asset is a single gzipped file, not a tarball.
        with gzip.GzipFile(fileobj=request, mode="rb") as archive:
            with open(locode, "wb") as destination:
                shutil.copyfileobj(archive, destination)
    return locode
@pytest.fixture(scope=_SCOPE)
def innerring_config(innerring_deployment, neogo, adm_config, alphabet_wallets):
    """Render one inner ring config per alphabet node; yields (configs by letter, dir)."""
    innerring_deployment.dir.mkdir(mode=0o700, exist_ok=False)
    with open(innerring_deployment.template) as f:
        template = f.read()
    alphabet, alphabet_dir = alphabet_wallets
    adm, _ = adm_config
    credentials = adm["credentials"]
    override = getattr(innerring_deployment, "override", {})
    fields = vars(innerring_deployment)
    configs = {}
    for index in range(len(alphabet)):
        letter = glagolic[index]
        fields.update(
            dict(
                letter=letter,
                index=index,
                password=credentials[letter],
                # Pin each IR node to its corresponding neo-go node.
                neogo=neogo[index].name,
            )
        )
        # Template is str.format()-ed, then parsed as YAML.
        config = yaml.load(template.format(**fields), yaml.SafeLoader)
        config["morph"]["validators"] = alphabet
        _update(config, override)
        with open(innerring_deployment.dir / f"{letter}.json", "w") as c:
            json.dump(config, c, ensure_ascii=False, indent=True, sort_keys=True)
        configs[letter] = config
    yield configs, innerring_deployment.dir
    shutil.rmtree(innerring_deployment.dir)
@pytest.fixture(scope=_SCOPE)
def innerring(innerring_deployment, innerring_config, frostfs_locode, frostfs_bootstrap, alphabet_deployment, _network):
    """Start inner ring nodes (requires a bootstrapped sidechain); yields the nodes."""
    wallet_dir = alphabet_deployment.dir
    _, config_dir = innerring_config
    nodes = []
    for index in range(alphabet_deployment.node_count):
        letter = glagolic[index]
        node = ContainerizedService(
            command=f"frostfs-ir --config /innerring/{letter}.json",
            image=f"{innerring_deployment.image}:{innerring_deployment.version}",
            name=f"{innerring_deployment.prefix}{index+1}",
            network=_network,
        )
        # Each node gets its alphabet wallet, rendered config and the locode db.
        node.add_file(wallet_dir / f"{letter}.json", f"/wallet/{letter}.json")
        node.add_file(config_dir / f"{letter}.json", f"/innerring/{letter}.json")
        node.add_file(frostfs_locode, f"/innerring/locode.db")
        node.start()
        nodes.append(node)
    yield nodes
    for node in nodes:
        node.destroy()
@pytest.fixture(scope=_SCOPE)
def storage_config(storage_deployment, neogo, frostfs_adm, innerring):
    """Render storage node configs, create their wallets and fund them with GAS."""
    storage_deployment.dir.mkdir(mode=0o700, exist_ok=False)
    with open(storage_deployment.template) as f:
        template = f.read()
    # All neo-go nodes double as sidechain RPC endpoints (websocket).
    sidechain = []
    for node in neogo:
        sidechain.append(
            dict(
                address=f"ws://{node.name}:30333/ws",
                priority=0,
            )
        )
    override = getattr(storage_deployment, "override", {})
    fields = vars(storage_deployment)
    configs = []
    for index in range(storage_deployment.node_count):
        # A fresh wallet with a random password per storage node.
        wallet = storage_deployment.dir / f"wallet-{index}.json"
        _, password = _new_wallet(wallet)
        fields.update(
            dict(
                index=index,
                prefix=storage_deployment.prefix,
                wallet=str(wallet),
                password=password,
                price=42,
            )
        )
        # Template is str.format()-ed, then parsed as YAML.
        config = yaml.load(template.format(**fields), yaml.SafeLoader)
        config["morph"]["rpc_endpoint"] = sidechain
        _update(config, override)
        with open(storage_deployment.dir / f"config-{index}.json", "w") as c:
            json.dump(config, c, ensure_ascii=False, indent=True, sort_keys=True)
        # Nodes need GAS to register in the netmap and pay for operations.
        frostfs_adm.add_file(wallet, wallet)
        frostfs_adm(f"morph refill-gas --storage-wallet '{wallet}' --gas 50.0")
        configs.append(config)
    yield configs, storage_deployment.dir
    shutil.rmtree(storage_deployment.dir)
@pytest.fixture(scope=_SCOPE)
def storage(storage_deployment, storage_config, frostfs_adm, _network):
    """Start storage nodes, register them with the proxy contract, tick an epoch."""
    nodes = []
    configs, _ = storage_config
    for index, config in enumerate(configs):
        node = ContainerizedService(
            command=f"frostfs-node --config /storage/config.json",
            image=f"{storage_deployment.image}:{storage_deployment.version}",
            name=f"{storage_deployment.prefix}{index+1}",
            network=_network,
        )
        # Wallet is copied to the same path the rendered config references.
        node.add_file(config["node"]["wallet"]["path"], config["node"]["wallet"]["path"])
        node.add_file(storage_deployment.dir / f"config-{index}.json", f"/storage/config.json")
        node.start()
        nodes.append(node)
    for index, node in enumerate(nodes):
        # Adding storage node account to proxy contract is required to be able to use apemanager:
        # https://chat.yadro.com/yadro/pl/eet5jxiuabn1i8omg6jz4yeeso
        address = _wallet_address(configs[index]["node"]["wallet"]["path"])
        frostfs_adm(f"morph proxy-add-account --account {address}")
    # A new epoch makes freshly registered nodes visible in the netmap.
    frostfs_adm("morph force-new-epoch")
    yield nodes
    for node in nodes:
        node.destroy()
@pytest.fixture(scope=_SCOPE)
def httpgw_config(httpgw_deployment, storage, neogo):
    """Render HTTP gateway configs with their own wallets; yields (configs, dir)."""
    httpgw_deployment.dir.mkdir(mode=0o700, exist_ok=False)
    with open(httpgw_deployment.template) as f:
        template = f.read()
    # All storage nodes are upstream peers with equal weight.
    peers = {}
    for index, node in enumerate(storage):
        peers[index] = dict(
            address=f"grpc://{node.name}:8802",
            priority=1,
            weight=1,
        )
    override = getattr(httpgw_deployment, "override", {})
    fields = vars(httpgw_deployment)
    configs = []
    for index in range(httpgw_deployment.node_count):
        # A fresh wallet with a random password per gateway.
        wallet = httpgw_deployment.dir / f"wallet-{index}.json"
        _, password = _new_wallet(wallet)
        fields.update(
            dict(
                wallet=str(wallet),
                password=password,
                # Spread gateways across neo-go nodes round-robin.
                morph=neogo[index % len(neogo)].name,
            )
        )
        # Template is str.format()-ed, then parsed as YAML.
        config = yaml.load(template.format(**fields), yaml.SafeLoader)
        config["peers"] = peers
        _update(config, override)
        with open(httpgw_deployment.dir / f"config-{index}.json", "w") as c:
            json.dump(config, c, ensure_ascii=False, indent=True, sort_keys=True)
        configs.append(config)
    yield configs, httpgw_deployment.dir
    shutil.rmtree(httpgw_deployment.dir)
@pytest.fixture(scope=_SCOPE)
def httpgw(httpgw_deployment, httpgw_config, _network):
    """Start HTTP gateways and wait until they report readiness; yields the nodes."""
    nodes = []
    configs, _ = httpgw_config
    for index, config in enumerate(configs):
        node = ContainerizedService(
            command=f"frostfs-http-gw --config /httpgw/config.json",
            image=f"{httpgw_deployment.image}:{httpgw_deployment.version}",
            name=f"{httpgw_deployment.prefix}{index+1}",
            network=_network,
        )
        # Wallet is copied to the same path the rendered config references.
        node.add_file(config["wallet"]["path"], config["wallet"]["path"])
        node.add_file(httpgw_deployment.dir / f"config-{index}.json", f"/httpgw/config.json")
        node.start()
        nodes.append(node)
    # A gateway is ready once its log reports listening on port 80.
    ready = re.compile(r"starting server.*\:80")
    for node in nodes:
        node.wait(ready, timeout=10)
    yield nodes
    for node in nodes:
        node.destroy()
def _new_wallet(path: Path, password: str = None) -> tuple[Wallet, str]:
    """
    Create new wallet and new account.

    Args:
        path: destination file for the wallet JSON.
        password: account password; a random 12-letter one is generated when omitted.

    Returns:
        The in-memory wallet and the password protecting its account.

    Fix: the return annotation was ``(Wallet, str)`` — a tuple of classes,
    not a valid type annotation; replaced with ``tuple[Wallet, str]``.
    """
    wallet = Wallet()
    if password is None:
        # NOTE: random.choice is fine for throwaway test wallets;
        # use the `secrets` module for anything security-sensitive.
        password = "".join(random.choice(string.ascii_letters) for _ in range(12))
    account = Account.create_new(password)
    wallet.account_add(account)
    with open(path, "w") as out:
        json.dump(wallet.to_json(), out)
    return wallet, password
def _tar_strip_components(number=1):
"""
See --strip-components in `man tar`.
"""
sep = "/"
def _filter(member: tarfile.TarInfo, path: str) -> tarfile.TarInfo | None:
components = member.name.split(sep)
for _ in range(number):
if not components:
break
components.pop(0)
if not components:
return None
member = member.replace(name=sep.join(components))
return tarfile.data_filter(member, path)
return _filter

View file

@ -0,0 +1,171 @@
"""
Hosting object for the fixture based dynamic component test environment.
Based on testcontainers and Docker.
"""
from pathlib import Path
from typing import Any
from frostfs_testlib.credentials.interfaces import GrpcCredentialsProvider, User
from frostfs_testlib.hosting.interfaces import Host
from frostfs_testlib.resources.common import ASSETS_DIR, DEFAULT_WALLET_PASS
from frostfs_testlib.storage.cluster import Cluster, ClusterNode
from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
from .container import ContainerizedService # TODO: move fixtures into testlib
from .fixtures import _new_wallet, _wallet_public_key
def dynamic_hosting_config(**fixtures) -> dict[str, Any]:
    """
    Translate ContainerizedService fixtures into a hosting configuration tree.

    Keyword arguments map fixture names (e.g. "storage", "frostfs_adm") to the
    objects those fixtures produced. Only names recognized by _services()
    contribute service entries; the "frostfs_adm" fixture (if any) is exposed
    as a host attribute instead.
    """
    config = {
        "hosts": [
            {
                "plugin_name": "component_tests",
                "grpc_creds_plugin_name": "component_tests",
                "healthcheck_plugin_name": "basic",
                "hostname": "component_tests",
                "address": "component_tests",
                "attributes": {
                    "force_transactions": True,
                    "skip_readiness_check": True,
                    "sudo_shell": False,
                    "frostfs_adm": fixtures.get("frostfs_adm"),
                },
                "services": [],
                "clis": [],
            }
        ],
    }
    services = config["hosts"][0]["services"]
    for name, nodes in fixtures.items():
        services.extend(_services(name, nodes))
    return config


def _services(name: str, nodes: list) -> Iterator[dict[str, Any]]:
    """
    Yield hosting service descriptions for the given fixture.

    Only storage node fixtures are mapped at the moment; any other fixture
    name yields nothing. (Fixed: this generator was annotated ``-> None``.)
    """
    if name == "storage":
        for index, node in enumerate(nodes, 1):
            yield {
                "name": f"frostfs-storage_{index:02}",
                "attributes": {
                    "control_endpoint": f"{node.ip}:8801",
                    "endpoint_data0": f"{node.ip}:8802",
                    "endpoint_prometheus": f"{node.ip}:9090",
                },
            }
class ContainerHost(Host):
    """
    Exposes services running in testcontainers.

    Containers in the component test environment are created and destroyed by
    the fixtures themselves, so the host-management side of the Host interface
    does not apply here. Every method below is intentionally left
    unimplemented; callers that need to interact with a container should use
    the ContainerizedService wrapper directly instead.
    """

    # --- Shell and host lifecycle: managed by fixtures, not the Host API ---

    def get_shell(self, sudo=True):
        raise NotImplementedError

    def start_host(self):
        raise NotImplementedError

    def get_host_status(self):
        raise NotImplementedError

    def stop_host(self, mode):
        raise NotImplementedError

    # --- Per-service lifecycle and signals ---------------------------------

    def start_service(self, service_name):
        raise NotImplementedError

    def stop_service(self, service_name):
        raise NotImplementedError

    def send_signal_to_service(self, service_name, signal):
        raise NotImplementedError

    def mask_service(self, service_name):
        raise NotImplementedError

    def unmask_service(self, service_name):
        raise NotImplementedError

    def restart_service(self, service_name):
        raise NotImplementedError

    def get_data_directory(self, service_name):
        raise NotImplementedError

    def wait_success_suspend_process(self, process_name):
        raise NotImplementedError

    def wait_success_resume_process(self, process_name):
        raise NotImplementedError

    # --- Destructive storage-node data operations (chaos testing) ----------

    def delete_storage_node_data(self, service_name, cache_only=False):
        raise NotImplementedError

    def wipefs_storage_node_data(self, service_name):
        raise NotImplementedError

    def delete_fstree(self, service_name):
        raise NotImplementedError

    def delete_metabase(self, service_name):
        raise NotImplementedError

    def delete_write_cache(self, service_name):
        raise NotImplementedError

    def delete_blobovnicza(self, service_name):
        raise NotImplementedError

    def delete_file(self, file_path):
        raise NotImplementedError

    def is_file_exist(self, file_path):
        raise NotImplementedError

    # --- Disk attach/detach simulation -------------------------------------

    def detach_disk(self, device):
        raise NotImplementedError

    def attach_disk(self, device, disk_info):
        raise NotImplementedError

    def is_disk_attached(self, device, disk_info):
        raise NotImplementedError

    # --- Log collection and inspection -------------------------------------

    def dump_logs(self, directory_path, since=None, until=None, filter_regex=None):
        raise NotImplementedError

    def get_filtered_logs(self, filter_regex, since=None, until=None, unit=None, exclude_filter=None, priority=None, word_count=None):
        raise NotImplementedError

    def is_message_in_logs(self, message_regex, since=None, until=None, unit=None):
        raise NotImplementedError

    def wait_for_service_to_be_in_state(self, systemd_service_name, expected_state, timeout):
        raise NotImplementedError
class ClientWalletFactory(GrpcCredentialsProvider):
    """
    Issues GRPC credentials (wallets) for test users in the component test
    environment and registers each new wallet as a frostfsid subject via
    the frostfs-adm container.
    """

    def provide(self, user: User, cluster_node: ClusterNode, **kwargs) -> WalletInfo:
        # provide() may be called repeatedly for the same user; reuse the
        # wallet issued on the first call.
        if user.wallet is not None:
            return user.wallet
        # The frostfs-adm wrapper is injected as a host attribute by the
        # hosting fixture (see dynamic_hosting_config).
        frostfs_adm = cluster_node.host.config.attributes.get("frostfs_adm")
        if not frostfs_adm:
            raise RuntimeError("hosting fixture must depend directly on frostfs_adm")
        directory = Path(ASSETS_DIR).absolute()
        wallet = WalletInfo(
            path=str(directory / f"{user.name}-wallet.json"),
            password=DEFAULT_WALLET_PASS,
            config_path=str(directory / f"{user.name}-config.yml"),
        )
        # Create the wallet file locally, copy it into the frostfs-adm
        # container, then register its public key as a named subject on chain.
        _new_wallet(wallet.path, wallet.password)
        pubkey = _wallet_public_key(wallet.path)
        frostfs_adm.add_file(wallet.path, wallet.path)
        frostfs_adm(f"morph frostfsid create-subject --namespace '' --subject-key '{pubkey}' --subject-name '{user.name}'")
        # Write the per-user CLI config. NOTE(review): !r emits Python repr
        # quoting, which matches YAML quoting only for strings without quotes
        # or backslashes — confirm paths/passwords can never contain them.
        with open(wallet.config_path, "w") as config:
            config.write(f"wallet: {wallet.path!r}\npassword: {wallet.password!r}\n")
        user.wallet = wallet
        return user.wallet

View file

@ -0,0 +1,119 @@
wallet:
path: "{wallet}"
passphrase: "{password}"
pprof:
enabled: false
address: :8083
prometheus:
enabled: false
address: :8084
tracing:
enabled: false
exporter: "otlp_grpc"
endpoint: :4317
trusted_ca: ""
logger:
level: debug
destination: stdout
server:
- address: :80
tls:
enabled: false
peers: # This config branch is replaced completely during template parsing
0:
address: grpc://storage1:8802
priority: 1
weight: 1
1:
address: grpc://storage2:8802
priority: 1
weight: 1
2:
address: grpc://storage3:8802
priority: 1
weight: 1
reconnect_interval: 1m
web:
# Per-connection buffer size for requests' reading.
# This also limits the maximum header size.
read_buffer_size: 4096
# Per-connection buffer size for responses' writing.
write_buffer_size: 4096
# ReadTimeout is the amount of time allowed to read
# the full request including body. The connection's read
# deadline is reset when the connection opens, or for
# keep-alive connections after the first byte has been read.
read_timeout: 10m
# WriteTimeout is the maximum duration before timing out
# writes of the response. It is reset after the request handler
# has returned.
write_timeout: 5m
# StreamRequestBody enables request body streaming,
# and calls the handler sooner when given body is
# larger than the current limit.
stream_request_body: true
# Maximum request body size.
# The server rejects requests with bodies exceeding this limit.
max_request_body_size: 4194304
# RPC endpoint to be able to use nns container resolving.
rpc_endpoint: http://{morph}:30333
# The order in which resolvers are used to find an container id by name.
resolve_order:
- nns
- dns
upload_header:
use_default_timestamp: false # Create timestamp for object if it isn't provided by header.
connect_timeout: 5s # Timeout to dial node.
stream_timeout: 10s # Timeout for individual operations in streaming RPC.
request_timeout: 5s # Timeout to check node health during rebalance.
rebalance_timer: 30s # Interval to check nodes health.
pool_error_threshold: 100 # The number of errors on connection after which node is considered as unhealthy.
zip:
compression: false # Enable zip compression to download files by common prefix.
runtime:
soft_memory_limit: 1gb
# Parameters of requests to FrostFS
frostfs:
# This flag enables client side object preparing.
client_cut: false
# Sets max buffer size for read payload in put operations.
buffer_max_size_for_put: 1048576
# Max attempt to make successful tree request.
# default value is 0 that means the number of attempts equals to number of nodes in pool.
tree_pool_max_attempts: 0
# Caching
cache:
# Cache which contains mapping of bucket name to bucket info
buckets:
lifetime: 1m
size: 1000
resolve_bucket:
namespace_header: X-Frostfs-Namespace
default_namespaces: [ "", "root" ]
cors:
allow_origin: ""
allow_methods: []
allow_headers: []
expose_headers: []
allow_credentials: false
max_age: 600

View file

@ -0,0 +1,88 @@
# Logger section
logger:
level: debug # Minimum enabled logging level
control:
authorized_keys: # Q: Node keys are always assumed to be trusted?
grpc:
endpoint: :8099
# Wallet settings
wallet:
path: /wallet/{letter}.json
password: {password}
# Profiler section
pprof:
enabled: true
address: :6060 # Endpoint for application pprof profiling; disabled by default
shutdown_timeout: 30s # Timeout for profiling HTTP server graceful shutdown
# Application metrics section
prometheus:
enabled: true
address: :9090 # Endpoint for application prometheus metrics; disabled by default
shutdown_timeout: 30s # Timeout for metrics HTTP server graceful shutdown
# Toggling the sidechain-only mode
without_mainnet: true
# Neo main chain RPC settings
mainnet:
endpoint:
# Neo side chain RPC settings
morph:
endpoint:
client: # List of websocket RPC endpoints in sidechain
- address: ws://{neogo}:30333/ws
validators: # This config branch is replaced completely during template parsing
- 03aa8d8a0b8f9f0c5b36ce37975cfbcab3df75e1f12c501e3ead6d5f49d6e0b6f2 # az
- 02a51fd92f9e518f46ad009cc4b1e6f6d2879c33c252e12368ca9a431f4aabcd4f # buky
- 02856ed40a58b1e4bec8e1288ef8d5b2b4d8652557c89f03da72cc3988f7b4cf61 # vedi
- 0398803ed9999ddcbe8eba0d88805fd6b2ee92553af78ab0e4364446e7cb7229b3 # glagoli
- 036f8f0e9c2cd033c5c7bf38f5fc45926950988cecbc6f6d47575781217786166a # dobro
- 023790dcc88bef1f500f549342d1ec7a4b7e48912555d93b71ed6561a309142893 # yest
- 0244cb3df7b5a81810b8bb9ff2f0442434b5e8ab7c278c09723c07b127ff30e347 # zhivete
# Network time settings
timers:
emit: 50 # Number of sidechain blocks between GAS emission cycles; disabled by default
stop_estimation:
mul: 1 # Multiplier in x/y relation of when to stop basic income estimation within the epoch
div: 4 # Divider in x/y relation of when to stop basic income estimation within the epoch
collect_basic_income:
mul: 1 # Multiplier in x/y relation of when to start basic income asset collection within the epoch
div: 2 # Divider in x/y relation of when to start basic income asset collecting within the epoch
distribute_basic_income:
mul: 3 # Multiplier in x/y relation of when to start basic income asset distribution within the epoch
div: 4 # Divider in x/y relation of when to start basic income asset distribution within the epoch
# Storage node GAS emission settings
emit:
storage:
amount: 1000000000 # Fixed8 value of sidechain GAS emitted to all storage nodes once per GAS emission cycle; disabled by default
# Storage node removal settings
netmap_cleaner:
enabled: true # Enable voting for removing stale storage nodes from network map
threshold: 3 # Number of FrostFS epoch without bootstrap request from storage node before it considered stale
# Audit settings
audit:
pdp:
max_sleep_interval: 100ms # Maximum timeout between object.RangeHash requests to the storage node
# Settlement settings
settlement:
basic_income_rate: 100000000 # Optional: override basic income rate value from network config; applied only in debug mode
audit_fee: 100000 # Optional: override audit fee value from network config; applied only in debug mode
# LOCODE database
locode:
db:
path: /innerring/locode.db # Path to UN/LOCODE database file
node:
persistent_state:
path: /innerring/{letter}.state

View file

@ -0,0 +1,70 @@
ProtocolConfiguration:
Magic: 2025042112
MaxTraceableBlocks: 200000
TimePerBlock: 1s
MemPoolSize: 50000
StandbyCommittee: # This config branch is replaced completely during parsing
- 03aa8d8a0b8f9f0c5b36ce37975cfbcab3df75e1f12c501e3ead6d5f49d6e0b6f2 # az
- 02a51fd92f9e518f46ad009cc4b1e6f6d2879c33c252e12368ca9a431f4aabcd4f # buky
- 02856ed40a58b1e4bec8e1288ef8d5b2b4d8652557c89f03da72cc3988f7b4cf61 # vedi
- 0398803ed9999ddcbe8eba0d88805fd6b2ee92553af78ab0e4364446e7cb7229b3 # glagoli
- 036f8f0e9c2cd033c5c7bf38f5fc45926950988cecbc6f6d47575781217786166a # dobro
- 023790dcc88bef1f500f549342d1ec7a4b7e48912555d93b71ed6561a309142893 # yest
- 0244cb3df7b5a81810b8bb9ff2f0442434b5e8ab7c278c09723c07b127ff30e347 # zhivete
ValidatorsCount: {node_count}
VerifyTransactions: true
StateRootInHeader: true
P2PSigExtensions: true
Hardforks: # This config branch is replaced completely during parsing
SeedList: # This config branch is replaced completely during parsing
- "{prefix}1:20333"
- "{prefix}2:20333"
- "{prefix}3:20333"
- "{prefix}4:20333"
- "{prefix}5:20333"
- "{prefix}6:20333"
- "{prefix}7:20333"
ApplicationConfiguration:
SkipBlockVerification: false
DBConfiguration:
Type: "boltdb"
BoltDBOptions:
FilePath: "/chain/{letter}.bolt"
P2P:
Addresses:
- ":20333"
DialTimeout: 3s
ProtoTickInterval: 2s
PingInterval: 30s
PingTimeout: 90s
MaxPeers: 10
AttemptConnPeers: 5
MinPeers: {min_peers}
Relay: true
Consensus:
Enabled: true
UnlockWallet:
Path: "/wallet/{letter}.json"
Password: "{password}"
RPC:
Addresses:
- ":30333"
Enabled: true
SessionEnabled: true
EnableCORSWorkaround: false
MaxGasInvoke: 100
P2PNotary:
Enabled: true
UnlockWallet:
Path: "/wallet/{letter}.json"
Password: "{password}"
Prometheus:
Addresses:
- ":20001"
Enabled: true
Pprof:
Addresses:
- ":20011"
Enabled: true

View file

@ -0,0 +1,103 @@
# Logger section
logger:
level: debug # Minimum enabled logging level
# Profiler section
pprof:
enabled: true
address: :6060 # Server address
shutdown_timeout: 15s # Timeout for profiling HTTP server graceful shutdown
# Application metrics section
prometheus:
enabled: true
address: :9090 # Server address
shutdown_timeout: 15s # Timeout for metrics HTTP server graceful shutdown
# Morph section
morph:
dial_timeout: 30s # Timeout for side chain NEO RPC client connection
rpc_endpoint: # This config branch is replaced completely during template parsing
- address: ws://morph1:30333/ws
priority: 0
- address: ws://morph2:30333/ws
priority: 0
- address: ws://morph3:30333/ws
priority: 0
- address: ws://morph4:30333/ws
priority: 0
- address: ws://morph5:30333/ws
priority: 1
- address: ws://morph6:30333/ws
priority: 1
- address: ws://morph7:30333/ws
priority: 1
# Common storage node settings
node:
wallet:
path: "{wallet}"
password: "{password}"
addresses:
- grpc://{prefix}{index}:8802
attribute_0: "User-Agent:FrostFS component tests"
attribute_1: "Price:{price}"
persistent_state:
path: /storage/state
grpc:
- endpoint: :8802
tls:
enabled: false
control:
grpc:
endpoint: :8801
# Tree section
tree:
enabled: true
# Storage engine configuration
storage:
shard:
0:
writecache:
enabled: true
path: /storage/data/wc0 # Write-cache root directory
metabase:
path: /storage/data/meta0 # Path to the metabase
blobstor:
- type: blobovnicza
path: /storage/data/blobovnicza0 # Blobovnicza root directory
depth: 2
width: 4
- type: fstree
path: /storage/data/fstree0 # FSTree root directory
depth: 2
pilorama:
path: /storage/data/pilorama0 # Path to the pilorama database
1:
writecache:
enabled: true
path: /storage/data/wc1 # Write-cache root directory
metabase:
path: /storage/data/meta1 # Path to the metabase
blobstor:
- type: blobovnicza
path: /storage/data/blobovnicza1 # Blobovnicza root directory
depth: 2
width: 4
- type: fstree
path: /storage/data/fstree1 # FSTree root directory
depth: 2
pilorama:
path: /storage/data/pilorama1 # Path to the pilorama database

View file

@ -161,17 +161,24 @@ class Cluster:
This class represents a Cluster object for the whole storage based on provided hosting
"""
default_rpc_endpoint: str
default_s3_gate_endpoint: str
default_http_gate_endpoint: str
default_rpc_endpoint: str = "not deployed"
default_s3_gate_endpoint: str = "not deployed"
default_http_gate_endpoint: str = "not deployed"
def __init__(self, hosting: Hosting) -> None:
    """
    Discover default endpoints from whichever services are deployed.

    Components are optional in dynamic test environments, so each default
    endpoint keeps its class-level "not deployed" placeholder whenever the
    corresponding service kind is absent.
    """
    self._hosting = hosting
    self.class_registry = get_service_registry()
    endpoint_sources = (
        ("default_rpc_endpoint", StorageNode, "get_rpc_endpoint"),
        ("default_s3_gate_endpoint", S3Gate, "get_endpoint"),
        ("default_http_gate_endpoint", HTTPGate, "get_endpoint"),
    )
    for attribute, service_type, getter_name in endpoint_sources:
        deployed = self.services(service_type)
        if deployed:
            setattr(self, attribute, getattr(deployed[0], getter_name)())
@property
def hosts(self) -> list[Host]:

View file

@ -0,0 +1,44 @@
import pytest
from frostfs_testlib.component_tests.container import ContainerizedService
@pytest.mark.parametrize(
    "image",
    [
        "busybox:musl",
        "golang:1.24",
        "python:3.12",
    ],
)
def test_containerized_wrapper(image, neogo_deployment):
    """
    Smoke-test the ContainerizedService wrapper against several base images:
    file upload, command execution, and log streaming.
    """
    demo = ContainerizedService(
        image=image,
        command='tick=0; while true; do tick=$((tick+1)); echo "tick=$tick"; sleep 0.1; done',
    )
    # Fix: destroy the container even when an assertion fails; previously a
    # failing check leaked the running container.
    try:
        demo.add_file(neogo_deployment.template, "/neogo/config.yml")
        demo.start()
        result = demo.exec("cat /neogo/config.yml")
        assert result.exit_code == 0
        assert "StandbyCommittee" in str(result.output)
        print(f"Checking logs for {demo.name}")
        seen = []
        for line in demo.logs(timeout=20):
            if "tick=3" in line:
                break
            seen.append(line)
            if len(seen) > 15:
                pytest.fail("expected output did not appear in container logs")
    finally:
        demo.destroy()
@pytest.mark.timeout(15)
def test_wait_for_logs():
    """
    wait() returns once the pattern appears in logs and raises TimeoutError
    for a pattern that never appears.
    """
    demo = ContainerizedService(
        image="busybox:musl",
        command='tick=0; while true; do tick=$((tick+1)); echo "tick=$tick"; sleep 0.1; done',
    )
    demo.start()
    # Fix: the original test never destroyed the container, leaking it after
    # every run; clean up regardless of assertion outcome.
    try:
        demo.wait("tick=1", timeout=5)
        with pytest.raises(TimeoutError):
            demo.wait("impossible line", timeout=5)
    finally:
        demo.destroy()

View file

@ -0,0 +1,24 @@
"""
Demonstration of overrides for *_deployment fixtures
"""
import pytest
from frostfs_testlib.component_tests.fixtures import alphabet_customize
@pytest.fixture(scope="session")
def deployment(alphabet_deployment):
    # Demonstrates overriding the stock *_deployment fixtures: expose the
    # alphabet deployment under a suite-local, session-scoped name.
    print(f"{alphabet_deployment.prefix=}")
    return alphabet_deployment
def test_deployment(deployment):
    # Default alphabet deployment is expected to ship 4 nodes.
    assert deployment.node_count == 4
    # NOTE(review): `foo` is presumably attached by a customization hook
    # elsewhere in this suite — confirm where it is set.
    assert deployment.foo == "bar"
@alphabet_customize(node_count=5)
def test_deployment_custom(deployment):
    # alphabet_customize overrides deployment parameters for this test only,
    # raising the node count from the default 4 to 5.
    assert deployment.node_count == 5
    # NOTE(review): `foo` is presumably attached by a customization hook
    # elsewhere in this suite — confirm where it is set.
    assert deployment.foo == "bar"
    print(deployment.dir)

View file

@ -0,0 +1,74 @@
import random
import re
import pytest
from frostfs_testlib.component_tests.fixtures import adm_customize
@pytest.mark.timeout(30)
def test_sidechain(neogo):
    """
    Launch the sidechain and wait until block height reaches eleven.
    """
    height_eleven = re.compile(r'"height": 11')
    neogo[3].wait(height_eleven)
@adm_customize(version="0.45.0-rc.10")
def test_frostfs_adm(frostfs_adm):
    """
    Demonstration only: overriding the frostfs-adm version from component
    tests is discouraged, since alphabet wallets depend on frostfs-adm and
    everything else depends on alphabet wallets — pinning a version here
    effectively redeploys the whole environment for one test case.
    """
    version_output = frostfs_adm("--version")
    assert "0.45.0-rc.10" in version_output
def test_contract_fetch(frostfs_contract):
    # The fixture yields a filesystem path; spot-check that the alphabet
    # contract NEF was fetched and unpacked into it.
    assert (frostfs_contract / "alphabet" / "alphabet_contract.nef").exists()
def test_frostfs_bootstrap(frostfs_bootstrap):
    # NOTE(review): expects the bootstrap fixture to yield exactly 6 items —
    # confirm this count against the fixture's deployment layout.
    assert len(frostfs_bootstrap) == 6
# One log line is emitted per appended block; shared by the startup probes.
_NEW_BLOCK = re.compile(r'new block.*"index": \d+')


def _wait_for_new_blocks(service, count: int = 4) -> None:
    """Consume service logs until *count* new-block lines have been seen."""
    seen = 0
    for line in service.logs():
        if _NEW_BLOCK.search(line):
            seen += 1
            if seen >= count:
                return


@pytest.mark.timeout(60)
def test_innerring_startup(innerring):
    """An inner ring node keeps observing new sidechain blocks."""
    # Fix: this loop was duplicated verbatim in test_storage_startup;
    # extracted into _wait_for_new_blocks.
    _wait_for_new_blocks(random.choice(innerring))


@pytest.mark.timeout(90)
def test_storage_startup(storage):
    """A storage node keeps observing new sidechain blocks."""
    _wait_for_new_blocks(random.choice(storage))
@pytest.mark.timeout(90)
def test_httpgw_startup(httpgw, frostfs_adm):
    """
    The gateway answers 404 for a nonexistent container and logs the failed
    container lookup.
    """
    dummy_cid = "Dxhf4PNprrJHWWTG5RGLdfLkJiSQ3AQqit1MSnEPRkDZ"
    dummy_oid = "2m8PtaoricLouCn5zE8hAFr3gZEBDCZFe9BEgVJTSocY"
    gateway = random.choice(httpgw)
    rc, output = frostfs_adm.exec(f"wget -O - http://{gateway.name}/get/{dummy_cid}/{dummy_oid}")
    assert rc == 1
    assert "404 Not Found" in str(output)
    gateway.wait(re.compile(f"{dummy_cid}.*code = 3072.*container not found"))