Compare commits

...

6 commits

Author                                  SHA1        Message                                                                      Date
Andrey Berezin <a.berezin@yadro.com>    b464591153  [TrueCloudLab/xk6-frostfs#125] Add acl option                                2024-02-05 15:50:38 +00:00
Andrey Berezin <a.berezin@yadro.com>    c978f55e90  [#170] Update metrics                                                        2024-02-05 15:42:37 +00:00
Andrey Berezin <a.berezin@yadro.com>    2255ee465f  [#173] Add flag to remove registry file                                      2024-02-05 12:41:29 +03:00
Andrey Berezin <a.berezin@yadro.com>    2ec24f4cd1  [#168] Strip components for new xk6 archive and update unit tests for 0.38   2024-01-26 13:36:44 +03:00
Andrey Berezin <a.berezin@yadro.com>    2da1a4583f  [#165] Add local flag to preset in load                                      2024-01-22 19:08:30 +03:00
Dmitriy Zayakin <d.zayakin@yadro.com>   cda3773fa8  [#163] Refactor frostfs-cli functional                                       2024-01-22 14:26:25 +03:00
12 changed files with 76 additions and 39 deletions

View file

@@ -65,7 +65,6 @@ class FrostfsCliContainer(CliCommand):
         ttl: Optional[int] = None,
         xhdr: Optional[dict] = None,
         force: bool = False,
-        timeout: Optional[str] = None,
     ) -> CommandResult:
         """
         Delete an existing container.
@@ -81,7 +80,6 @@ class FrostfsCliContainer(CliCommand):
             ttl: TTL value in request meta header (default 2).
             wallet: WIF (NEP-2) string or path to the wallet or binary key.
             xhdr: Dict with request X-Headers.
-            timeout: Timeout for the operation (default 15s).

         Returns:
             Command's result.
@@ -298,9 +296,5 @@ class FrostfsCliContainer(CliCommand):
         return self._execute(
             f"container nodes {from_str}",
-            **{
-                param: value
-                for param, value in locals().items()
-                if param not in ["self", "from_file", "from_str"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "from_file", "from_str"]},
         )
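The final hunk only reflows the keyword-forwarding dict comprehension onto one line; behaviour is unchanged. A minimal standalone sketch of the same idiom (function and argument names here are illustrative, not taken from the library):

from typing import Optional


def build_cli_params(cid: str, ttl: Optional[int] = None, xhdr: Optional[dict] = None) -> dict:
    # Forward every local argument except the excluded names, mirroring the
    # **{param: value for ...} pattern used by FrostfsCliContainer above.
    return {param: value for param, value in locals().items() if param not in ["self"]}


print(build_cli_params("container-id", ttl=2))  # {'cid': 'container-id', 'ttl': 2, 'xhdr': None}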

View file

@@ -124,9 +124,7 @@ class FrostfsCliObject(CliCommand):
         """
         return self._execute(
             "object hash",
-            **{
-                param: value for param, value in locals().items() if param not in ["self", "params"]
-            },
+            **{param: value for param, value in locals().items() if param not in ["self", "params"]},
         )

     def head(
@@ -355,8 +353,8 @@ class FrostfsCliObject(CliCommand):
     def nodes(
         self,
         rpc_endpoint: str,
-        wallet: str,
         cid: str,
+        wallet: Optional[str] = None,
         address: Optional[str] = None,
         bearer: Optional[str] = None,
         generate_key: Optional = None,
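With wallet now optional in FrostfsCliObject.nodes, callers no longer have to pass a wallet explicitly. A hedged sketch of the new call shape, assuming an already constructed FrostfsCli instance named cli and placeholder identifiers:

# Sketch only: `cli` is an existing FrostfsCli wrapper from the test context.
result = cli.object.nodes(
    rpc_endpoint="s01.frostfs.devenv:8080",  # placeholder endpoint
    cid="<container-id>",
    oid="<object-id>",
    # wallet is omitted and defaults to None
)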

View file

@@ -1,5 +1,5 @@
 class Options:
-    DEFAULT_SHELL_TIMEOUT = 90
+    DEFAULT_SHELL_TIMEOUT = 120

     @staticmethod
     def get_default_shell_timeout():

View file

@@ -147,6 +147,8 @@ class Preset:
     pregen_json: Optional[str] = metadata_field(all_load_scenarios, "out", "PREGEN_JSON", False)
     # Workers count for preset
     workers: Optional[int] = metadata_field(all_load_scenarios, "workers", None, False)
+    # Acl for container/buckets
+    acl: Optional[str] = metadata_field(all_load_scenarios, "acl", None, False)

     # ------ GRPC ------
     # Amount of containers which should be created
@@ -166,6 +168,9 @@ class Preset:
     # Flag to control preset erorrs
     ignore_errors: Optional[bool] = metadata_field(all_load_scenarios, "ignore-errors", None, False)

+    # Flag to ensure created containers store data on local endpoints
+    local: Optional[bool] = metadata_field(grpc_preset_scenarios, "local", None, False)
+

 @dataclass
 class LoadParams:
@@ -232,6 +237,8 @@ class LoadParams:
     registry_file: Optional[str] = metadata_field(all_scenarios, None, "REGISTRY_FILE", False)
     # In case if we want to use custom registry file left from another load run
     custom_registry: Optional[str] = None
+    # In case if we want to use custom registry file left from another load run
+    force_fresh_registry: Optional[bool] = None
     # Specifies the minimum duration of every single execution (i.e. iteration).
     # Any iterations that are shorter than this value will cause that VU to
     # sleep for the remainder of the time until the specified minimum duration is reached.
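The new Preset.acl, Preset.local and LoadParams.force_fresh_registry fields are ordinary dataclass fields, so tests can set them directly on an existing configuration. A hedged sketch, assuming a load_params fixture like the one used in the unit tests further below (values are illustrative):

# Sketch only: `load_params` is an existing LoadParams instance with a Preset attached.
load_params.preset.acl = "public-read-write"        # emitted to the preset as --acl 'public-read-write'
load_params.preset.local = True                     # emitted as --local (gRPC preset scenarios only)
load_params.custom_registry = "/tmp/registry.bolt"  # hypothetical path to a reused registry file
load_params.force_fresh_registry = True             # ask the runner to delete that file before the run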

View file

@@ -107,66 +107,66 @@ class DeleteOperationMetric(OperationMetric):

 class GrpcWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "frostfs_obj_put_total"
+    _SUCCESS = "frostfs_obj_put_success"
     _ERRORS = "frostfs_obj_put_fails"
     _LATENCY = "frostfs_obj_put_duration"

 class GrpcReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "frostfs_obj_get_total"
+    _SUCCESS = "frostfs_obj_get_success"
     _ERRORS = "frostfs_obj_get_fails"
     _LATENCY = "frostfs_obj_get_duration"

 class GrpcDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "frostfs_obj_delete_total"
+    _SUCCESS = "frostfs_obj_delete_success"
     _ERRORS = "frostfs_obj_delete_fails"
     _LATENCY = "frostfs_obj_delete_duration"

 class S3WriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "aws_obj_put_total"
+    _SUCCESS = "aws_obj_put_success"
     _ERRORS = "aws_obj_put_fails"
     _LATENCY = "aws_obj_put_duration"

 class S3ReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "aws_obj_get_total"
+    _SUCCESS = "aws_obj_get_success"
     _ERRORS = "aws_obj_get_fails"
     _LATENCY = "aws_obj_get_duration"

 class S3DeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "aws_obj_delete_total"
+    _SUCCESS = "aws_obj_delete_success"
     _ERRORS = "aws_obj_delete_fails"
     _LATENCY = "aws_obj_delete_duration"

 class S3LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "s3local_obj_put_total"
+    _SUCCESS = "s3local_obj_put_success"
     _ERRORS = "s3local_obj_put_fails"
     _LATENCY = "s3local_obj_put_duration"

 class S3LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "s3local_obj_get_total"
+    _SUCCESS = "s3local_obj_get_success"
     _ERRORS = "s3local_obj_get_fails"
     _LATENCY = "s3local_obj_get_duration"

 class LocalWriteOperationMetric(WriteOperationMetric):
-    _SUCCESS = "local_obj_put_total"
+    _SUCCESS = "local_obj_put_success"
     _ERRORS = "local_obj_put_fails"
     _LATENCY = "local_obj_put_duration"

 class LocalReadOperationMetric(ReadOperationMetric):
-    _SUCCESS = "local_obj_get_total"
+    _SUCCESS = "local_obj_get_success"
     _ERRORS = "local_obj_get_fails"

 class LocalDeleteOperationMetric(DeleteOperationMetric):
-    _SUCCESS = "local_obj_delete_total"
+    _SUCCESS = "local_obj_delete_success"
     _ERRORS = "local_obj_delete_fails"

View file

@@ -4,6 +4,7 @@ import math
 import re
 import time
 from dataclasses import fields
+from threading import Event
 from typing import Optional
 from urllib.parse import urlparse
@@ -30,7 +31,6 @@ from frostfs_testlib.testing import parallel, run_optionally
 from frostfs_testlib.testing.test_control import retry
 from frostfs_testlib.utils import datetime_utils
 from frostfs_testlib.utils.file_keeper import FileKeeper
-from threading import Event

 class RunnerBase(ScenarioRunner):
@@ -78,6 +78,10 @@ class DefaultRunner(RunnerBase):
         nodes_under_load: list[ClusterNode],
         k6_dir: str,
     ):
+        if load_params.force_fresh_registry and load_params.custom_registry:
+            with reporter.step("Forcing fresh registry files"):
+                parallel(self._force_fresh_registry, self.loaders, load_params)
+
         if load_params.load_type != LoadType.S3:
             return
@@ -88,6 +92,11 @@ class DefaultRunner(RunnerBase):
         parallel(self._prepare_loader, self.loaders, load_params, grpc_peer, s3_public_keys, k6_dir)

+    def _force_fresh_registry(self, loader: Loader, load_params: LoadParams):
+        with reporter.step(f"Forcing fresh registry on {loader.ip}"):
+            shell = loader.get_shell()
+            shell.exec(f"rm -f {load_params.registry_file}")
+
     def _prepare_loader(
         self,
         loader: Loader,
@@ -314,7 +323,7 @@ class LocalRunner(RunnerBase):
         with reporter.step("Download K6"):
             shell.exec(f"sudo rm -rf {k6_dir};sudo mkdir {k6_dir}")
             shell.exec(f"sudo curl -so {k6_dir}/k6.tar.gz {load_params.k6_url}")
-            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz -C {k6_dir}")
+            shell.exec(f"sudo tar xf {k6_dir}/k6.tar.gz --strip-components 2 -C {k6_dir}")
             shell.exec(f"sudo chmod -R 777 {k6_dir}")

         with reporter.step("Create empty_passwd"):

View file

@@ -9,4 +9,4 @@ FROSTFS_ADM_EXEC = os.getenv("FROSTFS_ADM_EXEC", "frostfs-adm")
 # Config for frostfs-adm utility. Optional if tests are running against devenv
 FROSTFS_ADM_CONFIG_PATH = os.getenv("FROSTFS_ADM_CONFIG_PATH")

-CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", None)
+CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s")
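An unset CLI_DEFAULT_TIMEOUT environment variable now resolves to "100s" instead of None, so wrappers that forward timeout=CLI_DEFAULT_TIMEOUT always pass an explicit timeout unless the environment overrides it. The resolution itself is just:

import os

# New behaviour: "100s" when the variable is absent, otherwise the configured value (e.g. "300s").
CLI_DEFAULT_TIMEOUT = os.getenv("CLI_DEFAULT_TIMEOUT", "100s")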

View file

@@ -1,11 +1,12 @@
 import json
 import logging
 import re
-import requests
 from dataclasses import dataclass
 from time import sleep
 from typing import Optional, Union

+import requests
+
 from frostfs_testlib import reporter
 from frostfs_testlib.cli import FrostfsCli
 from frostfs_testlib.resources.cli import CLI_DEFAULT_TIMEOUT, FROSTFS_CLI_EXEC
@@ -291,18 +292,17 @@ def delete_container(
     force: bool = False,
     session_token: Optional[str] = None,
     await_mode: bool = False,
-    timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
 ) -> None:
     """
     A wrapper for `frostfs-cli container delete` call.
     Args:
+        await_mode: Block execution until container is removed.
         wallet (str): path to a wallet on whose behalf we delete the container
         cid (str): ID of the container to delete
         shell: executor for cli command
         endpoint: FrostFS endpoint to send request to, appends to `--rpc-endpoint` key
         force (bool): do not check whether container contains locks and remove immediately
         session_token: a path to session token file
-        timeout: Timeout for the operation.
     This function doesn't return anything.
     """
@@ -314,7 +314,6 @@ def delete_container(
         force=force,
         session=session_token,
         await_mode=await_mode,
-        timeout=timeout,
     )
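delete_container no longer accepts a timeout argument; the underlying FrostfsCli call relies on its own default instead. A hedged sketch of an updated call site, assuming wallet, shell and endpoint objects from the surrounding test fixtures:

# Sketch only: `wallet`, `shell` and `endpoint` come from the test environment.
delete_container(
    wallet=wallet,
    cid="<container-id>",
    shell=shell,
    endpoint=endpoint,
    await_mode=True,  # block until the container is actually removed
    # timeout=... is no longer a valid keyword here
)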

View file

@@ -732,23 +732,24 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:

 @reporter.step("Search object nodes")
 def get_object_nodes(
     cluster: Cluster,
-    wallet: str,
     cid: str,
     oid: str,
-    shell: Shell,
-    endpoint: str,
+    alive_node: ClusterNode,
     bearer: str = "",
     xhdr: Optional[dict] = None,
     is_direct: bool = False,
     verify_presence_all: bool = False,
-    wallet_config: Optional[str] = None,
     timeout: Optional[str] = CLI_DEFAULT_TIMEOUT,
 ) -> list[ClusterNode]:
-    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config or DEFAULT_WALLET_CONFIG)
+    shell = alive_node.host.get_shell()
+    endpoint = alive_node.storage_node.get_rpc_endpoint()
+    wallet = alive_node.storage_node.get_remote_wallet_path()
+    wallet_config = alive_node.storage_node.get_remote_wallet_config_path()
+
+    cli = FrostfsCli(shell, FROSTFS_CLI_EXEC, wallet_config)

     result_object_nodes = cli.object.nodes(
         rpc_endpoint=endpoint,
-        wallet=wallet,
         cid=cid,
         oid=oid,
         bearer=bearer,
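get_object_nodes now takes an alive ClusterNode and derives the shell, RPC endpoint, remote wallet and wallet config from it, instead of receiving them as separate arguments. A hedged sketch of the new call shape, assuming cluster and cluster_node objects from the test environment:

# Sketch only: `cluster` and `cluster_node` come from the surrounding fixtures.
object_nodes = get_object_nodes(
    cluster=cluster,
    cid="<container-id>",
    oid="<object-id>",
    alive_node=cluster_node,
)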

View file

@@ -8,7 +8,8 @@ class ConfigAttributes:
     SHARD_CONFIG_PATH = "shard_config_path"
     LOGGER_CONFIG_PATH = "logger_config_path"
     LOCAL_WALLET_PATH = "local_wallet_path"
-    LOCAL_WALLET_CONFIG = "local_config_path"
+    LOCAL_WALLET_CONFIG = "local_wallet_config_path"
+    REMOTE_WALLET_CONFIG = "remote_wallet_config_path"
     ENDPOINT_DATA_0 = "endpoint_data0"
     ENDPOINT_DATA_1 = "endpoint_data1"
     ENDPOINT_INTERNAL = "endpoint_internal0"

View file

@@ -114,6 +114,14 @@ class NodeBase(HumanReadableABC):
             ConfigAttributes.CONFIG_PATH,
         )

+    def get_remote_wallet_config_path(self) -> str:
+        """
+        Returns node config file path located on remote host
+        """
+        return self._get_attribute(
+            ConfigAttributes.REMOTE_WALLET_CONFIG,
+        )
+
     def get_wallet_config_path(self) -> str:
         return self._get_attribute(
             ConfigAttributes.LOCAL_WALLET_CONFIG,
@@ -125,8 +133,11 @@ class NodeBase(HumanReadableABC):
         Returns config path for logger located on remote host
         """
         config_attributes = self.host.get_service_config(self.name)
-        return self._get_attribute(
-            ConfigAttributes.LOGGER_CONFIG_PATH) if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes else None
+        return (
+            self._get_attribute(ConfigAttributes.LOGGER_CONFIG_PATH)
+            if ConfigAttributes.LOGGER_CONFIG_PATH in config_attributes.attributes
+            else None
+        )

     @property
     def config_dir(self) -> str:
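get_remote_wallet_config_path exposes the new remote_wallet_config_path attribute of a service's hosting config, while get_logger_config_path keeps returning None when logger_config_path is absent (the second hunk only reformats that conditional). A hedged usage sketch, assuming node is any NodeBase-derived service such as a storage node:

# Sketch only: `node` comes from the cluster services in the test environment.
remote_wallet_config = node.get_remote_wallet_config_path()

logger_config = node.get_logger_config_path()
if logger_config is None:
    # The hosting config does not define logger_config_path for this service.
    pass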

View file

@@ -136,6 +136,7 @@ class TestLoadConfig:
     def test_argument_parsing_for_grpc_scenario(self, load_params: LoadParams):
         expected_preset_args = [
             "--size '11'",
+            "--acl 'acl'",
             "--preload_obj '13'",
             "--out 'pregen_json'",
             "--workers '7'",
@@ -143,6 +144,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--local",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -172,6 +174,8 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--local",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -209,6 +213,7 @@ class TestLoadConfig:
             "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -240,6 +245,7 @@ class TestLoadConfig:
             "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 183900,
@@ -277,6 +283,7 @@ class TestLoadConfig:
             "--location 's3_location'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -304,6 +311,7 @@ class TestLoadConfig:
     @pytest.mark.parametrize("load_params", [LoadScenario.HTTP], indirect=True)
     def test_argument_parsing_for_http_scenario(self, load_params: LoadParams):
+        load_params.preset.local = False
         expected_preset_args = [
             "--no-verify-ssl",
             "--size '11'",
@@ -314,6 +322,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "DURATION": 9,
@@ -334,6 +343,7 @@ class TestLoadConfig:
     @pytest.mark.parametrize("load_params", [LoadScenario.LOCAL], indirect=True)
     def test_argument_parsing_for_local_scenario(self, load_params: LoadParams):
+        load_params.preset.local = False
         expected_preset_args = [
             "--size '11'",
             "--preload_obj '13'",
@@ -343,6 +353,7 @@ class TestLoadConfig:
             "--policy 'container_placement_policy'",
             "--ignore-errors",
             "--sleep '19'",
+            "--acl 'acl'",
         ]
         expected_env_vars = {
             "CONFIG_FILE": "config_file",
@@ -395,6 +406,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -423,6 +435,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -458,6 +471,7 @@ class TestLoadConfig:
             "--buckets '0'",
             "--location ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -486,6 +500,7 @@ class TestLoadConfig:
             "--buckets '0'",
             "--location ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -521,6 +536,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "DURATION": 0,
@@ -549,6 +565,7 @@ class TestLoadConfig:
             "--containers '0'",
             "--policy ''",
             "--sleep '0'",
+            "--acl ''",
         ]
         expected_env_vars = {
             "CONFIG_FILE": "",