Compare commits

...

4 commits

5 changed files with 15 additions and 6 deletions


@@ -86,7 +86,7 @@ class SummarizedStats:
             target.latencies.by_node[node_key] = operation.latency
             target.throughput += operation.throughput
             target.errors.threshold = load_params.error_threshold
-            target.total_bytes = operation.total_bytes
+            target.total_bytes += operation.total_bytes
             if operation.failed_iterations:
                 target.errors.by_node[node_key] = operation.failed_iterations
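
Note on this hunk: SummarizedStats accumulates metrics across every summarized operation (throughput already uses +=), so plain assignment silently kept only the last operation's byte count. A minimal standalone sketch of the difference, with made-up numbers:

total_bytes = 0
for op_bytes in (100, 250):  # bytes reported by two summarized operations
    total_bytes += op_bytes  # with "=" this would end at 250, not 350
assert total_bytes == 350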


@@ -233,6 +233,8 @@ class LoadParams:
     )
     # Percentage of filling of all data disks on all nodes
     fill_percent: Optional[float] = None
+    # if specified, max payload size in GB of the storage engine. If the storage engine is already full, no new objects will be saved.
+    max_total_size_gb: Optional[float] = metadata_field([LoadScenario.LOCAL, LoadScenario.S3_LOCAL], None, "MAX_TOTAL_SIZE_GB")
     # if set, the payload is generated on the fly and is not read into memory fully.
     streaming: Optional[int] = metadata_field(all_load_scenarios, None, "STREAMING", False)
     # Output format
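
The new max_total_size_gb field reuses the metadata_field pattern visible on streaming: per this hunk it applies to the LOCAL and S3_LOCAL scenarios and is forwarded to k6 as the MAX_TOTAL_SIZE_GB environment variable. A hedged usage sketch, assuming an already constructed params: LoadParams instance (constructor arguments are not shown in this diff):

params.max_total_size_gb = 10  # forwarded as MAX_TOTAL_SIZE_GB; LOCAL / S3_LOCAL only
params.streaming = 1           # forwarded as STREAMING; payload generated on the fly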


@@ -729,7 +729,10 @@ class AwsCliClient(S3ClientWrapper):
             f"--key {key} --upload-id {upload_id} --multipart-upload file://{file_path} "
             f"--endpoint-url {self.s3gate_endpoint} --profile {self.profile}"
         )
-        self.local_shell.exec(cmd)
+        output = self.local_shell.exec(cmd).stdout
+        response = self._to_json(output)
+
+        return response

     @reporter.step("Put object lock configuration")
     def put_object_lock_configuration(self, bucket: str, configuration: dict) -> dict:
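
With this hunk the AwsCliClient wrapper stops discarding the CLI output of complete-multipart-upload: stdout is parsed through the class's _to_json helper and returned to the caller. A hedged caller-side sketch (the method signature is assumed from the variables in the command string; the ETag key is part of the standard aws s3api complete-multipart-upload output):

response = s3_client.complete_multipart_upload(bucket, key, upload_id, parts)
assert "ETag" in response, "multipart upload completed but no ETag was returned"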


@@ -135,7 +135,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
         s3_bucket = self.boto3_client.create_bucket(**params)
         log_command_execution(f"Created S3 bucket {bucket}", s3_bucket)
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME)
         return bucket

     @reporter.step("List buckets S3")
@@ -156,7 +156,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
     def delete_bucket(self, bucket: str) -> None:
         response = self.boto3_client.delete_bucket(Bucket=bucket)
         log_command_execution("S3 Delete bucket result", response)
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME)

     @reporter.step("Head bucket S3")
     @report_error
@@ -372,7 +372,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
         }
         response = self.boto3_client.delete_object(**params)
         log_command_execution("S3 Delete object result", response)
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME)
         return response

     @reporter.step("Delete objects S3")
@@ -383,7 +383,7 @@ class Boto3ClientWrapper(S3ClientWrapper):
         assert (
             "Errors" not in response
         ), f'The following objects have not been deleted: {[err_info["Key"] for err_info in response["Errors"]]}.\nError Message: {response["Errors"]["Message"]}'
-        sleep(S3_SYNC_WAIT_TIME * 10)
+        sleep(S3_SYNC_WAIT_TIME)
         return response

     @reporter.step("Delete object versions S3")
@@ -571,6 +571,8 @@ class Boto3ClientWrapper(S3ClientWrapper):
         )
         log_command_execution("S3 Complete multipart upload", response)
+
+        return response

     @reporter.step("Put object retention")
     @report_error
     def put_object_retention(
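
This hunk mirrors the AwsCliClient change above: the Boto3 wrapper's complete multipart upload now returns the response instead of dropping it, keeping the two S3 client wrappers' return contracts aligned.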


@@ -13,6 +13,7 @@ from frostfs_testlib.resources.common import ASSETS_DIR
 from frostfs_testlib.shell import Shell
 from frostfs_testlib.storage.cluster import Cluster, ClusterNode
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
+from frostfs_testlib.testing import wait_for_success
 from frostfs_testlib.utils import json_utils
 from frostfs_testlib.utils.cli_utils import parse_cmd_table, parse_netmap_output
@@ -695,6 +696,7 @@ def neo_go_query_height(shell: Shell, endpoint: str) -> dict:
     }


+@wait_for_success()
 @reporter.step("Search object nodes")
 def get_object_nodes(
     cluster: Cluster,
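
The final two hunks wrap get_object_nodes in the testlib's wait_for_success decorator, so a transiently failing node search is retried rather than failing the run outright. An illustrative sketch of the general retry pattern such a decorator implements (not the library's actual code; names and defaults here are assumptions):

import time

def retry_until_success(max_wait: float = 60.0, interval: float = 1.0):
    def decorator(func):
        def inner(*args, **kwargs):
            deadline = time.monotonic() + max_wait
            while True:
                try:
                    return func(*args, **kwargs)  # success: return the result immediately
                except Exception:
                    if time.monotonic() >= deadline:
                        raise  # deadline exceeded: surface the last error
                    time.sleep(interval)  # back off, then retry
        return inner
    return decorator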