[#348] Add test for multipart object in Test_http_object testsuite #348
1 changed file with 61 additions and 26 deletions
@@ -3,7 +3,7 @@ import logging
 import allure
 import pytest
 from frostfs_testlib import reporter
-from frostfs_testlib.clients import AwsCliClient, S3ClientWrapper
+from frostfs_testlib.clients import S3ClientWrapper
 from frostfs_testlib.steps import s3_helper
 from frostfs_testlib.steps.cli.object import put_object_to_random_node
 from frostfs_testlib.steps.http_gate import (
@@ -15,12 +15,14 @@ from frostfs_testlib.steps.http_gate import (
 )
 from frostfs_testlib.storage.dataclasses.wallet import WalletInfo
 from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
-from frostfs_testlib.utils.file_utils import TestFile
+from frostfs_testlib.utils.file_utils import TestFile, generate_file, split_file
 
 from ....helpers.container_request import REP_2_1_4_PUBLIC, requires_container
 
 logger = logging.getLogger("NeoLogger")
 
+PART_SIZE = 5 * 1024 * 1024
+
 
 @pytest.mark.nightly
 @pytest.mark.sanity
@@ -66,6 +68,7 @@ class Test_http_object(ClusterTestBase):
                 cluster=self.cluster,
                 attributes=f"{key_value1},{key_value2}",
             )
+
         with reporter.step("Get object and verify hashes [ get/$CID/$OID ]"):
             verify_object_hash(
                 oid=oid,
@@ -91,25 +94,19 @@ class Test_http_object(ClusterTestBase):
             )
 
         with reporter.step("Download the object with attribute [get_by_attribute/$CID/chapter1/peace]"):
-            get_object_by_attr_and_verify_hashes(
-                oid=oid,
-                file_name=test_file.path,
-                cid=container,
-                attrs=attrs,
-                node=self.cluster.cluster_nodes[0],
-            )
+            get_object_by_attr_and_verify_hashes(oid, test_file, container, attrs, self.cluster.cluster_nodes[0])
+
         with reporter.step("[Negative] try to get object: get_by_attribute/$CID/$OID"):
             request = f"/get_by_attribute/{container}/{oid}"
             try_to_get_object_via_passed_request_and_expect_error(
-                cid=container,
-                oid=oid,
-                node=self.cluster.cluster_nodes[0],
+                container,
+                oid,
+                self.cluster.cluster_nodes[0],
                 error_pattern=expected_err_msg,
                 http_request_path=request,
             )
 
-    @allure.title("Put over s3, Get over HTTP with bucket name and key (object_size={object_size})")
-    @pytest.mark.parametrize("s3_client", [AwsCliClient], indirect=True)
+    @allure.title("Put object over S3, get over HTTP with bucket name and key (s3_client={s3_client}, object_size={object_size})")
     def test_object_put_get_bucketname_key(self, test_file: TestFile, s3_client: S3ClientWrapper):
         """
         Test that object can be put using s3-gateway interface and got via HTTP with bucket name and object key.
@@ -125,17 +122,55 @@ class Test_http_object(ClusterTestBase):
         Hashes must be the same.
         """
 
-        object_key = s3_helper.object_key_from_file_path(test_file.path)
-        bucket = s3_client.create_bucket(acl="public-read-write")
-        s3_client.put_object(bucket=bucket, filepath=test_file.path, key=object_key)
-        obj_s3 = s3_client.get_object(bucket=bucket, key=object_key)
+        object_key = s3_helper.object_key_from_file_path(test_file)
 
-        request = f"/get/{bucket}/{object_key}"
-        obj_http = get_via_http_gate(
-            cid=None,
-            oid=None,
-            node=self.cluster.cluster_nodes[0],
-            request_path=request,
-        )
-        with reporter.step("Verify hashes"):
+        with reporter.step("Create public bucket"):
+            bucket = s3_client.create_bucket(acl="public-read-write")
+
+        with reporter.step("Put object"):
+            s3_client.put_object(bucket, test_file, object_key)
+
+        with reporter.step("Get object via S3 gate"):
+            obj_s3 = s3_client.get_object(bucket, object_key)
+
+        with reporter.step("Get object via HTTP gate"):
+            obj_http = get_via_http_gate(bucket, object_key, node=self.cluster.cluster_nodes[0])
+
+        with reporter.step("Make sure the hashes of both objects are the same"):
             assert_hashes_are_equal(test_file.path, obj_http, obj_s3)
+
+    @allure.title("Put multipart object over S3, get over HTTP with bucket name and key (s3_client={s3_client})")
+    def test_object_put_get_bucketname_key_multipart(self, s3_client: S3ClientWrapper):
+        parts = []
+        parts_count = 5
+        original_size = PART_SIZE * parts_count
+
+        with reporter.step("Create public container"):
+            bucket = s3_client.create_bucket(acl="public-read-write")
+
+        with reporter.step("Generate original object and split it into parts"):
+            original_file = generate_file(original_size)
+            file_parts = split_file(original_file, parts_count)
+            object_key = s3_helper.object_key_from_file_path(original_file)
+
+        with reporter.step("Create multipart and upload parts"):
+            upload_id = s3_client.create_multipart_upload(bucket, object_key)
+            for part_id, file_path in enumerate(file_parts, start=1):
+                etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
+                parts.append((part_id, etag))
+
+        with reporter.step("Check all parts are visible in bucket"):
+            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
+            assert len(got_parts) == len(file_parts), f"Expected {parts_count} parts, got:\n{got_parts}"
+
+        with reporter.step("Complete multipart upload"):
+            s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)
+
+        with reporter.step("Get multipart object via S3 gate"):
+            obj_s3 = s3_client.get_object(bucket, object_key)
+
+        with reporter.step("Get multipart object via HTTP gate"):
+            obj_http = get_via_http_gate(bucket, object_key, self.cluster.cluster_nodes[0])
+
+        with reporter.step("Make sure the hashes of both objects are the same"):
            assert_hashes_are_equal(original_file, obj_http, obj_s3)
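
The new test_object_put_get_bucketname_key_multipart drives the standard S3 multipart sequence (create the upload, upload each part, list parts, complete) through the testlib's S3ClientWrapper. For anyone who wants to reproduce the scenario outside the test framework, here is a minimal sketch of the same sequence against a plain S3 endpoint with boto3; the endpoint address, credentials, bucket name and part payloads are placeholder assumptions, not values taken from this change.

# Minimal sketch (not part of this PR): the multipart flow the new test exercises,
# expressed with plain boto3. Endpoint, credentials and names are placeholders.
import boto3

PART_SIZE = 5 * 1024 * 1024  # 5 MiB, the S3 minimum size for every part except the last
PARTS_COUNT = 5

s3 = boto3.client(
    "s3",
    endpoint_url="http://s3-gate.example:8080",  # assumed S3 gate address
    aws_access_key_id="<access-key>",
    aws_secret_access_key="<secret-key>",
)

bucket, key = "multipart-demo-bucket", "multipart-object"
s3.create_bucket(Bucket=bucket)

upload = s3.create_multipart_upload(Bucket=bucket, Key=key)
parts = []
for part_number in range(1, PARTS_COUNT + 1):
    # Each part carries PART_SIZE bytes; the ETags are needed to complete the upload.
    resp = s3.upload_part(
        Bucket=bucket,
        Key=key,
        UploadId=upload["UploadId"],
        PartNumber=part_number,
        Body=b"\0" * PART_SIZE,
    )
    parts.append({"PartNumber": part_number, "ETag": resp["ETag"]})

# The object becomes visible in the bucket only after the upload is completed.
s3.complete_multipart_upload(
    Bucket=bucket,
    Key=key,
    UploadId=upload["UploadId"],
    MultipartUpload={"Parts": parts},
)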
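On the read side, both tests fetch the object back through the FrostFS HTTP gate by bucket name and key and compare hashes; the removed lines of the last hunk show the /get/{bucket}/{object_key} path that used to be passed to get_via_http_gate by hand. Below is a rough stand-alone equivalent of that check with requests, again using placeholder addresses and file paths rather than anything from this PR.

# Rough sketch (not part of this PR): fetch the object over the HTTP gate by
# bucket name and key, then compare its hash with the local source file.
import hashlib

import requests

HTTP_GATE = "http://http-gate.example"  # assumed HTTP gate address
bucket, key = "multipart-demo-bucket", "multipart-object"
local_path = "/tmp/original_file"  # assumed path of the locally generated file


def sha256(data: bytes) -> str:
    return hashlib.sha256(data).hexdigest()


resp = requests.get(f"{HTTP_GATE}/get/{bucket}/{key}", timeout=60)
resp.raise_for_status()

with open(local_path, "rb") as f:
    assert sha256(resp.content) == sha256(f.read()), "HTTP gate returned different content"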