import allure
import pytest
from frostfs_testlib.s3 import S3ClientWrapper, VersioningStatus
from frostfs_testlib.steps.cli.container import list_objects, search_container_by_name
from frostfs_testlib.steps.s3 import s3_helper
from frostfs_testlib.storage.dataclasses.object_size import ObjectSize
from frostfs_testlib.testing.cluster_test_base import ClusterTestBase
from frostfs_testlib.utils.file_utils import generate_file, get_file_hash, split_file

PART_SIZE = 5 * 1024 * 1024  # 5 MiB: the minimum S3 part size for every part except the last


@pytest.mark.s3_gate
@pytest.mark.s3_gate_multipart
class TestS3GateMultipart(ClusterTestBase):
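    # Error text expected when listing parts of an upload that has been aborted or completed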
    NO_SUCH_UPLOAD = "The upload ID may be invalid, or the upload may have been aborted or completed."

    @allure.title("Object Multipart API (s3_client={s3_client})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_s3_object_multipart(self, s3_client: S3ClientWrapper, bucket: str):
        parts_count = 5
        file_name_large = generate_file(PART_SIZE * parts_count)  # each part is 5 MiB, the minimum allowed part size
        object_key = s3_helper.object_key_from_file_path(file_name_large)
        part_files = split_file(file_name_large, parts_count)
        parts = []  # (part_number, ETag) pairs for complete_multipart_upload

        with allure.step("Upload first part"):
            upload_id = s3_client.create_multipart_upload(bucket, object_key)
            uploads = s3_client.list_multipart_uploads(bucket)
            assert uploads, f"Expected the created upload to be listed in bucket {bucket}"
            etag = s3_client.upload_part(bucket, object_key, upload_id, 1, part_files[0])
            parts.append((1, etag))
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            assert len(got_parts) == 1, f"Expected 1 part, got\n{got_parts}"

        with allure.step("Upload last parts"):
            for part_id, file_path in enumerate(part_files[1:], start=2):
                etag = s3_client.upload_part(bucket, object_key, upload_id, part_id, file_path)
                parts.append((part_id, etag))
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"
            s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)

        with allure.step("Check upload list is empty"):
            uploads = s3_client.list_multipart_uploads(bucket)
            assert not uploads, f"Expected there is no uploads in bucket {bucket}"

        with allure.step("Check we can get whole object from bucket"):
            got_object = s3_client.get_object(bucket, object_key)
            assert get_file_hash(got_object) == get_file_hash(file_name_large)

    @allure.title("Abort Multipart Upload (s3_client={s3_client})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_s3_abort_multipart(
        self,
        s3_client: S3ClientWrapper,
        default_wallet: str,
        bucket: str,
        simple_object_size: ObjectSize,
        complex_object_size: ObjectSize,
    ):
        complex_file = generate_file(complex_object_size.value)
        simple_file = generate_file(simple_object_size.value)
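        # A mix of complex and simple objects (the complex one twice) so the upload gets several parts of different sizes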
        to_upload = [complex_file, complex_file, simple_file]
        files_count = len(to_upload)
        upload_key = "multipart_abort"

        with allure.step(f"Get related container_id for bucket '{bucket}'"):
            container_id = search_container_by_name(
                default_wallet, bucket, self.shell, self.cluster.default_rpc_endpoint
            )

        with allure.step("Create multipart upload"):
            upload_id = s3_client.create_multipart_upload(bucket, upload_key)

        with allure.step(f"Upload {files_count} files to multipart upload"):
            for part_id, file_path in enumerate(to_upload, 1):
                s3_client.upload_part(bucket, upload_key, upload_id, part_id, file_path)

        with allure.step(f"Check that we have {files_count} files in bucket"):
            parts = s3_client.list_parts(bucket, upload_key, upload_id)
            assert len(parts) == files_count, f"Expected {files_count} parts, got\n{parts}"

        with allure.step(f"Check that we have {files_count} files in container '{container_id}'"):
            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
            assert len(objects) == files_count, f"Expected {files_count} objects in container, got\n{objects}"

        with allure.step("Abort multipart upload"):
            s3_client.abort_multipart_upload(bucket, upload_key, upload_id)
            uploads = s3_client.list_multipart_uploads(bucket)
            assert not uploads, f"Expected no uploads in bucket {bucket}"

        with allure.step("Check that we have no files in bucket since upload was aborted"):
            with pytest.raises(Exception, match=self.NO_SUCH_UPLOAD):
                s3_client.list_parts(bucket, upload_key, upload_id)

        with allure.step("Check that we have no files in container since upload was aborted"):
            objects = list_objects(default_wallet, self.shell, container_id, self.cluster.default_rpc_endpoint)
            assert len(objects) == 0, f"Expected no objects in container, got\n{objects}"

    @allure.title("Upload Part Copy (s3_client={s3_client})")
    @pytest.mark.parametrize("versioning_status", [VersioningStatus.ENABLED], indirect=True)
    def test_s3_multipart_copy(self, s3_client: S3ClientWrapper, bucket: str):
        parts_count = 3
        file_name_large = generate_file(PART_SIZE * parts_count)  # each part is 5 MiB, the minimum allowed part size
        object_key = s3_helper.object_key_from_file_path(file_name_large)
        part_files = split_file(file_name_large, parts_count)
        parts = []  # (part_number, ETag) pairs for complete_multipart_upload
        objs = []  # keys of the source objects used by upload_part_copy

        with allure.step(f"Put {parts_count} objects in bucket"):
            for part in part_files:
                s3_client.put_object(bucket, part)
                objs.append(s3_helper.object_key_from_file_path(part))
            s3_helper.check_objects_in_bucket(s3_client, bucket, objs)

        with allure.step("Create multipart upload object"):
            upload_id = s3_client.create_multipart_upload(bucket, object_key)
            uploads = s3_client.list_multipart_uploads(bucket)
            assert uploads, f"Expected at least one active multipart upload in bucket {bucket}"

        with allure.step("Upload parts to multipart upload"):
            for part_id, obj_key in enumerate(objs, start=1):
                etag = s3_client.upload_part_copy(bucket, object_key, upload_id, part_id, f"{bucket}/{obj_key}")
                parts.append((part_id, etag))
            got_parts = s3_client.list_parts(bucket, object_key, upload_id)
            assert len(got_parts) == len(part_files), f"Expected {parts_count} parts, got\n{got_parts}"

        with allure.step("Complete multipart upload"):
            s3_client.complete_multipart_upload(bucket, object_key, upload_id, parts)

        with allure.step("Check we can get whole object from bucket"):
            got_object = s3_client.get_object(bucket, object_key)
            assert get_file_hash(got_object) == get_file_hash(file_name_large)