package handler

import (
	"crypto/md5"
	"crypto/rand"
	"crypto/tls"
	"encoding/base64"
	"encoding/hex"
	"encoding/xml"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strconv"
	"testing"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	usertest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user/test"
	"github.com/stretchr/testify/require"
)

const (
	partNumberMarkerQuery = "part-number-marker"
)

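// TestMultipartUploadInvalidPart checks that completing a multipart upload whose
// parts are smaller than the minimum allowed part size fails with EntityTooSmall.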
func TestMultipartUploadInvalidPart(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-to-upload-part", "object-multipart"
	createTestBucket(hc, bktName)
	partSize := 8 // less than min part size

	multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
	etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
	etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
	w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrEntityTooSmall))
}

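// TestDeleteMultipartAllParts ensures that deleting a completed multipart object
// (plain, encrypted and versioned) removes all of its parts from the storage mock.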
func TestDeleteMultipartAllParts(t *testing.T) {
	hc := prepareHandlerContext(t)

	partSize := layer.UploadMinSize
	objLen := 6 * partSize

	bktName, bktName2, objName := "bucket", "bucket2", "object"

	// unversioned bucket
	createTestBucket(hc, bktName)
	multipartUpload(hc, bktName, objName, nil, objLen, partSize)
	deleteObject(t, hc, bktName, objName, emptyVersion)
	require.Empty(t, hc.tp.Objects())

	// encrypted multipart
	multipartUploadEncrypted(hc, bktName, objName, nil, objLen, partSize)
	deleteObject(t, hc, bktName, objName, emptyVersion)
	require.Empty(t, hc.tp.Objects())

	// versioned bucket
	createTestBucket(hc, bktName2)
	putBucketVersioning(t, hc, bktName2, true)
	multipartUpload(hc, bktName2, objName, nil, objLen, partSize)
	_, hdr := getObject(hc, bktName2, objName)
	versionID := hdr.Get("X-Amz-Version-Id")
	deleteObject(t, hc, bktName2, objName, emptyVersion)
	deleteObject(t, hc, bktName2, objName, versionID)
	require.Empty(t, hc.tp.Objects())
}

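// TestSpecialMultipartName checks that creating a multipart upload with the special
// "bucket-settings" key does not affect the bucket versioning configuration.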
func TestSpecialMultipartName(t *testing.T) {
	hc := prepareHandlerContextWithMinCache(t)

	bktName, objName := "bucket", "bucket-settings"

	createTestBucket(hc, bktName)
	putBucketVersioning(t, hc, bktName, true)

	createMultipartUpload(hc, bktName, objName, nil)
	res := getBucketVersioning(hc, bktName)
	require.Equal(t, enabledValue, res.Status)
}

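// TestMultipartReUploadPart verifies that re-uploading existing part numbers replaces
// the previous parts both in ListParts and in the tree service, and that the upload
// can then be completed successfully.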
func TestMultipartReUploadPart(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-to-upload-part", "object-multipart"
	bktInfo := createTestBucket(hc, bktName)
	partSizeLast := 8 // less than min part size
	partSizeFirst := 5 * 1024 * 1024

	uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
	etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeLast)
	etag2, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeFirst)

	list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
	require.Len(t, list.Parts, 2)
	require.Equal(t, etag1, list.Parts[0].ETag)
	require.Equal(t, etag2, list.Parts[1].ETag)

	w := completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
	assertS3Error(hc.t, w, apierr.GetAPIError(apierr.ErrEntityTooSmall))

	etag1, data1 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSizeFirst)
	etag2, data2 := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSizeLast)

	list = listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
	require.Len(t, list.Parts, 2)
	require.Equal(t, etag1, list.Parts[0].ETag)
	require.Equal(t, etag2, list.Parts[1].ETag)

	innerUploadInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, objName, uploadInfo.UploadID)
	require.NoError(t, err)
	treeParts, err := hc.tree.GetParts(hc.Context(), bktInfo, innerUploadInfo.ID)
	require.NoError(t, err)
	require.Len(t, treeParts, len(list.Parts))

	w = completeMultipartUploadBase(hc, bktName, objName, uploadInfo.UploadID, []string{etag1, etag2})
	assertStatus(hc.t, w, http.StatusOK)

	data, _ := getObject(hc, bktName, objName)
	equalDataSlices(t, append(data1, data2...), data)
}

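// TestMultipartRemovePartsSplit simulates a duplicate (split) part node in the tree
// service and checks that re-upload, abort and complete all remove the stale object.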
func TestMultipartRemovePartsSplit(t *testing.T) {
	bktName, objName := "bucket-to-upload-part", "object-multipart"
	partSize := 8

	t.Run("reupload part", func(t *testing.T) {
		hc := prepareHandlerContext(t)
		bktInfo := createTestBucket(hc, bktName)
		uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})

		uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)

		multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
		require.NoError(t, err)

		objID := oidtest.ID()
		_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
			"Number": "1",
			"OID":    objID.EncodeToString(),
			"Owner":  usertest.ID().EncodeToString(),
			"ETag":   "etag",
		})
		require.NoError(t, err)

		hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
		require.Len(t, hc.tp.Objects(), 2)

		list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
		require.Len(t, list.Parts, 1)
		require.Equal(t, `"etag"`, list.Parts[0].ETag)

		etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
		list = listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
		require.Len(t, list.Parts, 1)
		require.Equal(t, etag1, list.Parts[0].ETag)

		require.Len(t, hc.tp.Objects(), 1)
	})

	t.Run("abort multipart", func(t *testing.T) {
		hc := prepareHandlerContext(t)
		bktInfo := createTestBucket(hc, bktName)
		uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})

		uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)

		multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
		require.NoError(t, err)

		objID := oidtest.ID()
		_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
			"Number": "1",
			"OID":    objID.EncodeToString(),
			"Owner":  usertest.ID().EncodeToString(),
			"ETag":   "etag",
		})
		require.NoError(t, err)

		hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
		require.Len(t, hc.tp.Objects(), 2)

		abortMultipartUpload(hc, bktName, objName, uploadInfo.UploadID)
		require.Empty(t, hc.tp.Objects())
	})

	t.Run("complete multipart", func(t *testing.T) {
		hc := prepareHandlerContext(t)
		bktInfo := createTestBucket(hc, bktName)
		uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})

		etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)

		multipartInfo, err := hc.tree.GetMultipartUpload(hc.Context(), bktInfo, uploadInfo.Key, uploadInfo.UploadID)
		require.NoError(t, err)

		objID := oidtest.ID()
		_, err = hc.treeMock.AddNode(hc.Context(), bktInfo, "system", multipartInfo.ID, map[string]string{
			"Number": "1",
			"OID":    objID.EncodeToString(),
			"Owner":  usertest.ID().EncodeToString(),
			"ETag":   "etag",
		})
		require.NoError(t, err)

		hc.tp.AddObject(bktInfo.CID.EncodeToString()+"/"+objID.EncodeToString(), object.New())
		require.Len(t, hc.tp.Objects(), 2)

		completeMultipartUpload(hc, bktName, objName, uploadInfo.UploadID, []string{etag1})
		require.Falsef(t, containsOID(hc.tp.Objects(), objID), "frostfs contains '%s' object, but shouldn't", objID)
	})
}

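// containsOID reports whether objects contains an object with the given ID.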
func containsOID(objects []*object.Object, objID oid.ID) bool {
	for _, o := range objects {
		oID, _ := o.ID()
		if oID.Equals(objID) {
			return true
		}
	}

	return false
}

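// TestListMultipartUploads covers ListMultipartUploads: listing all uploads, the
// max-uploads limit, prefix filtering, and key-marker/upload-id-marker semantics.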
func TestListMultipartUploads(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-to-list-uploads"
	createTestBucket(hc, bktName)

	objName1 := "/my/object/name"
	uploadInfo1 := createMultipartUpload(hc, bktName, objName1, map[string]string{})
	objName2 := "/my/object2"
	uploadInfo2 := createMultipartUpload(hc, bktName, objName2, map[string]string{})
	objName3 := "/zzz/object/name3"
	uploadInfo3 := createMultipartUpload(hc, bktName, objName3, map[string]string{})

	t.Run("check upload key", func(t *testing.T) {
		listUploads := listAllMultipartUploads(hc, bktName)
		require.Len(t, listUploads.Uploads, 3)
		for i, upload := range []*InitiateMultipartUploadResponse{uploadInfo1, uploadInfo2, uploadInfo3} {
			require.Equal(t, upload.UploadID, listUploads.Uploads[i].UploadID)
			require.Equal(t, upload.Key, listUploads.Uploads[i].Key)
		}
	})

	t.Run("check max uploads", func(t *testing.T) {
		listUploads := listMultipartUploads(hc, bktName, "", "", "", "", 2)
		require.Len(t, listUploads.Uploads, 2)
		require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
		require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
	})

	t.Run("check prefix", func(t *testing.T) {
		listUploads := listMultipartUploads(hc, bktName, "/my", "", "", "", -1)
		require.Len(t, listUploads.Uploads, 2)
		require.Equal(t, uploadInfo1.UploadID, listUploads.Uploads[0].UploadID)
		require.Equal(t, uploadInfo2.UploadID, listUploads.Uploads[1].UploadID)
	})

	t.Run("check markers", func(t *testing.T) {
		t.Run("check only key-marker", func(t *testing.T) {
			listUploads := listMultipartUploads(hc, bktName, "", "", "", objName2, -1)
			require.Len(t, listUploads.Uploads, 1)
			// If upload-id-marker is not specified, only the keys lexicographically greater than the specified key-marker will be included in the list.
			require.Equal(t, uploadInfo3.UploadID, listUploads.Uploads[0].UploadID)
		})

		t.Run("check only upload-id-marker", func(t *testing.T) {
			uploadIDMarker := uploadInfo1.UploadID
			if uploadIDMarker > uploadInfo2.UploadID {
				uploadIDMarker = uploadInfo2.UploadID
			}
			listUploads := listMultipartUploads(hc, bktName, "", "", uploadIDMarker, "", -1)
			// If key-marker is not specified, the upload-id-marker parameter is ignored.
			require.Len(t, listUploads.Uploads, 3)
		})

		t.Run("check key-marker along with upload-id-marker", func(t *testing.T) {
			uploadIDMarker := "00000000-0000-0000-0000-000000000000"

			listUploads := listMultipartUploads(hc, bktName, "", "", uploadIDMarker, objName3, -1)
			require.Len(t, listUploads.Uploads, 1)
			// If upload-id-marker is specified, any multipart uploads for a key equal to the key-marker might also be included,
			// provided those multipart uploads have upload IDs lexicographically greater than the specified upload-id-marker.
			require.Equal(t, uploadInfo3.UploadID, listUploads.Uploads[0].UploadID)
		})
	})
}

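// TestMultipartUploadSize checks that the size of a completed multipart object is
// reported correctly in ListObjects (v1/v2), GET and range requests, and UploadPartCopy,
// including copying a part from an SSE-C encrypted source.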
func TestMultipartUploadSize(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-test-multipart-size", "object-multipart"
	createTestBucket(hc, bktName)

	partSize := layer.UploadMinSize
	objLen := 2 * partSize
	headers := map[string]string{}

	data := multipartUpload(hc, bktName, objName, headers, objLen, partSize)
	require.Equal(t, objLen, len(data))

	t.Run("check correct size in list v1", func(t *testing.T) {
		listV1 := listObjectsV1(hc, bktName, "", "", "", -1)
		require.Len(t, listV1.Contents, 1)
		require.Equal(t, objLen, int(listV1.Contents[0].Size))
		require.Equal(t, objName, listV1.Contents[0].Key)
	})

	t.Run("check correct size in list v2", func(t *testing.T) {
		listV2 := listObjectsV2(hc, bktName, "", "", "", "", -1)
		require.Len(t, listV2.Contents, 1)
		require.Equal(t, objLen, int(listV2.Contents[0].Size))
		require.Equal(t, objName, listV2.Contents[0].Key)
	})

	t.Run("check correct get", func(t *testing.T) {
		_, hdr := getObject(hc, bktName, objName)
		require.Equal(t, strconv.Itoa(objLen), hdr.Get(api.ContentLength))

		part := getObjectRange(t, hc, bktName, objName, partSize, objLen-1)
		equalDataSlices(t, data[partSize:], part)
	})

	t.Run("check correct size when part copy", func(_ *testing.T) {
		objName2 := "obj2"
		uploadInfo := createMultipartUpload(hc, bktName, objName2, headers)
		sourceCopy := bktName + "/" + objName
		uploadPartCopy(hc, bktName, objName2, uploadInfo.UploadID, 1, sourceCopy, 0, 0)
		uploadPartCopy(hc, bktName, objName2, uploadInfo.UploadID, 2, sourceCopy, 0, partSize)
	})

	t.Run("check correct size when copy part from encrypted source", func(t *testing.T) {
		newBucket, newObjName := "new-bucket", "new-object-multipart"
		bktInfo := createTestBucket(hc, newBucket)

		srcObjName := "source-object"
		key := []byte("firstencriptionkeyofsourceobject")
		keyMd5 := md5.Sum(key)
		srcEnc, err := encryption.NewParams(key)
		require.NoError(t, err)
		srcObjInfo := createTestObject(hc, bktInfo, srcObjName, *srcEnc)

		multipartInfo := createMultipartUpload(hc, newBucket, newObjName, headers)

		sourceCopy := newBucket + "/" + srcObjName

		query := make(url.Values)
		query.Set(uploadIDQuery, multipartInfo.UploadID)
		query.Set(partNumberQuery, "1")

		// missing copy-source SSE-C headers for an encrypted source must be rejected
		w, r := prepareTestRequestWithQuery(hc, newBucket, newObjName, query, nil)
		r.TLS = &tls.ConnectionState{}
		r.Header.Set(api.AmzCopySource, sourceCopy)
		hc.Handler().UploadPartCopy(w, r)

		assertStatus(t, w, http.StatusBadRequest)

		// successful copy with copy-source SSE-C headers
		w, r = prepareTestRequestWithQuery(hc, newBucket, newObjName, query, nil)
		r.TLS = &tls.ConnectionState{}
		r.Header.Set(api.AmzCopySource, sourceCopy)
		r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerAlgorithm, layer.AESEncryptionAlgorithm)
		r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKey, base64.StdEncoding.EncodeToString(key))
		r.Header.Set(api.AmzCopySourceServerSideEncryptionCustomerKeyMD5, base64.StdEncoding.EncodeToString(keyMd5[:]))
		hc.Handler().UploadPartCopy(w, r)

		uploadPartCopyResponse := &UploadPartCopyResponse{}
		readResponse(hc.t, w, http.StatusOK, uploadPartCopyResponse)

		completeMultipartUpload(hc, newBucket, newObjName, multipartInfo.UploadID, []string{uploadPartCopyResponse.ETag})
		attr := getObjectAttributes(hc, newBucket, newObjName, objectParts)
		require.Equal(t, 1, attr.ObjectParts.PartsCount)
		require.Equal(t, srcObjInfo.Headers[layer.AttributeDecryptedSize], strconv.Itoa(attr.ObjectParts.Parts[0].Size))
	})
}

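// TestListParts verifies ListParts pagination via the part-number-marker query parameter.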
func TestListParts(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-test-list-parts", "object-multipart"
	_ = createTestBucket(hc, bktName)
	partSize := 5 * 1024 * 1024

	uploadInfo := createMultipartUpload(hc, bktName, objName, map[string]string{})
	etag1, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 1, partSize)
	etag2, _ := uploadPart(hc, bktName, objName, uploadInfo.UploadID, 2, partSize)

	list := listParts(hc, bktName, objName, uploadInfo.UploadID, "0", http.StatusOK)
	require.Len(t, list.Parts, 2)
	require.Equal(t, etag1, list.Parts[0].ETag)
	require.Equal(t, etag2, list.Parts[1].ETag)
	require.Zero(t, list.PartNumberMarker)
	require.Equal(t, 2, list.NextPartNumberMarker)

	list = listParts(hc, bktName, objName, uploadInfo.UploadID, "1", http.StatusOK)
	require.Len(t, list.Parts, 1)
	require.Equal(t, etag2, list.Parts[0].ETag)
	require.Equal(t, 1, list.PartNumberMarker)
	require.Equal(t, 2, list.NextPartNumberMarker)

	list = listParts(hc, bktName, objName, uploadInfo.UploadID, "2", http.StatusOK)
	require.Len(t, list.Parts, 0)
	require.Equal(t, 2, list.PartNumberMarker)
	require.Equal(t, 0, list.NextPartNumberMarker)

	list = listParts(hc, bktName, objName, uploadInfo.UploadID, "7", http.StatusOK)
	require.Len(t, list.Parts, 0)

	list = listParts(hc, bktName, objName, uploadInfo.UploadID, "-1", http.StatusInternalServerError)
	require.Len(t, list.Parts, 0)
}

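// TestMultipartUploadWithContentLanguage checks that the Content-Language header supplied
// on upload initiation is preserved on the completed object.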
func TestMultipartUploadWithContentLanguage(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-1", "object-1"
	createTestBucket(hc, bktName)

	partSize := 5 * 1024 * 1024
	expectedContentLanguage := "en"
	headers := map[string]string{
		api.ContentLanguage: expectedContentLanguage,
	}

	multipartUpload := createMultipartUpload(hc, bktName, objName, headers)
	etag1, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
	etag2, _ := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
	w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
	assertStatus(t, w, http.StatusOK)

	w, r := prepareTestRequest(hc, bktName, objName, nil)
	hc.Handler().HeadObjectHandler(w, r)
	require.Equal(t, expectedContentLanguage, w.Header().Get(api.ContentLanguage))
}

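// TestMultipartUploadEnabledMD5 verifies that, with MD5 enabled, each part ETag is the MD5
// of its payload and the final ETag is the MD5 of the concatenated part MD5s with a
// "-<part count>" suffix.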
func TestMultipartUploadEnabledMD5(t *testing.T) {
	hc := prepareHandlerContext(t)
	hc.config.md5Enabled = true
	hc.layerFeatures.SetMD5Enabled(true)

	bktName, objName := "bucket-md5", "object-md5"
	createTestBucket(hc, bktName)

	partSize := 5 * 1024 * 1024
	multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})
	etag1, partBody1 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)
	md5Sum1 := md5.Sum(partBody1)
	require.Equal(t, data.Quote(hex.EncodeToString(md5Sum1[:])), etag1)

	etag2, partBody2 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 2, partSize)
	md5Sum2 := md5.Sum(partBody2)
	require.Equal(t, data.Quote(hex.EncodeToString(md5Sum2[:])), etag2)

	w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
	assertStatus(t, w, http.StatusOK)
	resp := &CompleteMultipartUploadResponse{}
	err := xml.NewDecoder(w.Result().Body).Decode(resp)
	require.NoError(t, err)
	completeMD5Sum := md5.Sum(append(md5Sum1[:], md5Sum2[:]...))
	require.Equal(t, data.Quote(hex.EncodeToString(completeMD5Sum[:])+"-2"), resp.ETag)
}

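// TestUploadPartCheckContentSHA256 checks x-amz-content-sha256 validation on UploadPart:
// mismatched hashes are rejected, correct hashes and UNSIGNED-PAYLOAD are accepted.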
func TestUploadPartCheckContentSHA256(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-1", "object-1"
	createTestBucket(hc, bktName)
	partSize := 5 * 1024 * 1024

	for _, tc := range []struct {
		name    string
		hash    string
		content []byte
		error   bool
	}{
		{
			name:    "invalid hash value",
			hash:    "d1b2a59fbea7e20077af9f91b27e95e865061b270be03ff539ab3b73587882e8",
			content: []byte("content"),
			error:   true,
		},
		{
			name:    "correct hash for empty payload",
			hash:    "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
			content: []byte(""),
			error:   false,
		},
		{
			name:    "unsigned payload",
			hash:    "UNSIGNED-PAYLOAD",
			content: []byte("content"),
			error:   false,
		},
		{
			name:    "correct hash",
			hash:    "ed7002b439e9ac845f22357d822bac1444730fbdb6016d3ec9432297b9ec9f73",
			content: []byte("content"),
			error:   false,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})

			etag1, data1 := uploadPart(hc, bktName, objName, multipartUpload.UploadID, 1, partSize)

			query := make(url.Values)
			query.Set(uploadIDQuery, multipartUpload.UploadID)
			query.Set(partNumberQuery, strconv.Itoa(2))

			w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, tc.content)
			r.Header.Set(api.AmzContentSha256, tc.hash)
			hc.Handler().UploadPartHandler(w, r)
			if tc.error {
				assertS3Error(t, w, apierr.GetAPIError(apierr.ErrContentSHA256Mismatch))

				list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
				require.Len(t, list.Parts, 1)

				w := completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1})
				assertStatus(t, w, http.StatusOK)

				data, _ := getObject(hc, bktName, objName)
				equalDataSlices(t, data1, data)
				return
			}
			assertStatus(t, w, http.StatusOK)

			list := listParts(hc, bktName, objName, multipartUpload.UploadID, "0", http.StatusOK)
			require.Len(t, list.Parts, 2)

			etag2 := w.Header().Get(api.ETag)
			w = completeMultipartUploadBase(hc, bktName, objName, multipartUpload.UploadID, []string{etag1, etag2})
			assertStatus(t, w, http.StatusOK)

			data, _ := getObject(hc, bktName, objName)
			equalDataSlices(t, append(data1, tc.content...), data)
		})
	}
}

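// TestMultipartObjectLocation checks that getObjectLocation builds the object URL from the
// request host, the X-Forwarded-Scheme header, TLS state and the virtual-hosted-style flag.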
func TestMultipartObjectLocation(t *testing.T) {
	for _, tc := range []struct {
		req        *http.Request
		bucket     string
		object     string
		vhsEnabled bool
		expected   string
	}{
		{
			req: &http.Request{
				Host:   "127.0.0.1:8084",
				Header: map[string][]string{"X-Forwarded-Scheme": {"http"}},
			},
			bucket:   "testbucket1",
			object:   "test/1.txt",
			expected: "http://127.0.0.1:8084/testbucket1/test/1.txt",
		},
		{
			req: &http.Request{
				Host:   "localhost:8084",
				Header: map[string][]string{"X-Forwarded-Scheme": {"https"}},
			},
			bucket:   "testbucket1",
			object:   "test/1.txt",
			expected: "https://localhost:8084/testbucket1/test/1.txt",
		},
		{
			req: &http.Request{
				Host:   "s3.mybucket.org",
				Header: map[string][]string{"X-Forwarded-Scheme": {"http"}},
			},
			bucket:   "mybucket",
			object:   "test/1.txt",
			expected: "http://s3.mybucket.org/mybucket/test/1.txt",
		},
		{
			req:      &http.Request{Host: "mys3.mybucket.org"},
			bucket:   "mybucket",
			object:   "test/1.txt",
			expected: "http://mys3.mybucket.org/mybucket/test/1.txt",
		},
		{
			req:      &http.Request{Host: "s3.bucket.org", TLS: &tls.ConnectionState{}},
			bucket:   "bucket",
			object:   "obj",
			expected: "https://s3.bucket.org/bucket/obj",
		},
		{
			req: &http.Request{
				Host: "mybucket.s3dev.frostfs.devenv",
			},
			bucket:     "mybucket",
			object:     "test/1.txt",
			vhsEnabled: true,
			expected:   "http://mybucket.s3dev.frostfs.devenv/test/1.txt",
		},
		{
			req: &http.Request{
				Host:   "mybucket.s3dev.frostfs.devenv",
				Header: map[string][]string{"X-Forwarded-Scheme": {"https"}},
			},
			bucket:     "mybucket",
			object:     "test/1.txt",
			vhsEnabled: true,
			expected:   "https://mybucket.s3dev.frostfs.devenv/test/1.txt",
		},
	} {
		t.Run("", func(t *testing.T) {
			location := getObjectLocation(tc.req, tc.bucket, tc.object, tc.vhsEnabled)
			require.Equal(t, tc.expected, location)
		})
	}
}

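// TestUploadPartWithNegativeContentLength checks that UploadPart handles a request with an
// unknown (negative) Content-Length and still stores the full part payload.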
func TestUploadPartWithNegativeContentLength(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "bucket-to-upload-part", "object-multipart"
	createTestBucket(hc, bktName)
	partSize := 5 * 1024 * 1024

	multipartUpload := createMultipartUpload(hc, bktName, objName, map[string]string{})

	partBody := make([]byte, partSize)
	_, err := rand.Read(partBody)
	require.NoError(hc.t, err)

	query := make(url.Values)
	query.Set(uploadIDQuery, multipartUpload.UploadID)
	query.Set(partNumberQuery, "1")

	w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, partBody)
	r.ContentLength = -1
	hc.Handler().UploadPartHandler(w, r)
	assertStatus(hc.t, w, http.StatusOK)

	completeMultipartUpload(hc, bktName, objName, multipartUpload.UploadID, []string{w.Header().Get(api.ETag)})
	res, _ := getObject(hc, bktName, objName)
	equalDataSlices(t, partBody, res)

	resp := getObjectAttributes(hc, bktName, objName, objectParts)
	require.Len(t, resp.ObjectParts.Parts, 1)
	require.Equal(t, partSize, resp.ObjectParts.Parts[0].Size)
}

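// TestListMultipartUploadsEncoding checks URL encoding of keys, prefixes, delimiters and
// markers in ListMultipartUploads responses, and that an invalid encoding-type is rejected.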
func TestListMultipartUploadsEncoding(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName := "bucket-to-list-uploads-encoding"
	createTestBucket(hc, bktName)

	listAllMultipartUploadsErr(hc, bktName, "invalid", apierr.GetAPIError(apierr.ErrInvalidEncodingMethod))

	objects := []string{"foo()/bar", "foo()/bar/xyzzy", "asdf+b"}
	for _, objName := range objects {
		createMultipartUpload(hc, bktName, objName, nil)
	}

	listResponse := listMultipartUploadsURL(hc, bktName, "foo(", ")", "", "", -1)

	require.Len(t, listResponse.CommonPrefixes, 1)
	require.Equal(t, "foo%28%29", listResponse.CommonPrefixes[0].Prefix)
	require.Equal(t, "foo%28", listResponse.Prefix)
	require.Equal(t, "%29", listResponse.Delimiter)
	require.Equal(t, "url", listResponse.EncodingType)
	require.Equal(t, maxObjectList, listResponse.MaxUploads)

	listResponse = listMultipartUploads(hc, bktName, "", "", "", "", 1)
	require.Empty(t, listResponse.EncodingType)

	listResponse = listMultipartUploadsURL(hc, bktName, "", "", "", listResponse.NextKeyMarker, 1)

	require.Len(t, listResponse.CommonPrefixes, 0)
	require.Len(t, listResponse.Uploads, 1)
	require.Equal(t, "foo%28%29/bar", listResponse.Uploads[0].Key)
	require.Equal(t, "asdf%2Bb", listResponse.KeyMarker)
	require.Equal(t, "foo%28%29/bar", listResponse.NextKeyMarker)
	require.Equal(t, "url", listResponse.EncodingType)
	require.Equal(t, 1, listResponse.MaxUploads)
}

func uploadPartCopy(hc *handlerContext, bktName, objName, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
	return uploadPartCopyBase(hc, bktName, objName, false, uploadID, num, srcObj, start, end)
}

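// uploadPartCopyBase sends an UploadPartCopy request, optionally with SSE-C headers and an
// x-amz-copy-source-range header (when start+end > 0), and expects HTTP 200.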
func uploadPartCopyBase(hc *handlerContext, bktName, objName string, encrypted bool, uploadID string, num int, srcObj string, start, end int) *UploadPartCopyResponse {
	query := make(url.Values)
	query.Set(uploadIDQuery, uploadID)
	query.Set(partNumberQuery, strconv.Itoa(num))

	w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, nil)
	if encrypted {
		setEncryptHeaders(r)
	}
	r.Header.Set(api.AmzCopySource, srcObj)
	if start+end > 0 {
		r.Header.Set(api.AmzCopySourceRange, fmt.Sprintf("bytes=%d-%d", start, end))
	}

	hc.Handler().UploadPartCopy(w, r)
	uploadPartCopyResponse := &UploadPartCopyResponse{}
	readResponse(hc.t, w, http.StatusOK, uploadPartCopyResponse)

	return uploadPartCopyResponse
}

func listMultipartUploads(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
	w := listMultipartUploadsBase(hc, bktName, prefix, delimiter, uploadIDMarker, keyMarker, "", maxUploads)
	assertStatus(hc.t, w, http.StatusOK)
	res := &ListMultipartUploadsResponse{}
	parseTestResponse(hc.t, w, res)
	return res
}

func listMultipartUploadsURL(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker string, maxUploads int) *ListMultipartUploadsResponse {
	w := listMultipartUploadsBase(hc, bktName, prefix, delimiter, uploadIDMarker, keyMarker, urlEncodingType, maxUploads)
	assertStatus(hc.t, w, http.StatusOK)
	res := &ListMultipartUploadsResponse{}
	parseTestResponse(hc.t, w, res)
	return res
}

func listAllMultipartUploads(hc *handlerContext, bktName string) *ListMultipartUploadsResponse {
	w := listMultipartUploadsBase(hc, bktName, "", "", "", "", "", -1)
	assertStatus(hc.t, w, http.StatusOK)
	res := &ListMultipartUploadsResponse{}
	parseTestResponse(hc.t, w, res)
	return res
}

func listAllMultipartUploadsErr(hc *handlerContext, bktName, encoding string, err apierr.Error) {
	w := listMultipartUploadsBase(hc, bktName, "", "", "", "", encoding, -1)
	assertS3Error(hc.t, w, err)
}

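// listMultipartUploadsBase issues a ListMultipartUploads request with the given query
// parameters and returns the raw response recorder.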
func listMultipartUploadsBase(hc *handlerContext, bktName, prefix, delimiter, uploadIDMarker, keyMarker, encoding string, maxUploads int) *httptest.ResponseRecorder {
	query := make(url.Values)
	query.Set(prefixQueryName, prefix)
	query.Set(delimiterQueryName, delimiter)
	query.Set(uploadIDMarkerQueryName, uploadIDMarker)
	query.Set(keyMarkerQueryName, keyMarker)
	query.Set(encodingTypeQueryName, encoding)
	if maxUploads != -1 {
		query.Set(maxUploadsQueryName, strconv.Itoa(maxUploads))
	}

	w, r := prepareTestRequestWithQuery(hc, bktName, "", query, nil)

	hc.Handler().ListMultipartUploadsHandler(w, r)
	return w
}

func listParts(hc *handlerContext, bktName, objName string, uploadID, partNumberMarker string, status int) *ListPartsResponse {
	return listPartsBase(hc, bktName, objName, false, uploadID, partNumberMarker, status)
}

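// listPartsBase issues a ListParts request for the given upload ID and part-number-marker
// and decodes the response, expecting the given status code.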
func listPartsBase(hc *handlerContext, bktName, objName string, encrypted bool, uploadID, partNumberMarker string, status int) *ListPartsResponse {
	query := make(url.Values)
	query.Set(uploadIDQuery, uploadID)
	query.Set(partNumberMarkerQuery, partNumberMarker)

	w, r := prepareTestRequestWithQuery(hc, bktName, objName, query, nil)
	if encrypted {
		setEncryptHeaders(r)
	}

	hc.Handler().ListPartsHandler(w, r)
	listPartsResponse := &ListPartsResponse{}
	readResponse(hc.t, w, status, listPartsResponse)

	return listPartsResponse
}