package handler

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/json"
	"errors"
	"io"
	"mime/multipart"
	"net/http"
	"net/http/httptest"
	"runtime"
	"strconv"
	"strings"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4"
	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/stretchr/testify/require"
)

func TestCheckBucketName(t *testing.T) {
	for _, tc := range []struct {
		name string
		err  bool
	}{
		{name: "bucket"},
		{name: "2bucket"},
		{name: "buc.ket"},
		{name: "buc-ket"},
		{name: "abc"},
		{name: "63aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"},
		{name: "buc.-ket", err: true},
		{name: "bucket.", err: true},
		{name: ".bucket", err: true},
		{name: "bucket-", err: true},
		{name: "-bucket", err: true},
		{name: "Bucket", err: true},
		{name: "buc-.ket", err: true},
		{name: "buc!ket", err: true},
		{name: "buc_ket", err: true},
		{name: "xn--bucket", err: true},
		{name: "bucket-s3alias", err: true},
		{name: "192.168.0.1", err: true},
		{name: "as", err: true},
		{name: "64aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", err: true},
	} {
		err := checkBucketName(tc.name)
		if tc.err {
			require.Error(t, err, "bucket name: %s", tc.name)
		} else {
			require.NoError(t, err, "bucket name: %s", tc.name)
		}
	}
}

func TestCustomJSONMarshal(t *testing.T) {
	data := []byte(`
	{
		"expiration": "2015-12-30T12:00:00.000Z",
		"conditions": [
			["content-length-range", 1048576, 10485760],
			{"bucket": "bucketName"},
			["starts-with", "$key", "user/user1/"]
		]
	}`)

	parsedTime, err := time.Parse(time.RFC3339, "2015-12-30T12:00:00.000Z")
	require.NoError(t, err)

	expectedPolicy := &postPolicy{
		Expiration: parsedTime,
		Conditions: []*policyCondition{
			{
				Matching: "content-length-range",
				Key:      "1048576",
				Value:    "10485760",
			},
			{
				Matching: "eq",
				Key:      "bucket",
				Value:    "bucketName",
			},
			{
				Matching: "starts-with",
				Key:      "key",
				Value:    "user/user1/",
			},
		},
	}

	policy := &postPolicy{}
	err = json.Unmarshal(data, policy)
	require.NoError(t, err)

	require.Equal(t, expectedPolicy, policy)
}

func TestEmptyPostPolicy(t *testing.T) {
	r := &http.Request{
		MultipartForm: &multipart.Form{
			Value: map[string][]string{
				"key": {"some-key"},
			},
		},
	}
	reqInfo := &middleware.ReqInfo{}
	metadata := make(map[string]string)

	_, err := checkPostPolicy(r, reqInfo, metadata)
	require.NoError(t, err)
}

func TestPutObjectOverrideCopiesNumber(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-copies-number", "object-for-copies-number"
	bktInfo := createTestBucket(tc, bktName)

	w, r := prepareTestRequest(tc, bktName, objName, nil)
	r.Header.Set(api.MetadataPrefix+strings.ToUpper(layer.AttributeFrostfsCopiesNumber), "1")
	tc.Handler().PutObjectHandler(w, r)

	p := &layer.HeadObjectParams{
		BktInfo: bktInfo,
		Object:  objName,
	}

	objInfo, err := tc.Layer().GetObjectInfo(tc.Context(), p)
	require.NoError(t, err)
	require.Equal(t, "1", objInfo.Headers[layer.AttributeFrostfsCopiesNumber])
}

func TestPutObjectWithNegativeContentLength(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-put", "object-for-put"
	createTestBucket(tc, bktName)
	content := []byte("content")

	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
	r.ContentLength = -1
	tc.Handler().PutObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w, r = prepareTestRequest(tc, bktName, objName, nil)
	tc.Handler().HeadObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)
	require.Equal(t, strconv.Itoa(len(content)), w.Header().Get(api.ContentLength))
}

func TestPutObjectWithStreamBodyError(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-put", "object-for-put"
	createTestBucket(tc, bktName)
	content := []byte("content")

	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
	r.Header.Set(api.AmzContentSha256, api.StreamingContentSHA256)
	r.Header.Set(api.ContentEncoding, api.AwsChunked)
	tc.Handler().PutObjectHandler(w, r)
	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentLength))

	checkNotFound(t, tc, bktName, objName, emptyVersion)
}

func TestPutObjectWithWrapReaderDiscardOnError(t *testing.T) {
	tc := prepareHandlerContext(t)

	bktName, objName := "bucket-for-put", "object-for-put"
	createTestBucket(tc, bktName)
	content := make([]byte, 128*1024)
	_, err := rand.Read(content)
	require.NoError(t, err)

	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
	tc.tp.SetObjectPutError(objName, errors.New("some error"))
	numGoroutineBefore := runtime.NumGoroutine()
	tc.Handler().PutObjectHandler(w, r)
	numGoroutineAfter := runtime.NumGoroutine()
	require.Equal(t, numGoroutineBefore, numGoroutineAfter, "goroutines shouldn't leak during put object")
}

func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "examplebucket", "chunkObject.txt"
	createTestBucket(hc, bktName)

	w, req, chunk := getChunkedRequest(hc.context, t, bktName, objName)
	hc.Handler().PutObjectHandler(w, req)
	assertStatus(t, w, http.StatusOK)

	data := getObjectRange(t, hc, bktName, objName, 0, 66824)
	for i := range chunk {
		require.Equal(t, chunk[i], data[i])
	}
}

func TestPutChunkedTestContentEncoding(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "examplebucket", "chunkObject.txt"
	createTestBucket(hc, bktName)

	w, req, _ := getChunkedRequest(hc.context, t, bktName, objName)
	req.Header.Set(api.ContentEncoding, api.AwsChunked+",gzip")

	hc.Handler().PutObjectHandler(w, req)
	assertStatus(t, w, http.StatusOK)

	resp := headObjectBase(hc, bktName, objName, emptyVersion)
	require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))

	w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
	req.Header.Set(api.ContentEncoding, "gzip")
	hc.Handler().PutObjectHandler(w, req)
	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrInvalidEncodingMethod))

	hc.config.bypassContentEncodingInChunks = true
	w, req, _ = getChunkedRequest(hc.context, t, bktName, objName)
	req.Header.Set(api.ContentEncoding, "gzip")
	hc.Handler().PutObjectHandler(w, req)
	assertStatus(t, w, http.StatusOK)

	resp = headObjectBase(hc, bktName, objName, emptyVersion)
	require.Equal(t, "gzip", resp.Header().Get(api.ContentEncoding))
}

// getChunkedRequest builds a streaming (aws-chunked) PUT request signed with AWS Signature V4.
// The credentials, chunk signatures and signing time come from the AWS documentation example
// for STREAMING-AWS4-HMAC-SHA256-PAYLOAD uploads.
func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
	chunk := make([]byte, 65*1024)
	for i := range chunk {
		chunk[i] = 'a'
	}
	chunk1 := chunk[:64*1024]
	chunk2 := chunk[64*1024:]

	AWSAccessKeyID := "AKIAIOSFODNN7EXAMPLE"
	AWSSecretAccessKey := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"

	awsCreds := credentials.NewStaticCredentials(AWSAccessKeyID, AWSSecretAccessKey, "")
	signer := v4.NewSigner(awsCreds)

	reqBody := bytes.NewBufferString("10000;chunk-signature=ad80c730a21e5b8d04586a2213dd63b9a0e99e0e2307b0ade35a65485a288648\r\n")
	_, err := reqBody.Write(chunk1)
	require.NoError(t, err)
	_, err = reqBody.WriteString("\r\n400;chunk-signature=0055627c9e194cb4542bae2aa5492e3c1575bbb81b612b7d234b86a503ef5497\r\n")
	require.NoError(t, err)
	_, err = reqBody.Write(chunk2)
	require.NoError(t, err)
	_, err = reqBody.WriteString("\r\n0;chunk-signature=b6c6ea8a5354eaf15b3cb7646744f4275b71ea724fed81ceb9323e279d449df9\r\n\r\n")
	require.NoError(t, err)

	req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
	require.NoError(t, err)
	req.Header.Set("content-encoding", "aws-chunked")
	req.Header.Set("content-length", "66824")
	req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
	req.Header.Set("x-amz-decoded-content-length", "66560")
	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")

	signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
	require.NoError(t, err)

	_, err = signer.Sign(req, nil, "s3", "us-east-1", signTime)
	require.NoError(t, err)

	req.Body = io.NopCloser(reqBody)

	w := httptest.NewRecorder()
	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName})
	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
	req = req.WithContext(middleware.SetClientTime(req.Context(), signTime))
	req = req.WithContext(middleware.SetAuthHeaders(req.Context(), &auth.AuthHeader{
		AccessKeyID: AWSAccessKeyID,
		SignatureV4: "4f232c4386841ef735655705268965c44a0e4690baa4adea153f7db9fa80a0a9",
		Service:     "s3",
		Region:      "us-east-1",
	}))
	req = req.WithContext(middleware.SetBoxData(req.Context(), &accessbox.Box{
		Gate: &accessbox.GateData{
			AccessKey: AWSSecretAccessKey,
		},
	}))

	return w, req, chunk
}

func TestCreateBucket(t *testing.T) {
	hc := prepareHandlerContext(t)
	bktName := "bkt-name"

	box, _ := createAccessBox(t)
	createBucket(t, hc, bktName, box)
	createBucketAssertS3Error(hc, bktName, box, s3errors.ErrBucketAlreadyOwnedByYou)

	box2, _ := createAccessBox(t)
	createBucketAssertS3Error(hc, bktName, box2, s3errors.ErrBucketAlreadyExists)
}

func TestPutObjectClientCut(t *testing.T) {
	hc := prepareHandlerContext(t)
	bktName, objName1, objName2 := "bkt-name", "obj-name1", "obj-name2"
	createTestBucket(hc, bktName)

	putObject(hc, bktName, objName1)
	obj1 := getObjectFromLayer(hc, objName1)[0]
	require.Empty(t, getObjectAttribute(obj1, "s3-client-cut"))

	hc.layerFeatures.SetClientCut(true)
	putObject(hc, bktName, objName2)
	obj2 := getObjectFromLayer(hc, objName2)[0]
	require.Equal(t, "true", getObjectAttribute(obj2, "s3-client-cut"))
}

func getObjectFromLayer(hc *handlerContext, objName string) []*object.Object {
	var res []*object.Object
	for _, o := range hc.tp.Objects() {
		if objName == getObjectAttribute(o, object.AttributeFilePath) {
			res = append(res, o)
		}
	}
	return res
}

func getObjectAttribute(obj *object.Object, attrName string) string {
	for _, attr := range obj.Attributes() {
		if attr.Key() == attrName {
			return attr.Value()
		}
	}
	return ""
}