bugfix/607-support_trailer_headers #620
9 changed files with 949 additions and 124 deletions
@@ -1,4 +1,6 @@
// This file is adopting https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go for sigv4a.
// with changes
// * add VerifyTrailerSignature

package v4a

@@ -88,6 +90,39 @@ func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSi
	}, "\n")
}

func (s *StreamSigner) VerifyTrailerSignature(payload []byte, signingTime time.Time, signature []byte) error {
	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	scope := buildCredentialScope(st, s.service)

	stringToSign := s.buildEventStreamStringToSignTrailer(payload, prevSignature, scope, &st)

	ok, err := signerCrypto.VerifySignature(&s.credentials.PrivateKey.PublicKey, makeHash(sha256.New(), []byte(stringToSign)), signature)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("v4a: invalid signature")
	}

	s.prevSignature = signature

	return nil
}

func (s *StreamSigner) buildEventStreamStringToSignTrailer(payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
	hash := sha256.New()
	return strings.Join([]string{
		"AWS4-ECDSA-P256-SHA256-TRAILER",
		signingTime.TimeFormat(),
		credentialScope,
		hex.EncodeToString(previousSignature),
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}

func buildCredentialScope(st v4Internal.SigningTime, service string) string {
	return strings.Join([]string{
		st.Format(shortTimeFormat),
@@ -1,4 +1,6 @@
// This is https://github.com/aws/aws-sdk-go-v2/blob/a2b751d1ba71f59175a41f9cae5f159f1044360f/aws/signer/v4/stream.go
// with changes
// * add GetTrailingSignature

package v4

@@ -87,3 +89,32 @@ func (s *StreamSigner) buildEventStreamStringToSign(headers, payload, previousSi
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}

// GetTrailerSignature signs the provided header and payload bytes.
func (s *StreamSigner) GetTrailerSignature(payload []byte, signingTime time.Time) ([]byte, error) {
	prevSignature := s.prevSignature

	st := v4Internal.NewSigningTime(signingTime)

	sigKey := s.signingKeyDeriver.DeriveKey(s.credentials, s.service, s.region, st)

	scope := v4Internal.BuildCredentialScope(st, s.region, s.service)

	stringToSign := s.buildEventStreamStringToSignTrailer(payload, prevSignature, scope, &st)

	signature := v4Internal.HMACSHA256(sigKey, []byte(stringToSign))
	s.prevSignature = signature

	return signature, nil
}

func (s *StreamSigner) buildEventStreamStringToSignTrailer(payload, previousSignature []byte, credentialScope string, signingTime *v4Internal.SigningTime) string {
	hash := sha256.New()
	return strings.Join([]string{
		"AWS4-HMAC-SHA256-TRAILER",
		signingTime.TimeFormat(),
		credentialScope,
		hex.EncodeToString(previousSignature),
		hex.EncodeToString(makeHash(hash, payload)),
	}, "\n")
}
@@ -311,10 +311,23 @@ func (h *handler) PutObjectHandler(w http.ResponseWriter, r *http.Request) {
	}
}

-func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
type BodyReader interface {
	io.ReadCloser
	TrailerHeaders() map[string]string
}

type noTrailerBodyReader struct {
	io.ReadCloser
}

func (r *noTrailerBodyReader) TrailerHeaders() map[string]string {
	return nil
}

func (h *handler) getBodyReader(r *http.Request) (BodyReader, error) {
	shaType, streaming := api.IsSignedStreamingV4(r)
	if !streaming {
-		return r.Body, nil
		return &noTrailerBodyReader{r.Body}, nil
	}

	encodings := r.Header.Values(api.ContentEncoding)

@@ -350,12 +363,15 @@ func (h *handler) getBodyReader(r *http.Request) (io.ReadCloser, error) {
	var (
		err         error
-		chunkReader io.ReadCloser
		chunkReader BodyReader
	)
-	if shaType == api.StreamingContentV4aSHA256 {
-		chunkReader, err = newSignV4aChunkedReader(r)
-	} else {
	switch shaType {
	case api.StreamingContentSHA256, api.StreamingContentSHA256Trailer:
		chunkReader, err = newSignV4ChunkedReader(r)
	case api.StreamingContentV4aSHA256, api.StreamingContentV4aSHA256Trailer:
		chunkReader, err = newSignV4aChunkedReader(r)
	default:
		chunkReader, err = newUnsignedChunkedReader(r.Body)
	}

	if err != nil {
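The excerpt above does not show where the handler consumes the collected trailers. As a purely hypothetical sketch (the helper name and the checksum key are assumptions, not part of this PR; it is assumed to live next to getBodyReader in the same handler package), a caller could drain the decoded payload first and only then look at TrailerHeaders(), since the map is filled only after the zero-length final chunk has been parsed:

// Hypothetical usage sketch, not part of the diff.
func saveWithTrailers(dst io.Writer, body BodyReader) (map[string]string, error) {
	if _, err := io.Copy(dst, body); err != nil { // drain the aws-chunked payload
		return nil, err
	}
	// Populated only after the final zero-length chunk has been read,
	// e.g. {"x-amz-checksum-crc64nvme": "..."} for an unsigned trailing upload.
	return body.TrailerHeaders(), nil
}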
@@ -377,6 +377,77 @@ func TestPutObjectCheckContentSHA256(t *testing.T) {
	}
}

func TestPutObjectWithStreamUnsignedBodySmall(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "test2", "tmp.txt"
	createTestBucket(hc, bktName)

	w, req, chunk := getChunkedRequestUnsignedTrailingSmall(hc.context, t, bktName, objName)
	hc.Handler().PutObjectHandler(w, req)
	assertStatus(t, w, http.StatusOK)

	w, req = prepareTestRequest(hc, bktName, objName, nil)
	hc.Handler().HeadObjectHandler(w, req)
	assertStatus(t, w, http.StatusOK)
	require.Equal(t, "5", w.Header().Get(api.ContentLength))

	data := getObjectRange(t, hc, bktName, objName, 0, 5)
	for i := range chunk {
		require.Equal(t, chunk[i], data[i])
	}
}

func TestPutObjectWithStreamUnsignedBody(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "examplebucket", "chunkObject.txt"
	createTestBucket(hc, bktName)

	w, req, chunk := getChunkedRequestUnsignedTrailing(hc.context, t, bktName, objName)
	hc.Handler().PutObjectHandler(w, req)
	assertStatus(t, w, http.StatusOK)

	w, req = prepareTestRequest(hc, bktName, objName, nil)
	hc.Handler().HeadObjectHandler(w, req)
	assertStatus(t, w, http.StatusOK)
	require.Equal(t, strconv.Itoa(awsChunkedRequestExampleDecodedContentLength), w.Header().Get(api.ContentLength))

	data := getObjectRange(t, hc, bktName, objName, 0, awsChunkedRequestExampleDecodedContentLength)
	for i := range chunk {
		require.Equal(t, chunk[i], data[i])
	}
}

func TestPutObjectWithStreamBodyAWSExampleTrailing(t *testing.T) {
	hc := prepareHandlerContext(t)

	bktName, objName := "examplebucket", "chunkObject.txt"
	createTestBucket(hc, bktName)

	t.Run("valid trailer signature", func(t *testing.T) {
		w, req, chunk := getChunkedRequestTrailing(hc.context, t, bktName, objName)
		hc.Handler().PutObjectHandler(w, req)
		assertStatus(t, w, http.StatusOK)

		w, req = prepareTestRequest(hc, bktName, objName, nil)
		hc.Handler().HeadObjectHandler(w, req)
		assertStatus(t, w, http.StatusOK)
		require.Equal(t, strconv.Itoa(awsChunkedRequestExampleDecodedContentLength), w.Header().Get(api.ContentLength))

		data := getObjectRange(t, hc, bktName, objName, 0, awsChunkedRequestExampleDecodedContentLength)
		equalDataSlices(t, chunk, data)
	})

	t.Run("invalid trailer signature", func(t *testing.T) {
		w, req, _ := getChunkedRequestTrailing(hc.context, t, bktName, objName)
		body := req.Body.(*customNopCloser)
		body.Bytes()[body.Len()-2] = 'a'
		hc.Handler().PutObjectHandler(w, req)
		assertStatus(t, w, http.StatusForbidden)
	})
}

func TestPutObjectWithStreamBodyAWSExample(t *testing.T) {
	hc := prepareHandlerContext(t)
@@ -476,9 +547,9 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
	req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
	require.NoError(t, err)
-	req.Header.Set("content-encoding", "aws-chunked")
	req.Header.Set("content-encoding", api.AwsChunked)
	req.Header.Set("content-length", strconv.Itoa(awsChunkedRequestExampleContentLength))
-	req.Header.Set("x-amz-content-sha256", "STREAMING-AWS4-HMAC-SHA256-PAYLOAD")
	req.Header.Set("x-amz-content-sha256", api.StreamingContentSHA256)
	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
@@ -510,6 +581,202 @@ func getChunkedRequest(ctx context.Context, t *testing.T, bktName, objName strin
	return w, req, chunk
}

type customNopCloser struct {
	*bytes.Buffer
}

func (c *customNopCloser) Close() error {
	return nil
}

// getChunkedRequestTrailing implements request example from
// https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-streaming-trailers.html
func getChunkedRequestTrailing(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
	chunk := make([]byte, 65*1024)
	for i := range chunk {
		chunk[i] = 'a'
	}
	chunk1 := chunk[:64*1024]
	chunk2 := chunk[64*1024:]

	AWSAccessKeyID := "AKIAIOSFODNN7EXAMPLE"
	AWSSecretAccessKey := "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"

	awsCreds := aws.Credentials{AccessKeyID: AWSAccessKeyID, SecretAccessKey: AWSSecretAccessKey}
	signer := v4.NewSigner()

	reqBody := bytes.NewBufferString("10000;chunk-signature=b474d8862b1487a5145d686f57f013e54db672cee1c953b3010fb58501ef5aa2\r\n")
	_, err := reqBody.Write(chunk1)
	require.NoError(t, err)
	_, err = reqBody.WriteString("\r\n400;chunk-signature=1c1344b170168f8e65b41376b44b20fe354e373826ccbbe2c1d40a8cae51e5c7\r\n")
	require.NoError(t, err)
	_, err = reqBody.Write(chunk2)
	require.NoError(t, err)
	_, err = reqBody.WriteString("\r\n0;chunk-signature=2ca2aba2005185cf7159c6277faf83795951dd77a3a99e6e65d5c9f85863f992\r\n")
	require.NoError(t, err)
	_, err = reqBody.WriteString("x-amz-checksum-crc32c:sOO8/Q==\n")
	require.NoError(t, err)

	// original signature is 63bddb248ad2590c92712055f51b8e78ab024eead08276b24f010b0efd74843f,
	// but we use d81f82fc3505edab99d459891051a732e8730629a2e4a59689829ca17fe2e435
	// because original signature is incorrect
	// it was calculated using the `AWS4-HMAC-SHA256-PAYLOAD` constant in canonical string instead of
	// `AWS4-HMAC-SHA256-TRAILER` that actually must be used by spec
	// (java sdk use correct `AWS4-HMAC-SHA256-TRAILER` string).
	_, err = reqBody.WriteString("x-amz-trailer-signature:d81f82fc3505edab99d459891051a732e8730629a2e4a59689829ca17fe2e435")
	require.NoError(t, err)

	req, err := http.NewRequest("PUT", "https://s3.amazonaws.com/"+bktName+"/"+objName, nil)
	require.NoError(t, err)
	req.Header.Set("content-encoding", api.AwsChunked)
	req.Header.Set("content-length", strconv.Itoa(awsChunkedRequestExampleContentLength))
	req.Header.Set("x-amz-content-sha256", api.StreamingContentSHA256Trailer)
	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")
	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32c")

	signTime, err := time.Parse("20060102T150405Z", "20130524T000000Z")
	require.NoError(t, err)

	err = signer.SignHTTP(ctx, awsCreds, req, api.StreamingContentSHA256Trailer, "s3", "us-east-1", signTime)
	require.NoError(t, err)

	req.Body = &customNopCloser{Buffer: reqBody}

	w := httptest.NewRecorder()
	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
		ClientTime: signTime,
		AuthHeaders: &middleware.AuthHeader{
			AccessKeyID: AWSAccessKeyID,
			SignatureV4: "106e2a8a18243abcf37539882f36619c00e2dfc72633413f02d3b74544bfeb8e",
			Region:      "us-east-1",
		},
		AccessBox: &accessbox.Box{
			Gate: &accessbox.GateData{
				SecretKey: AWSSecretAccessKey,
			},
		},
	}))

	return w, req, chunk
}

func getChunkedRequestUnsignedTrailing(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
	chunk := make([]byte, 65*1024)
	for i := range chunk {
		chunk[i] = 'a'
	}
	//chunk1 := chunk[:64*1024]
	//chunk2 := chunk[64*1024:]

	AWSAccessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
	AWSSecretAccessKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"

	awsCreds := aws.Credentials{AccessKeyID: AWSAccessKeyID, SecretAccessKey: AWSSecretAccessKey}
	signer := v4.NewSigner()

	reqBody := bytes.NewBufferString("10400\r\n")
	_, err := reqBody.Write(chunk)
	require.NoError(t, err)
	_, err = reqBody.WriteString("\r\n0\r\n")
	require.NoError(t, err)
	_, err = reqBody.WriteString("\r\nx-amz-checksum-crc64nvme:pRf+emrnL+A=\r\n\r\n")
	require.NoError(t, err)

	req, err := http.NewRequest("PUT", "https://localhost:8184/"+bktName+"/"+objName, nil)
	//req, err := http.NewRequest("PUT", "https://localhost:8184/test2/body", nil)
	require.NoError(t, err)
	req.Header.Set("x-amz-sdk-checksum-algorithm", "CRC64NVME")
	req.Header.Set("content-encoding", api.AwsChunked)
	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc64nvme")
	req.Header.Set("x-amz-content-sha256", api.StreamingUnsignedPayloadTrailer)
	req.Header.Set("x-amz-decoded-content-length", strconv.Itoa(awsChunkedRequestExampleDecodedContentLength))
	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")

	signTime, err := time.Parse("20060102T150405Z", "20250131T140527Z")
	require.NoError(t, err)

	err = signer.SignHTTP(ctx, awsCreds, req, api.StreamingContentSHA256Trailer, "s3", "ru", signTime)
	require.NoError(t, err)

	req.Body = io.NopCloser(reqBody)

	w := httptest.NewRecorder()
	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
		ClientTime: signTime,
		AuthHeaders: &middleware.AuthHeader{
			AccessKeyID: AWSAccessKeyID,
			SignatureV4: "a075c83779d1c3c02254fbe4c9eff0a21556d15556fc6a25db69147c4838226b",
			Region:      "ru",
		},
		AccessBox: &accessbox.Box{
			Gate: &accessbox.GateData{
				SecretKey: AWSSecretAccessKey,
			},
		},
	}))

	return w, req, chunk
}

func getChunkedRequestUnsignedTrailingSmall(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request, []byte) {
	AWSAccessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
	AWSSecretAccessKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"

	awsCreds := aws.Credentials{AccessKeyID: AWSAccessKeyID, SecretAccessKey: AWSSecretAccessKey}
	signer := v4.NewSigner()

	chunk := "tmp2\n"

	reqBody := bytes.NewBufferString("5\r\n")
	_, err := reqBody.WriteString(chunk)
	require.NoError(t, err)
	_, err = reqBody.WriteString("\r\n0\r\n")
	require.NoError(t, err)
	_, err = reqBody.WriteString("x-amz-checksum-crc64nvme:q1EYl4rI0TU=\r\n\r\n")
	require.NoError(t, err)

	req, err := http.NewRequest("PUT", "https://localhost:8184/"+bktName+"/"+objName, nil)
	require.NoError(t, err)
	req.Header.Set("x-amz-sdk-checksum-algorithm", "CRC64NVME")
	req.Header.Set("content-encoding", api.AwsChunked)
	req.Header.Set("x-amz-trailer", "x-amz-checksum-crc64nvme")
	req.Header.Set("x-amz-content-sha256", api.StreamingUnsignedPayloadTrailer)
	req.Header.Set("x-amz-decoded-content-length", "5")
	req.Header.Set("x-amz-storage-class", "REDUCED_REDUNDANCY")

	signTime, err := time.Parse("20060102T150405Z", "20250203T063745Z")
	require.NoError(t, err)

	err = signer.SignHTTP(ctx, awsCreds, req, api.StreamingContentSHA256Trailer, "s3", "ru", signTime)
	require.NoError(t, err)

	req.Body = io.NopCloser(reqBody)

	w := httptest.NewRecorder()
	reqInfo := middleware.NewReqInfo(w, req, middleware.ObjectRequest{Bucket: bktName, Object: objName}, "")
	req = req.WithContext(middleware.SetReqInfo(ctx, reqInfo))
	req = req.WithContext(middleware.SetBox(req.Context(), &middleware.Box{
		ClientTime: signTime,
		AuthHeaders: &middleware.AuthHeader{
			AccessKeyID: AWSAccessKeyID,
			SignatureV4: "a075c83779d1c3c02254fbe4c9eff0a21556d15556fc6a25db69147c4838226b",
			Region:      "ru",
		},
		AccessBox: &accessbox.Box{
			Gate: &accessbox.GateData{
				SecretKey: AWSSecretAccessKey,
			},
		},
	}))

	return w, req, []byte(chunk)
}

func getEmptyChunkedRequest(ctx context.Context, t *testing.T, bktName, objName string) (*httptest.ResponseRecorder, *http.Request) {
	AWSAccessKeyID := "48c1K4PLVb7SvmV3PjDKEuXaMh8yZMXZ8Wx9msrkKcYw06dZeaxeiPe8vyFm2WsoeVaNt7UWEjNsVkagDs8oX4XXh"
	AWSSecretAccessKey := "09260955b4eb0279dc017ba20a1ddac909cbd226c86cbb2d868e55534c8e64b0"
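A note on the trailer signature used in getChunkedRequestTrailing above: the value from the AWS documentation example is replaced because the documented one was computed with the AWS4-HMAC-SHA256-PAYLOAD constant instead of AWS4-HMAC-SHA256-TRAILER. As a minimal standalone sketch (not part of the PR), this is the string-to-sign that GetTrailerSignature hashes for that example, assuming the standard SigV4 credential-scope layout and that the buffer handed to the signer is exactly the normalized trailer line, as readTrailers in this PR builds it:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// Final-chunk signature from the AWS streaming-trailers example (hex string).
	prevSignature := "2ca2aba2005185cf7159c6277faf83795951dd77a3a99e6e65d5c9f85863f992"

	// Trailer bytes exactly as readTrailers accumulates them, '\n'-terminated.
	trailer := []byte("x-amz-checksum-crc32c:sOO8/Q==\n")
	sum := sha256.Sum256(trailer)

	stringToSign := strings.Join([]string{
		"AWS4-HMAC-SHA256-TRAILER",
		"20130524T000000Z",                   // request time
		"20130524/us-east-1/s3/aws4_request", // credential scope
		prevSignature,                        // previous (final chunk) signature
		hex.EncodeToString(sum[:]),           // hash of the trailer bytes
	}, "\n")

	fmt.Println(stringToSign)
	// HMAC-SHA256 of stringToSign with the derived signing key yields the
	// value sent as x-amz-trailer-signature.
}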
@@ -8,6 +8,8 @@ import (
	"errors"
	"io"
	"net/http"
	"slices"
	"strings"
	"time"

	v4 "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4sdk2/signer/v4"

@@ -27,16 +29,19 @@ type (
		reader       *bufio.Reader
		streamSigner *v4.StreamSigner

-		requestTime time.Time
-		buffer      []byte
-		offset      int
-		err         error
		trailerHeaders []string
		trailers       map[string]string
		requestTime    time.Time
		buffer         []byte
		offset         int
		err            error
	}
)

var (
	errGiantChunk               = errors.New("chunk too big: choose chunk size <= 16MiB")
	errMalformedChunkedEncoding = errors.New("malformed chunked encoding")
	errMalformedTrailerHeaders  = errors.New("malformed trailer headers")
)

func (c *s3ChunkReader) Close() (err error) {

@@ -107,29 +112,9 @@ func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}
-	b, err := c.reader.ReadByte()
-	if err == io.EOF {
-		err = io.ErrUnexpectedEOF
-	}
-	if err != nil {
-		c.err = err
-		return num, c.err
-	}
-	if b != '\r' {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}
-	b, err = c.reader.ReadByte()
-	if err == io.EOF {
-		err = io.ErrUnexpectedEOF
-	}
-	if err != nil {
-		c.err = err
-		return num, c.err
-	}
-	if b != '\n' {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
	if err = c.readCRLF(); err != nil {
		return num, err
	}

	if cap(c.buffer) < size {

@@ -147,23 +132,6 @@ func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
		c.err = err
		return num, c.err
	}
-	b, err = c.reader.ReadByte()
-	if b != '\r' || err != nil {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}
-	b, err = c.reader.ReadByte()
-	if err == io.EOF {
-		err = io.ErrUnexpectedEOF
-	}
-	if err != nil {
-		c.err = err
-		return num, c.err
-	}
-	if b != '\n' {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}

	// Once we have read the entire chunk successfully, we verify
	// that the received signature matches our computed signature.

@@ -181,16 +149,99 @@ func (c *s3ChunkReader) Read(buf []byte) (num int, err error) {
	// If the chunk size is zero we return io.EOF. As specified by AWS,
	// only the last chunk is zero-sized.
	if size == 0 {
		if len(c.trailerHeaders) != 0 {
			if err = c.readTrailers(); err != nil {
				c.err = err
				return num, c.err
			}
		} else if err = c.readCRLF(); err != nil {
			return num, err
		}

		c.err = io.EOF
		return num, c.err
	}

	if err = c.readCRLF(); err != nil {
		return num, err
	}

	c.offset = copy(buf, c.buffer)
	num += c.offset
	return num, err
}

-func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, error) {
func (c *s3ChunkReader) readCRLF() error {
	for _, ch := range [2]byte{'\r', '\n'} {
		b, err := c.reader.ReadByte()
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}

		if err != nil {
			c.err = err
			return c.err
		}
		if b != ch {
			c.err = errMalformedChunkedEncoding
			return c.err
		}
	}

	return nil
}

func (c *s3ChunkReader) readTrailers() error {
	var k, v []byte
	var err error
	for err == nil {
		k, err = c.reader.ReadBytes(':')
		if err != nil {
			if err == io.EOF {
				break
			}
			c.err = errMalformedTrailerHeaders
			return c.err
		}
		v, err = c.reader.ReadBytes('\n')
		if err != nil && err != io.EOF {
			c.err = errMalformedTrailerHeaders
			return c.err
		}
		if len(v) >= 2 && v[len(v)-2] == '\r' {
			v[len(v)-2] = '\n'
			v = v[:len(v)-1]
		}

		switch {
		case slices.Contains(c.trailerHeaders, string(k[:len(k)-1])):
			c.buffer = append(append(c.buffer, k...), v...) // todo use copy
		case string(k) == "x-amz-trailer-signature:":
			calculatedSignature, err := c.streamSigner.GetTrailerSignature(c.buffer, c.requestTime)
			if err != nil {
				c.err = err
				return c.err
			}
			if string(v[:64]) != hex.EncodeToString(calculatedSignature) {
				c.err = errs.GetAPIError(errs.ErrSignatureDoesNotMatch)
				return c.err
			}
		default:
			c.err = errMalformedTrailerHeaders
			return c.err
		}

		c.trailers[string(k[:len(k)-1])] = string(v[:len(v)-1])
	}

	return nil
}

func (c *s3ChunkReader) TrailerHeaders() map[string]string {
	return c.trailers
}

func newSignV4ChunkedReader(req *http.Request) (*s3ChunkReader, error) {
	ctx := req.Context()
	box, err := middleware.GetBoxData(ctx)
	if err != nil {

@@ -214,11 +265,19 @@ func newSignV4ChunkedReader(req *http.Request) (io.ReadCloser, error) {
	}
	newStreamSigner := v4.NewStreamSigner(currentCredentials, "s3", authHeaders.Region, seed)

	var trailerHeaders []string
	trailer := req.Header.Get("x-amz-trailer")
	if trailer != "" {
		trailerHeaders = strings.Split(trailer, ";")
	}

	return &s3ChunkReader{
-		ctx:          ctx,
-		reader:       bufio.NewReader(req.Body),
-		streamSigner: newStreamSigner,
-		requestTime:  reqTime,
-		buffer:       make([]byte, 64*1024),
		ctx:            ctx,
		reader:         bufio.NewReader(req.Body),
		streamSigner:   newStreamSigner,
		requestTime:    reqTime,
		buffer:         make([]byte, 64*1024),
		trailerHeaders: trailerHeaders,
		trailers:       make(map[string]string, len(trailerHeaders)),
	}, nil
}
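For reference, a tiny standalone sketch (not part of the diff) of the CRLF-to-LF rewrite that readTrailers applies to each trailer value before appending it to the buffer handed to GetTrailerSignature:

package main

import "fmt"

func main() {
	// v is one trailer value as returned by ReadBytes('\n'), delimiter included.
	v := []byte("Np6zMg==\r\n")
	if len(v) >= 2 && v[len(v)-2] == '\r' {
		v[len(v)-2] = '\n' // overwrite '\r' with '\n'
		v = v[:len(v)-1]   // drop the extra trailing byte
	}
	fmt.Printf("%q\n", v) // "Np6zMg==\n"
}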
@@ -2,8 +2,12 @@ package handler

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"net/http"
	"strconv"
	"strings"
	"testing"
	"time"

@@ -12,22 +16,102 @@ import (
	"github.com/stretchr/testify/require"
)

-func TestSigV4AStreaming(t *testing.T) {
-	accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
-	secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"
-
-	chunk1 := "Testing with the {sdk-java}"
-	reqBody := bytes.NewBufferString("1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**\r\n")
-	_, err := reqBody.WriteString(chunk1)
-	require.NoError(t, err)
-	_, err = reqBody.WriteString("\r\n0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****\r\n\r\n")
-	require.NoError(t, err)
-
-	req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", reqBody)
-	require.NoError(t, err)
-
-	signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
-	signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
func TestSigV4AChunkedReader(t *testing.T) {
	t.Run("with trailers", func(t *testing.T) {
		accessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
		secretKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"

		chunk1 := "Testing with the {sdk-java}"
		body := "1b;chunk-signature=3045022100956ca03d2166100b455b532de542892f73925fbcea2f6498674a39a61bb4860902202977c1d47aea548d434540f89640ce97e605d18353cbbd75a619874f02e3dd22**\r\n" +
			chunk1 +
			"\r\n0;chunk-signature=304502210097dcc1721675469910ef8712fc2af0678eb90c12216dd6228c6b621fb6f805a0022047d27d21ae2af8a8172f2ef83c81ce9d4746aa88fc9ee0ca783eaa5e71aaef6c**\r\n" +
			"x-amz-checksum-crc32:Np6zMg==\r\n" +
			"x-amz-trailer-signature:304502200ecacd9aa2c432af5a2327c22a2ff9b32f44ab8559de00309219aef105eaaac102210092cbc0e78c4bcd56490a73da8ceed1934be80f3affeffb14d8c743fc292dda4f**\r\n\r\n"

		reqBody := bytes.NewBufferString(body)
		req, err := http.NewRequest("PUT", "https://localhost:8184/test2/tmp", reqBody)
		require.NoError(t, err)
		req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32")

		signature := "3045022100ddbc6ab11785d7f23d299de7db97379116f543377a44e38170a4e43b38b0d62b02201d8dca13c67f04f45491345152db4b704768eb8bb89b5215fd59bb4a4d9d7b61"
		signingTime, err := time.Parse("20060102T150405Z", "20250203T144621Z")
		require.NoError(t, err)

		key, err := keys.NewPrivateKey()
		require.NoError(t, err)

		accessBox, err := newTestAccessBox(key)
		require.NoError(t, err)
		accessBox.Gate.SecretKey = secretKey

		ctx := middleware.SetBox(req.Context(), &middleware.Box{
			AccessBox: accessBox,
			AuthHeaders: &middleware.AuthHeader{
				AccessKeyID: accessKeyID,
				SignatureV4: signature,
			},
			ClientTime: signingTime,
		})
		req = req.WithContext(ctx)

		r, err := newSignV4aChunkedReader(req)
		require.NoError(t, err)

		data, err := io.ReadAll(r)
		require.NoError(t, err)
		require.Equal(t, chunk1, string(data))
	})

	t.Run("without trailers", func(t *testing.T) {
		accessKeyID := "2XEbqH4M3ym7a3E3esxfZ2gRLnMwDXrCN4y1SkQg5fHa09sThVmVL3EE6xeKsyMzaqu5jPi41YCaVbnwbwCTF3bx1"
		secretKey := "00637f53f842573aaa06c2164c598973cd986880987111416cf71f1619def537"

		chunk1 := "Testing with the {sdk-java}"
		reqBody := bytes.NewBufferString("1b;chunk-signature=3045022100b63692a1b20759bdabd342011823427a8952df75c93174d98ad043abca8052e002201695228a91ba986171b8d0ad20856d3d94ca3614d0a90a50a531ba8e52447b9b**\r\n")
		_, err := reqBody.WriteString(chunk1)
		require.NoError(t, err)
		_, err = reqBody.WriteString("\r\n0;chunk-signature=30440220455885a2d4e9f705256ca6b0a5a22f7f784780ccbd1c0a371e5db3059c91745b022073259dd44746cbd63261d628a04d25be5a32a974c077c5c2d83c8157fb323b9f****\r\n\r\n")
		require.NoError(t, err)

		req, err := http.NewRequest("PUT", "http://localhost:8084/test/tmp", reqBody)
		require.NoError(t, err)

		signature := "30440220574244c5ff5deba388c4e3b0541a42113179b6839b3e6b4212d255a118fa9089022056f7b9b72c93f67dbcd25fe9ca67950b5913fc00bb7a62bc276c21e828c0b6c7"
		signingTime, err := time.Parse("20060102T150405Z", "20240904T133253Z")
		require.NoError(t, err)

		key, err := keys.NewPrivateKey()
		require.NoError(t, err)

		accessBox, err := newTestAccessBox(key)
		require.NoError(t, err)
		accessBox.Gate.SecretKey = secretKey

		ctx := middleware.SetBox(req.Context(), &middleware.Box{
			AccessBox: accessBox,
			AuthHeaders: &middleware.AuthHeader{
				AccessKeyID: accessKeyID,
				SignatureV4: signature,
			},
			ClientTime: signingTime,
		})
		req = req.WithContext(ctx)

		r, err := newSignV4aChunkedReader(req)
		require.NoError(t, err)

		data, err := io.ReadAll(r)
		require.NoError(t, err)
		require.Equal(t, chunk1, string(data))
	})
}

func TestSigV4ChunkedReader(t *testing.T) {
	accessKeyID := "9uEm8zMrGWsEDWiPCnVuQLKTiGtCEXpYXt8eBG7agupw0JDySJZMFuej7PTcPzRqBUyPtFowNu1RtvHULU8XHjie6"
	secretKey := "9f546428957ed7e189b7be928906ce7d1d9cb3042dd4d2d5194e28ce8c4c3b8e"

	signature := "b740b3b2a08c541c3fc4bd155a448e25408b509a29af98a86356b894930b93e8"
	signingTime, err := time.Parse("20060102T150405Z", "20250203T134442Z")
	require.NoError(t, err)

	key, err := keys.NewPrivateKey()

@@ -37,21 +121,117 @@ func TestSigV4AStreaming(t *testing.T) {
	require.NoError(t, err)
	accessBox.Gate.SecretKey = secretKey

-	ctx := middleware.SetBox(req.Context(), &middleware.Box{
-		AccessBox: accessBox,
-		AuthHeaders: &middleware.AuthHeader{
-			AccessKeyID: accessKeyID,
-			SignatureV4: signature,
-		},
-		ClientTime: signingTime,
-	})
-	req = req.WithContext(ctx)
-
-	r, err := newSignV4aChunkedReader(req)
-	require.NoError(t, err)
-
-	data, err := io.ReadAll(r)
-	require.NoError(t, err)
-
-	require.Equal(t, chunk1, string(data))
	setBoxFn := func(ctx context.Context) context.Context {
		return middleware.SetBox(ctx, &middleware.Box{
			AccessBox: accessBox,
			AuthHeaders: &middleware.AuthHeader{
				AccessKeyID: accessKeyID,
				SignatureV4: signature,
				Region:      "us-east-1",
			},
			ClientTime: signingTime,
		})
	}

	chunk1 := "Testing with the {sdk-java}"

	t.Run("with trailers", func(t *testing.T) {
		body := "1b;chunk-signature=a6a9be5fff05db0b542aedb2203d892b4162250885d06b1422b173ee0ea92ba5\r\n" +
			chunk1 +
			"\r\n0;chunk-signature=31afd083a57c416c46afaf101649d7f0c6c0627cfa60c0f93d1f7ea84396ee42\r\n" +
			"x-amz-checksum-crc32:Np6zMg==\r\n" +
			"x-amz-trailer-signature:40ec0046ac730fa27a1451d00d849056c49553ee753f5d158306d05671a42125\r\n\r\n"

		reqBody := bytes.NewBufferString(body)
		req, err := http.NewRequest("PUT", "https://localhost:8184/test2/tmp", reqBody)
		require.NoError(t, err)
		req.Header.Set("x-amz-trailer", "x-amz-checksum-crc32")

		req = req.WithContext(setBoxFn(req.Context()))

		r, err := newSignV4ChunkedReader(req)
		require.NoError(t, err)

		data, err := io.ReadAll(r)
		require.NoError(t, err)
		require.Equal(t, chunk1, string(data))
	})

	t.Run("without trailers", func(t *testing.T) {
		body := "1b;chunk-signature=a6a9be5fff05db0b542aedb2203d892b4162250885d06b1422b173ee0ea92ba5\r\n" +
			chunk1 +
			"\r\n0;chunk-signature=31afd083a57c416c46afaf101649d7f0c6c0627cfa60c0f93d1f7ea84396ee42\r\n\r\n"
		reqBody := bytes.NewBufferString(body)
		req, err := http.NewRequest("PUT", "https://localhost:8184/test2/tmp", reqBody)
		require.NoError(t, err)

		req = req.WithContext(setBoxFn(req.Context()))

		r, err := newSignV4ChunkedReader(req)
		require.NoError(t, err)

		data, err := io.ReadAll(r)
		require.NoError(t, err)
		require.Equal(t, chunk1, string(data))
	})
}

func TestUnsignedChunkReader(t *testing.T) {
	chunk1 := "chunk1"
	chunk2 := "chunk2"

	t.Run("with trailer", func(t *testing.T) {
		chunks := []string{chunk1, chunk2}
		trailer := map[string]string{"x-amz-checksum-crc64nvme": "q1EYl4rI0TU="}
		body, expected := getChunkedBody(t, chunks, trailer)

		r, err := newUnsignedChunkedReader(body)
		require.NoError(t, err)

		data, err := io.ReadAll(r)
		require.NoError(t, err)
		require.Equal(t, expected, string(data))

		require.EqualValues(t, trailer, r.TrailerHeaders())
	})

	t.Run("without trailer", func(t *testing.T) {
		chunks := []string{chunk1, chunk2}
		body, expected := getChunkedBody(t, chunks, nil)

		r, err := newUnsignedChunkedReader(body)
		require.NoError(t, err)

		data, err := io.ReadAll(r)
		require.NoError(t, err)
		require.Equal(t, expected, string(data))
	})
}

func getChunkedBody(t *testing.T, chunks []string, trailers map[string]string) (*bytes.Buffer, string) {
	res := bytes.NewBufferString("")

	for i, chunk := range chunks {
		meta := strconv.FormatInt(int64(len(chunk)), 16) + "\r\n"
		if i != 0 {
			meta = "\r\n" + meta
		}
		_, err := res.WriteString(meta)
		require.NoError(t, err)
		_, err = res.WriteString(chunk)
		require.NoError(t, err)
	}

	_, err := res.WriteString("\r\n0\r\n")
	require.NoError(t, err)

	for k, v := range trailers {
		_, err := res.WriteString(fmt.Sprintf("%s:%s\n", k, v))
		require.NoError(t, err)
	}

	_, err = res.WriteString("\r\n")
	require.NoError(t, err)

	return res, strings.Join(chunks, "")
}
api/handler/s3unsignedreader.go (new file, 161 lines)

@@ -0,0 +1,161 @@
package handler

import (
	"bufio"
	"io"
)

type (
	s3UnsignedChunkReader struct {
		reader *bufio.Reader

		trailers map[string]string
		buffer   []byte
		offset   int
		err      error
	}
)

func (c *s3UnsignedChunkReader) Close() (err error) {
	return nil
}

func (c *s3UnsignedChunkReader) Read(buf []byte) (num int, err error) {
	if c.offset > 0 {
		num = copy(buf, c.buffer[c.offset:])
		if num == len(buf) {
			c.offset += num
			return num, nil
		}
		c.offset = 0
		buf = buf[num:]
	}

	var size int
	var b byte
	for {
		b, err = c.reader.ReadByte()
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		if err != nil {
			c.err = err
			return num, c.err
		}
		if b == '\r' {
			break
		}

		// Manually deserialize the size since AWS specified
		// the chunk size to be of variable width. In particular,
		// a size of 16 is encoded as `10` while a size of 64 KB
		// is `10000`.
		switch {
		case b >= '0' && b <= '9':
			size = size<<4 | int(b-'0')
		case b >= 'a' && b <= 'f':
			size = size<<4 | int(b-('a'-10))
		case b >= 'A' && b <= 'F':
			size = size<<4 | int(b-('A'-10))
		default:
			c.err = errMalformedChunkedEncoding
			return num, c.err
		}
		if size > maxChunkSize {
			c.err = errGiantChunk
			return num, c.err
		}
	}

	if b != '\r' {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}
	b, err = c.reader.ReadByte()
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		c.err = err
		return num, c.err
	}
	if b != '\n' {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}

	if cap(c.buffer) < size {
		c.buffer = make([]byte, size)
	} else {
		c.buffer = c.buffer[:size]
	}

	// Now, we read the payload and compute its SHA-256 hash.
	_, err = io.ReadFull(c.reader, c.buffer)
	if err == io.EOF && size != 0 {
		err = io.ErrUnexpectedEOF
	}
	if err != nil && err != io.EOF {
		c.err = err
		return num, c.err
	}

	// If the chunk size is zero we return io.EOF. As specified by AWS,
	// only the last chunk is zero-sized.
	if size == 0 {
		var k, v string
		for err == nil {
			k, err = c.reader.ReadString(':')
			if err != nil {
				if err == io.EOF {
					break
				}
				c.err = errMalformedTrailerHeaders
				return num, c.err
			}
			v, err = c.reader.ReadString('\n')
			if err != nil {
				c.err = errMalformedTrailerHeaders
				return num, c.err
			}
			c.trailers[k[:len(k)-1]] = v[:len(v)-1]
		}

		c.err = io.EOF
		return num, c.err
	}

	b, err = c.reader.ReadByte()
	if b != '\r' || err != nil {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}
	b, err = c.reader.ReadByte()
	if err == io.EOF {
		err = io.ErrUnexpectedEOF
	}
	if err != nil {
		c.err = err
		return num, c.err
	}
	if b != '\n' {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}

	c.offset = copy(buf, c.buffer)
	num += c.offset
	return num, err
}

func (c *s3UnsignedChunkReader) TrailerHeaders() map[string]string {
	return c.trailers
}

func newUnsignedChunkedReader(body io.Reader) (*s3UnsignedChunkReader, error) {
	return &s3UnsignedChunkReader{
		reader:   bufio.NewReader(body),
		trailers: map[string]string{},
		buffer:   make([]byte, 64*1024),
	}, nil
}
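As a quick cross-check of the variable-width size parsing above (a standalone snippet, not part of the PR): the `10400` size prefix written by getChunkedRequestUnsignedTrailing is hexadecimal and decodes to the 65 KiB chunk used there.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// "10400" is the chunk-size prefix from getChunkedRequestUnsignedTrailing.
	size, err := strconv.ParseInt("10400", 16, 64)
	fmt.Println(size, err, size == 65*1024) // 66560 <nil> true
}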
@@ -7,6 +7,8 @@ import (
	"fmt"
	"io"
	"net/http"
	"slices"
	"strings"
	"time"

	v4a "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth/signer/v4asdk2"

@@ -20,10 +22,12 @@ type (
		reader       *bufio.Reader
		streamSigner *v4a.StreamSigner

-		requestTime time.Time
-		buffer      []byte
-		offset      int
-		err         error
		trailerHeaders []string
		trailers       map[string]string
		requestTime    time.Time
		buffer         []byte
		offset         int
		err            error
	}
)

@@ -87,21 +91,9 @@ func (c *s3v4aChunkReader) Read(buf []byte) (num int, err error) {
		c.err = errMalformedChunkedEncoding
		return num, c.err
	}
-	b, err := c.reader.ReadByte()
-	if err != nil {
-		return c.handleErr(num, err)
-	}
-	if b != '\r' {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}
-	b, err = c.reader.ReadByte()
-	if err != nil {
-		return c.handleErr(num, err)
-	}
-	if b != '\n' {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
	if err = c.readCRLF(); err != nil {
		return num, err
	}

	if cap(c.buffer) < size {

@@ -119,19 +111,6 @@ func (c *s3v4aChunkReader) Read(buf []byte) (num int, err error) {
		c.err = err
		return num, c.err
	}
-	b, err = c.reader.ReadByte()
-	if b != '\r' || err != nil {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}
-	b, err = c.reader.ReadByte()
-	if err != nil {
-		return c.handleErr(num, err)
-	}
-	if b != '\n' {
-		c.err = errMalformedChunkedEncoding
-		return num, c.err
-	}

	// Once we have read the entire chunk successfully, we verify
	// that the received signature is valid.

@@ -150,10 +129,23 @@ func (c *s3v4aChunkReader) Read(buf []byte) (num int, err error) {
	// If the chunk size is zero we return io.EOF. As specified by AWS,
	// only the last chunk is zero-sized.
	if size == 0 {
		if len(c.trailerHeaders) != 0 {
			if err = c.readTrailers(); err != nil {
				c.err = err
				return num, c.err
			}
		} else if err = c.readCRLF(); err != nil {
			return num, err
		}

		c.err = io.EOF
		return num, c.err
	}

	if err = c.readCRLF(); err != nil {
		return num, err
	}

	c.offset = copy(buf, c.buffer)
	num += c.offset
	return num, err

@@ -168,7 +160,78 @@ func (c *s3v4aChunkReader) handleErr(num int, err error) (int, error) {
	return num, c.err
}

-func newSignV4aChunkedReader(req *http.Request) (io.ReadCloser, error) {
func (c *s3v4aChunkReader) readCRLF() error {
	for _, ch := range [2]byte{'\r', '\n'} {
		b, err := c.reader.ReadByte()
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}

		if err != nil {
			c.err = err
			return c.err
		}
		if b != ch {
			c.err = errMalformedChunkedEncoding
			return c.err
		}
	}

	return nil
}

func (c *s3v4aChunkReader) readTrailers() error {
	var k, v []byte
	var err error
	for err == nil {
		k, err = c.reader.ReadBytes(':')
		if err != nil {
			if err == io.EOF {
				break
			}
			c.err = errMalformedTrailerHeaders
			return c.err
		}
		v, err = c.reader.ReadBytes('\n')
		if err != nil && err != io.EOF {
			c.err = errMalformedTrailerHeaders
			return c.err
		}
		if len(v) >= 2 && v[len(v)-2] == '\r' {
			v[len(v)-2] = '\n'
			v = v[:len(v)-1]
		}

		switch {
		case slices.Contains(c.trailerHeaders, string(k[:len(k)-1])):
			c.buffer = append(append(c.buffer, k...), v...) // todo use copy
		case string(k) == "x-amz-trailer-signature:":
			n, err := hex.Decode(v[:], bytes.TrimRight(v[:], "*\n"))

alexvanin commented:
	Didn't know `bytes.TrimRight` supports wildcard `*` 👍

dkirillov commented:
	It doesn't. It's literally the `*` character.

fyrchik commented:
	Hm, wait, is it some kind of a wildcard or just a `*`?

dkirillov commented:
	Just `*`.
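For clarity, a standalone illustration (not from the PR) of the cutset semantics discussed above: TrimRight strips any trailing run of the listed bytes, with no wildcard matching involved.

package main

import (
	"bytes"
	"fmt"
)

func main() {
	// The cutset "*\n" means: remove trailing bytes that are either '*' or '\n',
	// in any order and any count.
	v := []byte("abc123**\n")
	fmt.Printf("%s\n", bytes.TrimRight(v, "*\n")) // abc123
}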
			if err != nil {
				c.err = errMalformedChunkedEncoding
				return c.err
			}

			if err = c.streamSigner.VerifyTrailerSignature(c.buffer, c.requestTime, v[:n]); err != nil {
				c.err = fmt.Errorf("%w: %s", errs.GetAPIError(errs.ErrSignatureDoesNotMatch), err.Error())
				return c.err
			}
		default:
			c.err = errMalformedTrailerHeaders
			return c.err
		}

		c.trailers[string(k[:len(k)-1])] = string(v[:len(v)-1])
	}

	return nil
}

func (c *s3v4aChunkReader) TrailerHeaders() map[string]string {
	return c.trailers
}

func newSignV4aChunkedReader(req *http.Request) (*s3v4aChunkReader, error) {
	box, err := middleware.GetBoxData(req.Context())
	if err != nil {
		return nil, errs.GetAPIError(errs.ErrAuthorizationHeaderMalformed)

@@ -200,10 +263,18 @@ func newSignV4aChunkedReader(req *http.Request) (io.ReadCloser, error) {

	newStreamSigner := v4a.NewStreamSigner(creds, "s3", seed)

	var trailerHeaders []string
	trailer := req.Header.Get("x-amz-trailer")
	if trailer != "" {
		trailerHeaders = strings.Split(trailer, ";")
	}

	return &s3v4aChunkReader{
-		reader:       bufio.NewReader(req.Body),
-		streamSigner: newStreamSigner,
-		requestTime:  reqTime,
-		buffer:       make([]byte, 64*1024),
		reader:         bufio.NewReader(req.Body),
		streamSigner:   newStreamSigner,
		requestTime:    reqTime,
		buffer:         make([]byte, 64*1024),
		trailerHeaders: trailerHeaders,
		trailers:       make(map[string]string, len(trailerHeaders)),
	}, nil
}
@@ -94,8 +94,11 @@ const (
	DefaultLocationConstraint = "default"

-	StreamingContentSHA256    = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
-	StreamingContentV4aSHA256 = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
	StreamingContentSHA256           = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD"
	StreamingContentSHA256Trailer    = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER"
	StreamingContentV4aSHA256        = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD"
	StreamingContentV4aSHA256Trailer = "STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER"
	StreamingUnsignedPayloadTrailer  = "STREAMING-UNSIGNED-PAYLOAD-TRAILER"

	DefaultStorageClass = "STANDARD"
)

@@ -129,6 +132,8 @@ var SystemMetadata = map[string]struct{}{
func IsSignedStreamingV4(r *http.Request) (string, bool) {
	shaHeader := r.Header.Get(AmzContentSha256)
	return shaHeader,
-		(shaHeader == StreamingContentSHA256 || shaHeader == StreamingContentV4aSHA256) &&
		(shaHeader == StreamingContentSHA256 || shaHeader == StreamingContentSHA256Trailer ||
			shaHeader == StreamingContentV4aSHA256 || shaHeader == StreamingContentV4aSHA256Trailer ||
			shaHeader == StreamingUnsignedPayloadTrailer) &&
			r.Method == http.MethodPut
}
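To make the widened check concrete, here is a small standalone sketch (not part of the PR; the `api` import path and the exported AmzContentSha256 constant are assumptions based on this diff) showing that a PUT carrying the new unsigned-trailer marker is now treated as a streaming upload:

package main

import (
	"fmt"
	"net/http"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
)

func main() {
	req, _ := http.NewRequest(http.MethodPut, "https://localhost:8184/bkt/obj", nil)
	req.Header.Set(api.AmzContentSha256, api.StreamingUnsignedPayloadTrailer)

	shaType, streaming := api.IsSignedStreamingV4(req)
	fmt.Println(shaType, streaming) // STREAMING-UNSIGNED-PAYLOAD-TRAILER true
}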
Can we assume that `k` always has a length of at least one, so this code will never panic?

Yes. The following code should prevent us from panicking:

	k, err = c.reader.ReadBytes(':')
	if err != nil {
		if err == io.EOF {
			break
		}
		c.err = errMalformedTrailerHeaders
		return c.err
	}

We get an error whenever `:` is never encountered, and that error leads to breaking out of the loop (or returning), so `k` always ends with `:` by the time `k[:len(k)-1]` is evaluated.
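For reference, a standalone illustration (not from the PR) of the bufio.Reader.ReadBytes contract that this answer relies on: when the returned error is nil, the slice always ends with the delimiter, so `len(k) >= 1`.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	r := bufio.NewReader(strings.NewReader("x-amz-checksum-crc32:Np6zMg==\n"))

	k, err := r.ReadBytes(':')
	fmt.Printf("%q %v\n", k, err) // "x-amz-checksum-crc32:" <nil>

	// No ':' left: the remaining data is returned together with io.EOF,
	// so the loop takes the break/error branch before k[:len(k)-1] runs.
	k, err = r.ReadBytes(':')
	fmt.Printf("%q %v\n", k, err) // "Np6zMg==\n" EOF
}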