package handler

import (
	"encoding/xml"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"strconv"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
	"github.com/google/uuid"
	"go.uber.org/zap"
)
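
// XML request and response bodies for the S3 multipart upload API.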
type (
	InitiateMultipartUploadResponse struct {
		XMLName  xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ InitiateMultipartUploadResult" json:"-"`
		Bucket   string   `xml:"Bucket"`
		Key      string   `xml:"Key"`
		UploadID string   `xml:"UploadId"`
	}

	CompleteMultipartUploadResponse struct {
		XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUploadResult" json:"-"`
		Bucket  string   `xml:"Bucket"`
		Key     string   `xml:"Key"`
		ETag    string   `xml:"ETag"`
	}

	ListMultipartUploadsResponse struct {
		XMLName            xml.Name          `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListMultipartUploadsResult" json:"-"`
		Bucket             string            `xml:"Bucket"`
		CommonPrefixes     []CommonPrefix    `xml:"CommonPrefixes"`
		Delimiter          string            `xml:"Delimiter,omitempty"`
		EncodingType       string            `xml:"EncodingType,omitempty"`
		IsTruncated        bool              `xml:"IsTruncated"`
		KeyMarker          string            `xml:"KeyMarker"`
		MaxUploads         int               `xml:"MaxUploads"`
		NextKeyMarker      string            `xml:"NextKeyMarker,omitempty"`
		NextUploadIDMarker string            `xml:"NextUploadIdMarker,omitempty"`
		Prefix             string            `xml:"Prefix"`
		Uploads            []MultipartUpload `xml:"Upload"`
		UploadIDMarker     string            `xml:"UploadIdMarker,omitempty"`
	}

	ListPartsResponse struct {
		XMLName              xml.Name      `xml:"http://s3.amazonaws.com/doc/2006-03-01/ ListPartsResult" json:"-"`
		Bucket               string        `xml:"Bucket"`
		Initiator            Initiator     `xml:"Initiator"`
		IsTruncated          bool          `xml:"IsTruncated"`
		Key                  string        `xml:"Key"`
		MaxParts             int           `xml:"MaxParts,omitempty"`
		NextPartNumberMarker int           `xml:"NextPartNumberMarker,omitempty"`
		Owner                Owner         `xml:"Owner"`
		Parts                []*layer.Part `xml:"Part"`
		PartNumberMarker     int           `xml:"PartNumberMarker,omitempty"`
		StorageClass         string        `xml:"StorageClass,omitempty"`
		UploadID             string        `xml:"UploadId"`
	}

	MultipartUpload struct {
		Initiated    string    `xml:"Initiated"`
		Initiator    Initiator `xml:"Initiator"`
		Key          string    `xml:"Key"`
		Owner        Owner     `xml:"Owner"`
		StorageClass string    `xml:"StorageClass,omitempty"`
		UploadID     string    `xml:"UploadId"`
	}

	Initiator struct {
		ID          string `xml:"ID"`
		DisplayName string `xml:"DisplayName"`
	}

	CompleteMultipartUpload struct {
		XMLName xml.Name               `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload"`
		Parts   []*layer.CompletedPart `xml:"Part"`
	}

	UploadPartCopyResponse struct {
		ETag         string `xml:"ETag"`
		LastModified string `xml:"LastModified"`
	}
)

const (
	uploadIDHeaderName   = "uploadId"
	partNumberHeaderName = "partNumber"

	prefixQueryName         = "prefix"
	delimiterQueryName      = "delimiter"
	maxUploadsQueryName     = "max-uploads"
	encodingTypeQueryName   = "encoding-type"
	keyMarkerQueryName      = "key-marker"
	uploadIDMarkerQueryName = "upload-id-marker"
)
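
// CreateMultipartUploadHandler handles S3 CreateMultipartUpload requests:
// it validates the ACL, tagging, encryption and metadata headers, registers
// a new multipart upload in the storage layer and responds with the
// generated upload ID.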
func (h *handler) CreateMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	uploadID := uuid.New()
	additional := []zap.Field{zap.String("uploadID", uploadID.String())}

	p := &layer.CreateMultipartParams{
		Info: &layer.UploadInfoParams{
			UploadID: uploadID.String(),
			Bkt:      bktInfo,
			Key:      reqInfo.ObjectName,
		},
		Data: &layer.UploadData{},
	}

	if containsACLHeaders(r) {
		key, err := h.bearerTokenIssuerKey(r.Context())
		if err != nil {
			h.logAndSendError(w, "couldn't get gate key", reqInfo, err, additional...)
			return
		}
		if _, err = parseACLHeaders(r.Header, key); err != nil {
			h.logAndSendError(w, "could not parse acl", reqInfo, err, additional...)
			return
		}
		p.Data.ACLHeaders = formACLHeadersForMultipart(r.Header)
	}

	if len(r.Header.Get(api.AmzTagging)) > 0 {
		p.Data.TagSet, err = parseTaggingHeader(r.Header)
		if err != nil {
			h.logAndSendError(w, "could not parse tagging", reqInfo, err, additional...)
			return
		}
	}

	p.Info.Encryption, err = formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
		return
	}

	p.Header = parseMetadata(r)
	if contentType := r.Header.Get(api.ContentType); len(contentType) > 0 {
		p.Header[api.ContentType] = contentType
	}

	p.CopiesNumbers, err = h.pickCopiesNumbers(p.Header, bktInfo.LocationConstraint)
	if err != nil {
		h.logAndSendError(w, "invalid copies number", reqInfo, err, additional...)
		return
	}

	if err = h.obj.CreateMultipartUpload(r.Context(), p); err != nil {
		h.logAndSendError(w, "could not create multipart upload", reqInfo, err, additional...)
		return
	}

	if p.Info.Encryption.Enabled() {
		addSSECHeaders(w.Header(), r.Header)
	}

	resp := InitiateMultipartUploadResponse{
		Bucket:   reqInfo.BucketName,
		Key:      reqInfo.ObjectName,
		UploadID: uploadID.String(),
	}

	if err = middleware.EncodeToResponse(w, resp); err != nil {
		h.logAndSendError(w, "could not encode InitiateMultipartUploadResponse to response", reqInfo, err, additional...)
		return
	}
}
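
// formACLHeadersForMultipart extracts the canned ACL and grant headers from
// the request so they can be stored with the upload data and applied once
// the multipart upload is completed.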
func formACLHeadersForMultipart(header http.Header) map[string]string {
	result := make(map[string]string)

	if value := header.Get(api.AmzACL); value != "" {
		result[api.AmzACL] = value
	}
	if value := header.Get(api.AmzGrantRead); value != "" {
		result[api.AmzGrantRead] = value
	}
	if value := header.Get(api.AmzGrantFullControl); value != "" {
		result[api.AmzGrantFullControl] = value
	}
	if value := header.Get(api.AmzGrantWrite); value != "" {
		result[api.AmzGrantWrite] = value
	}

	return result
}
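
// UploadPartHandler handles S3 UploadPart requests: it validates the part
// number, streams the request body to the storage layer and returns the
// part's hash in the ETag header.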
func (h *handler) UploadPartHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	var (
		queryValues = r.URL.Query()
		uploadID    = queryValues.Get(uploadIDHeaderName)
		partNumStr  = queryValues.Get(partNumberHeaderName)
		additional  = []zap.Field{zap.String("uploadID", uploadID), zap.String("partNumber", partNumStr)}
	)

	partNumber, err := strconv.Atoi(partNumStr)
	if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
		h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
		return
	}

	body, err := h.getBodyReader(r)
	if err != nil {
		h.logAndSendError(w, "failed to get body reader", reqInfo, err, additional...)
		return
	}

	var size uint64
	if r.ContentLength > 0 {
		size = uint64(r.ContentLength)
	}

	p := &layer.UploadPartParams{
		Info: &layer.UploadInfoParams{
			UploadID: uploadID,
			Bkt:      bktInfo,
			Key:      reqInfo.ObjectName,
		},
		PartNumber: partNumber,
		Size:       size,
		Reader:     body,
	}

	p.Info.Encryption, err = formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
		return
	}

	hash, err := h.obj.UploadPart(r.Context(), p)
	if err != nil {
		h.logAndSendError(w, "could not upload a part", reqInfo, err, additional...)
		return
	}

	if p.Info.Encryption.Enabled() {
		addSSECHeaders(w.Header(), r.Header)
	}

	w.Header().Set(api.ETag, hash)
	middleware.WriteSuccessResponseHeadersOnly(w)
}
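
// UploadPartCopy handles S3 UploadPartCopy requests: it resolves the copy
// source (optionally a specific version and byte range), checks the copy
// preconditions and encryption settings, and copies the source object's data
// into a part of the target upload.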
func (h *handler) UploadPartCopy(w http.ResponseWriter, r *http.Request) {
	var (
		versionID   string
		ctx         = r.Context()
		reqInfo     = middleware.GetReqInfo(ctx)
		queryValues = reqInfo.URL.Query()
		uploadID    = queryValues.Get(uploadIDHeaderName)
		partNumStr  = queryValues.Get(partNumberHeaderName)
		additional  = []zap.Field{zap.String("uploadID", uploadID), zap.String("partNumber", partNumStr)}
	)

	partNumber, err := strconv.Atoi(partNumStr)
	if err != nil || partNumber < layer.UploadMinPartNumber || partNumber > layer.UploadMaxPartNumber {
		h.logAndSendError(w, "invalid part number", reqInfo, errors.GetAPIError(errors.ErrInvalidPartNumber), additional...)
		return
	}

	src := r.Header.Get(api.AmzCopySource)
	if u, err := url.Parse(src); err == nil {
		versionID = u.Query().Get(api.QueryVersionID)
		src = u.Path
	}
	srcBucket, srcObject, err := path2BucketObject(src)
	if err != nil {
		h.logAndSendError(w, "invalid source copy", reqInfo, err, additional...)
		return
	}

	srcRange, err := parseRange(r.Header.Get(api.AmzCopySourceRange))
	if err != nil {
		h.logAndSendError(w, "could not parse copy range", reqInfo,
			errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...)
		return
	}

	srcBktInfo, err := h.getBucketAndCheckOwner(r, srcBucket, api.AmzSourceExpectedBucketOwner)
	if err != nil {
		h.logAndSendError(w, "could not get source bucket info", reqInfo, err, additional...)
		return
	}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get target bucket info", reqInfo, err, additional...)
		return
	}

	headPrm := &layer.HeadObjectParams{
		BktInfo:   srcBktInfo,
		Object:    srcObject,
		VersionID: versionID,
	}

	srcInfo, err := h.obj.GetObjectInfo(ctx, headPrm)
	if err != nil {
		if errors.IsS3Error(err, errors.ErrNoSuchKey) && versionID != "" {
			h.logAndSendError(w, "could not head source object version", reqInfo,
				errors.GetAPIError(errors.ErrBadRequest), additional...)
			return
		}
		h.logAndSendError(w, "could not head source object", reqInfo, err, additional...)
		return
	}

	args, err := parseCopyObjectArgs(r.Header)
	if err != nil {
		h.logAndSendError(w, "could not parse copy object args", reqInfo,
			errors.GetAPIError(errors.ErrInvalidCopyPartRange), additional...)
		return
	}

	if err = checkPreconditions(srcInfo, args.Conditional); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, errors.GetAPIError(errors.ErrPreconditionFailed),
			additional...)
		return
	}

	p := &layer.UploadCopyParams{
		Versioned: headPrm.Versioned(),
		Info: &layer.UploadInfoParams{
			UploadID: uploadID,
			Bkt:      bktInfo,
			Key:      reqInfo.ObjectName,
		},
		SrcObjInfo: srcInfo,
		SrcBktInfo: srcBktInfo,
		PartNumber: partNumber,
		Range:      srcRange,
	}

	p.Info.Encryption, err = formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err, additional...)
		return
	}

	if err = p.Info.Encryption.MatchObjectEncryption(layer.FormEncryptionInfo(srcInfo.Headers)); err != nil {
		h.logAndSendError(w, "encryption doesn't match object", reqInfo, fmt.Errorf("%w: %s", errors.GetAPIError(errors.ErrBadRequest), err), additional...)
		return
	}

	info, err := h.obj.UploadPartCopy(ctx, p)
	if err != nil {
		h.logAndSendError(w, "could not upload part copy", reqInfo, err, additional...)
		return
	}

	response := UploadPartCopyResponse{
		ETag:         info.HashSum,
		LastModified: info.Created.UTC().Format(time.RFC3339),
	}

	if p.Info.Encryption.Enabled() {
		addSSECHeaders(w.Header(), r.Header)
	}

	if err = middleware.EncodeToResponse(w, response); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err, additional...)
	}
}
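
// CompleteMultipartUploadHandler handles S3 CompleteMultipartUpload requests.
// While the parts are being assembled, it periodically writes whitespace to
// the response to keep the client connection alive (see periodicXMLWriter).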
func (h *handler) CompleteMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	var (
		uploadID   = r.URL.Query().Get(uploadIDHeaderName)
		uploadInfo = &layer.UploadInfoParams{
			UploadID: uploadID,
			Bkt:      bktInfo,
			Key:      reqInfo.ObjectName,
		}
		additional = []zap.Field{zap.String("uploadID", uploadID)}
	)

	reqBody := new(CompleteMultipartUpload)
	if err = h.cfg.XMLDecoder.NewCompleteMultipartDecoder(r.Body).Decode(reqBody); err != nil {
		h.logAndSendError(w, "could not read complete multipart upload xml", reqInfo,
			errors.GetAPIError(errors.ErrMalformedXML), additional...)
		return
	}
	if len(reqBody.Parts) == 0 {
		h.logAndSendError(w, "invalid xml with parts", reqInfo, errors.GetAPIError(errors.ErrMalformedXML), additional...)
		return
	}

	c := &layer.CompleteMultipartParams{
		Info:  uploadInfo,
		Parts: reqBody.Parts,
	}

	// The next operations might take some time, so we keep the client's
	// connection alive by sending periodic whitespace back, the same way
	// the Amazon S3 service does.
	stopPeriodicResponseWriter := periodicXMLWriter(w, h.cfg.CompleteMultipartKeepalive)

	// Start the complete multipart upload, which may take some time to
	// fetch the object and re-upload it part by part.
	objInfo, err := h.completeMultipartUpload(r, c, bktInfo, reqInfo)

	// Stop the periodic writer once the complete multipart upload has
	// finished, successfully or not.
	headerIsWritten := stopPeriodicResponseWriter()

	responseWriter := middleware.EncodeToResponse
	errLogger := h.logAndSendError
	// Do not send the XML declaration and HTTP headers if the periodic writer was invoked by this point.
	if headerIsWritten {
		responseWriter = middleware.EncodeToResponseNoHeader
		errLogger = h.logAndSendErrorNoHeader
	}

	if err != nil {
		errLogger(w, "complete multipart error", reqInfo, err, additional...)
		return
	}

	response := CompleteMultipartUploadResponse{
		Bucket: objInfo.Bucket,
		ETag:   objInfo.HashSum,
		Key:    objInfo.Name,
	}

	// Here we previously set the api.AmzVersionID header for versioned
	// buckets. That has not been possible since #60, because the periodic
	// whitespace XML writer used to keep the client connection alive may
	// have already sent the headers.

	if err = responseWriter(w, response); err != nil {
		errLogger(w, "something went wrong", reqInfo, err, additional...)
	}
}
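
// completeMultipartUpload assembles the uploaded parts into the final object,
// applies the tagging and ACL settings associated with the upload, and emits
// an object-created notification.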
func (h *handler) completeMultipartUpload(r *http.Request, c *layer.CompleteMultipartParams, bktInfo *data.BucketInfo, reqInfo *middleware.ReqInfo) (*data.ObjectInfo, error) {
	ctx := r.Context()
	uploadData, extendedObjInfo, err := h.obj.CompleteMultipartUpload(ctx, c)
	if err != nil {
		return nil, fmt.Errorf("could not complete multipart upload: %w", err)
	}
	objInfo := extendedObjInfo.ObjectInfo

	if len(uploadData.TagSet) != 0 {
		tagPrm := &layer.PutObjectTaggingParams{
			ObjectVersion: &layer.ObjectVersion{
				BktInfo:    bktInfo,
				ObjectName: objInfo.Name,
				VersionID:  objInfo.VersionID(),
			},
			TagSet:      uploadData.TagSet,
			NodeVersion: extendedObjInfo.NodeVersion,
		}
		if _, err = h.obj.PutObjectTagging(ctx, tagPrm); err != nil {
			return nil, fmt.Errorf("could not put tagging file of completed multipart upload: %w", err)
		}
	}

	if len(uploadData.ACLHeaders) != 0 {
		sessionTokenSetEACL, err := getSessionTokenSetEACL(ctx)
		if err != nil {
			return nil, fmt.Errorf("couldn't get eacl token: %w", err)
		}
		key, err := h.bearerTokenIssuerKey(ctx)
		if err != nil {
			return nil, fmt.Errorf("couldn't get gate key: %w", err)
		}
		acl, err := parseACLHeaders(r.Header, key)
		if err != nil {
			return nil, fmt.Errorf("could not parse acl: %w", err)
		}

		resInfo := &resourceInfo{
			Bucket: objInfo.Bucket,
			Object: objInfo.Name,
		}
		astObject, err := aclToAst(acl, resInfo)
		if err != nil {
			return nil, fmt.Errorf("could not translate acl of completed multipart upload to ast: %w", err)
		}
		if _, err = h.updateBucketACL(r, astObject, bktInfo, sessionTokenSetEACL); err != nil {
			return nil, fmt.Errorf("could not update bucket acl while completing multipart upload: %w", err)
		}
	}

	s := &SendNotificationParams{
		Event:            EventObjectCreatedCompleteMultipartUpload,
		NotificationInfo: data.NotificationInfoFromObject(objInfo),
		BktInfo:          bktInfo,
		ReqInfo:          reqInfo,
	}
	if err = h.sendNotifications(ctx, s); err != nil {
		h.reqLogger(ctx).Error(logs.CouldntSendNotification, zap.Error(err))
	}

	return objInfo, nil
}
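
// ListMultipartUploadsHandler handles S3 ListMultipartUploads requests,
// applying the prefix, delimiter, markers and max-uploads query parameters.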
func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	var (
		queryValues   = reqInfo.URL.Query()
		maxUploadsStr = queryValues.Get(maxUploadsQueryName)
		maxUploads    = layer.MaxSizeUploadsList
	)

	if maxUploadsStr != "" {
		val, err := strconv.Atoi(maxUploadsStr)
		if err != nil || val < 1 || val > 1000 {
			h.logAndSendError(w, "invalid maxUploads", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxUploads))
			return
		}
		maxUploads = val
	}

	p := &layer.ListMultipartUploadsParams{
		Bkt:            bktInfo,
		Delimiter:      queryValues.Get(delimiterQueryName),
		EncodingType:   queryValues.Get(encodingTypeQueryName),
		KeyMarker:      queryValues.Get(keyMarkerQueryName),
		MaxUploads:     maxUploads,
		Prefix:         queryValues.Get(prefixQueryName),
		UploadIDMarker: queryValues.Get(uploadIDMarkerQueryName),
	}

	list, err := h.obj.ListMultipartUploads(r.Context(), p)
	if err != nil {
		h.logAndSendError(w, "could not list multipart uploads", reqInfo, err)
		return
	}

	if err = middleware.EncodeToResponse(w, encodeListMultipartUploadsToResponse(list, p)); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}
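
// ListPartsHandler handles S3 ListParts requests, returning the parts of an
// active multipart upload starting from the optional part-number-marker.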
func (h *handler) ListPartsHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	var (
		partNumberMarker int

		queryValues = reqInfo.URL.Query()
		uploadID    = queryValues.Get(uploadIDHeaderName)
		additional  = []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}
		maxParts    = layer.MaxSizePartsList
	)

	if queryValues.Get("max-parts") != "" {
		val, err := strconv.Atoi(queryValues.Get("max-parts"))
		if err != nil || val < 0 {
			h.logAndSendError(w, "invalid MaxParts", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxParts), additional...)
			return
		}
		if val < layer.MaxSizePartsList {
			maxParts = val
		}
	}

	if queryValues.Get("part-number-marker") != "" {
		if partNumberMarker, err = strconv.Atoi(queryValues.Get("part-number-marker")); err != nil || partNumberMarker <= 0 {
			h.logAndSendError(w, "invalid PartNumberMarker", reqInfo, err, additional...)
			return
		}
	}

	p := &layer.ListPartsParams{
		Info: &layer.UploadInfoParams{
			UploadID: uploadID,
			Bkt:      bktInfo,
			Key:      reqInfo.ObjectName,
		},
		MaxParts:         maxParts,
		PartNumberMarker: partNumberMarker,
	}

	p.Info.Encryption, err = formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
		return
	}

	list, err := h.obj.ListParts(r.Context(), p)
	if err != nil {
		h.logAndSendError(w, "could not list parts", reqInfo, err, additional...)
		return
	}

	if err = middleware.EncodeToResponse(w, encodeListPartsToResponse(list, p)); err != nil {
		h.logAndSendError(w, "something went wrong", reqInfo, err)
	}
}
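
// AbortMultipartUploadHandler handles S3 AbortMultipartUpload requests and
// responds with 204 No Content on success.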
func (h *handler) AbortMultipartUploadHandler(w http.ResponseWriter, r *http.Request) {
	reqInfo := middleware.GetReqInfo(r.Context())

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	uploadID := reqInfo.URL.Query().Get(uploadIDHeaderName)
	additional := []zap.Field{zap.String("uploadID", uploadID), zap.String("Key", reqInfo.ObjectName)}

	p := &layer.UploadInfoParams{
		UploadID: uploadID,
		Bkt:      bktInfo,
		Key:      reqInfo.ObjectName,
	}

	p.Encryption, err = formEncryptionParams(r)
	if err != nil {
		h.logAndSendError(w, "invalid sse headers", reqInfo, err)
		return
	}

	if err = h.obj.AbortMultipartUpload(r.Context(), p); err != nil {
		h.logAndSendError(w, "could not abort multipart upload", reqInfo, err, additional...)
		return
	}

	w.WriteHeader(http.StatusNoContent)
}
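
// encodeListMultipartUploadsToResponse converts the layer's listing result
// into the XML response model.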
func encodeListMultipartUploadsToResponse(info *layer.ListMultipartUploadsInfo, params *layer.ListMultipartUploadsParams) *ListMultipartUploadsResponse {
	res := ListMultipartUploadsResponse{
		Bucket:             params.Bkt.Name,
		CommonPrefixes:     fillPrefixes(info.Prefixes, params.EncodingType),
		Delimiter:          params.Delimiter,
		EncodingType:       params.EncodingType,
		IsTruncated:        info.IsTruncated,
		KeyMarker:          params.KeyMarker,
		MaxUploads:         params.MaxUploads,
		NextKeyMarker:      info.NextKeyMarker,
		NextUploadIDMarker: info.NextUploadIDMarker,
		Prefix:             params.Prefix,
		UploadIDMarker:     params.UploadIDMarker,
	}

	uploads := make([]MultipartUpload, 0, len(info.Uploads))
	for _, u := range info.Uploads {
		m := MultipartUpload{
			Initiated: u.Created.UTC().Format(time.RFC3339),
			Initiator: Initiator{
				ID:          u.Owner.String(),
				DisplayName: u.Owner.String(),
			},
			Key: u.Key,
			Owner: Owner{
				ID:          u.Owner.String(),
				DisplayName: u.Owner.String(),
			},
			UploadID: u.UploadID,
		}
		uploads = append(uploads, m)
	}

	res.Uploads = uploads

	return &res
}
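
// encodeListPartsToResponse converts the layer's part listing into the XML
// response model.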
func encodeListPartsToResponse(info *layer.ListPartsInfo, params *layer.ListPartsParams) *ListPartsResponse {
	return &ListPartsResponse{
		XMLName: xml.Name{},
		Bucket:  params.Info.Bkt.Name,
		Initiator: Initiator{
			ID:          info.Owner.String(),
			DisplayName: info.Owner.String(),
		},
		IsTruncated:          info.IsTruncated,
		Key:                  params.Info.Key,
		MaxParts:             params.MaxParts,
		NextPartNumberMarker: info.NextPartNumberMarker,
		Owner: Owner{
			ID:          info.Owner.String(),
			DisplayName: info.Owner.String(),
		},
		PartNumberMarker: params.PartNumberMarker,
		UploadID:         params.Info.UploadID,
		Parts:            info.Parts,
	}
}

// periodicXMLWriter starts a goroutine that writes the XML header and then
// whitespace over time to avoid the client dropping the connection. To work
// properly, pass an `http.ResponseWriter` that implements the `http.Flusher`
// interface. It returns a stop function that reports whether the writer
// produced any output during the goroutine's execution. To disable the
// writer, pass a zero duration.
func periodicXMLWriter(w io.Writer, dur time.Duration) (stop func() bool) {
	if dur == 0 { // 0 duration disables periodic writer
		return func() bool { return false }
	}

	whitespaceChar := []byte(" ")
	closer := make(chan struct{})
	done := make(chan struct{})
	headerWritten := false

	go func() {
		defer close(done)

		tick := time.NewTicker(dur)
		defer tick.Stop()

		for {
			select {
			case <-tick.C:
				if !headerWritten {
					_, err := w.Write([]byte(xml.Header))
					headerWritten = err == nil
				}
				_, err := w.Write(whitespaceChar)
				if err != nil {
					return // is there anything we can do better than ignore error?
				}
				if buffered, ok := w.(http.Flusher); ok {
					buffered.Flush()
				}
			case <-closer:
				return
			}
		}
	}()

	stop = func() bool {
		close(closer)
		<-done // wait for goroutine to stop
		return headerWritten
	}

	return stop
}