[#462] Implement PATCH for simple objects
All checks were successful
/ DCO (pull_request) Successful in 1m6s
/ Vulncheck (pull_request) Successful in 1m8s
/ Builds (1.21) (pull_request) Successful in 1m28s
/ Builds (1.22) (pull_request) Successful in 1m21s
/ Lint (pull_request) Successful in 1m31s
/ Tests (1.21) (pull_request) Successful in 1m32s
/ Tests (1.22) (pull_request) Successful in 1m36s
Signed-off-by: Marina Biryukova <m.biryukova@yadro.com>
This commit is contained in:
parent 20719bd85c
commit 5fdb834e56
15 changed files with 719 additions and 21 deletions
@@ -187,6 +187,9 @@ const (
	ErrInvalidRequestLargeCopy
	ErrInvalidStorageClass
	VersionIDMarkerWithoutKeyMarker
	ErrInvalidRangeLength
	ErrRangeOutOfBounds
	ErrMissingContentRange

	ErrMalformedJSON
	ErrInsecureClientRequest

@@ -1739,6 +1742,24 @@ var errorCodes = errorCodeMap{
		Description:    "Part number must be an integer between 1 and 10000, inclusive",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidRangeLength: {
		ErrCode:        ErrInvalidRangeLength,
		Code:           "InvalidRange",
		Description:    "Provided range length must be equal to content length",
		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
	},
	ErrRangeOutOfBounds: {
		ErrCode:        ErrRangeOutOfBounds,
		Code:           "InvalidRange",
		Description:    "Provided range is outside of object bounds",
		HTTPStatusCode: http.StatusRequestedRangeNotSatisfiable,
	},
	ErrMissingContentRange: {
		ErrCode:        ErrMissingContentRange,
		Code:           "MissingContentRange",
		Description:    "Content-Range header is mandatory for this type of request",
		HTTPStatusCode: http.StatusBadRequest,
	},
	// Add your error structure here.
}

@@ -228,6 +228,14 @@ func getObjectRange(t *testing.T, tc *handlerContext, bktName, objName string, s
	return content
}

func getObjectVersion(tc *handlerContext, bktName, objName, version string) []byte {
	w := getObjectBaseResponse(tc, bktName, objName, version)
	assertStatus(tc.t, w, http.StatusOK)
	content, err := io.ReadAll(w.Result().Body)
	require.NoError(tc.t, err)
	return content
}

func getObjectAssertS3Error(hc *handlerContext, bktName, objName, version string, code errors.ErrorCode) {
	w := getObjectBaseResponse(hc, bktName, objName, version)
	assertS3Error(hc.t, w, errors.GetAPIError(code))

api/handler/patch.go (new file, 186 lines)
@@ -0,0 +1,186 @@
package handler

import (
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
	"go.uber.org/zap"
)

const maxPatchSize = 5 * 1024 * 1024 * 1024

func (h *handler) PatchObjectHandler(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	reqInfo := middleware.GetReqInfo(ctx)

	if _, ok := r.Header[api.ContentRange]; !ok {
		h.logAndSendError(w, "missing Content-Range", reqInfo, errors.GetAPIError(errors.ErrMissingContentRange))
		return
	}

	if _, ok := r.Header[api.ContentLength]; !ok {
		h.logAndSendError(w, "missing Content-Length", reqInfo, errors.GetAPIError(errors.ErrMissingContentLength))
		return
	}

	conditional, err := parsePatchConditionalHeaders(r.Header)
	if err != nil {
		h.logAndSendError(w, "could not parse conditional headers", reqInfo, err)
		return
	}

	bktInfo, err := h.getBucketAndCheckOwner(r, reqInfo.BucketName)
	if err != nil {
		h.logAndSendError(w, "could not get bucket info", reqInfo, err)
		return
	}

	settings, err := h.obj.GetBucketSettings(ctx, bktInfo)
	if err != nil {
		h.logAndSendError(w, "could not get bucket settings", reqInfo, err)
		return
	}

	srcObjPrm := &layer.HeadObjectParams{
		Object:    reqInfo.ObjectName,
		BktInfo:   bktInfo,
		VersionID: reqInfo.URL.Query().Get(api.QueryVersionID),
	}

	extendedSrcObjInfo, err := h.obj.GetExtendedObjectInfo(ctx, srcObjPrm)
	if err != nil {
		h.logAndSendError(w, "could not find object", reqInfo, err)
		return
	}
	srcObjInfo := extendedSrcObjInfo.ObjectInfo

	if err = checkPreconditions(srcObjInfo, conditional, h.cfg.MD5Enabled()); err != nil {
		h.logAndSendError(w, "precondition failed", reqInfo, err)
		return
	}

	srcSize, err := layer.GetObjectSize(srcObjInfo)
	if err != nil {
		h.logAndSendError(w, "failed to get source object size", reqInfo, err)
		return
	}

	byteRange, err := parsePatchByteRange(r.Header.Get(api.ContentRange), srcSize)
	if err != nil {
		h.logAndSendError(w, "could not parse byte range", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
		return
	}

	if maxPatchSize < byteRange.End-byteRange.Start+1 {
		h.logAndSendError(w, "byte range length is longer than allowed", reqInfo, errors.GetAPIError(errors.ErrInvalidRange), zap.Error(err))
		return
	}

	if uint64(r.ContentLength) != (byteRange.End - byteRange.Start + 1) {
		h.logAndSendError(w, "content-length must be equal to byte range length", reqInfo, errors.GetAPIError(errors.ErrInvalidRangeLength))
		return
	}

	if byteRange.Start > srcSize {
		h.logAndSendError(w, "start byte is greater than object size", reqInfo, errors.GetAPIError(errors.ErrRangeOutOfBounds))
		return
	}

	params := &layer.PatchObjectParams{
		Object:            srcObjInfo,
		BktInfo:           bktInfo,
		NewBytes:          r.Body,
		Range:             byteRange,
		VersioningEnabled: settings.VersioningEnabled(),
	}

	extendedObjInfo, err := h.obj.PatchObject(ctx, params)
	if err != nil {
		if isErrObjectLocked(err) {
			h.logAndSendError(w, "object is locked", reqInfo, errors.GetAPIError(errors.ErrAccessDenied))
		} else {
			h.logAndSendError(w, "could not patch object", reqInfo, err)
		}
		return
	}

	w.Header().Set(api.AmzVersionID, extendedObjInfo.ObjectInfo.VersionID())
	w.Header().Set(api.ETag, data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())))

	resp := PatchObjectResult{
		Object: PatchObject{
			LastModified: extendedObjInfo.ObjectInfo.Created.UTC().Format(time.RFC3339),
			ETag:         data.Quote(extendedObjInfo.ObjectInfo.ETag(h.cfg.MD5Enabled())),
		},
	}

	if err = middleware.EncodeToResponse(w, resp); err != nil {
		h.logAndSendError(w, "could not encode PatchObjectResult to response", reqInfo, err)
		return
	}
}

func parsePatchConditionalHeaders(headers http.Header) (*conditionalArgs, error) {
	var err error
	args := &conditionalArgs{
		IfMatch: data.UnQuote(headers.Get(api.IfMatch)),
	}

	if args.IfUnmodifiedSince, err = parseHTTPTime(headers.Get(api.IfUnmodifiedSince)); err != nil {
		return nil, err
	}

	return args, nil
}

func parsePatchByteRange(rangeStr string, objSize uint64) (*layer.RangeParams, error) {
	const prefix = "bytes "

	if rangeStr == "" {
		return nil, fmt.Errorf("empty range")
	}

	if !strings.HasPrefix(rangeStr, prefix) {
		return nil, fmt.Errorf("unknown unit in range header")
	}

	parts := strings.Split(strings.TrimPrefix(rangeStr, prefix), "/")
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid range: %s", rangeStr)
	}

	parts = strings.Split(parts[0], "-")
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid range: %s", rangeStr)
	}

	start, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return nil, fmt.Errorf("invalid start byte: %s", parts[0])
	}

	end := objSize - 1
	if len(parts[1]) > 0 {
		end, err = strconv.ParseUint(parts[1], 10, 64)
		if err != nil {
			return nil, fmt.Errorf("invalid end byte: %s", parts[1])
		}
	}

	if start > end {
		return nil, fmt.Errorf("start byte is greater than end byte")
	}

	return &layer.RangeParams{
		Start: start,
		End:   end,
	}, nil
}

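For orientation, a minimal client-side sketch of the request shape this handler accepts: the endpoint, bucket and object names below are placeholders, and a real request to the gateway would additionally carry AWS-style authentication headers, which are omitted here. The handler rejects the request if Content-Length does not equal the range length, or if the range start lies beyond the current object size.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Placeholder endpoint and object path; authentication headers are omitted.
	url := "http://localhost:8080/bucket-for-patch/object-for-patch"

	// Replace the first three bytes of the stored object with "new".
	payload := []byte("new")
	req, err := http.NewRequest(http.MethodPatch, url, bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}
	// Content-Range selects the byte range to overwrite; Content-Length must
	// equal the range length (end - start + 1), otherwise the handler answers
	// with the InvalidRange family of errors.
	req.Header.Set("Content-Range", fmt.Sprintf("bytes 0-%d/*", len(payload)-1))
	req.ContentLength = int64(len(payload))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	// On success the body is an XML PatchObjectResult with LastModified and ETag.
	fmt.Println(string(body))
}
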
api/handler/patch_test.go (new file, 292 lines)
@@ -0,0 +1,292 @@
package handler

import (
	"bytes"
	"crypto/md5"
	"crypto/sha256"
	"encoding/hex"
	"encoding/xml"
	"fmt"
	"net/http"
	"net/http/httptest"
	"net/url"
	"strconv"
	"strings"
	"testing"
	"time"

	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
	s3errors "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
	"github.com/stretchr/testify/require"
)

func TestPatch(t *testing.T) {
	tc := prepareHandlerContext(t)
	tc.config.md5Enabled = true

	bktName, objName := "bucket-for-patch", "object-for-patch"
	createTestBucket(tc, bktName)

	content := []byte("old object content")
	md5Hash := md5.New()
	md5Hash.Write(content)
	etag := data.Quote(hex.EncodeToString(md5Hash.Sum(nil)))

	w, r := prepareTestPayloadRequest(tc, bktName, objName, bytes.NewReader(content))
	created := time.Now()
	tc.Handler().PutObjectHandler(w, r)
	require.Equal(t, etag, w.Header().Get(api.ETag))

	patchPayload := []byte("new")
	sha256Hash := sha256.New()
	sha256Hash.Write(patchPayload)
	sha256Hash.Write(content[len(patchPayload):])
	hash := hex.EncodeToString(sha256Hash.Sum(nil))

	for _, tt := range []struct {
		name    string
		rng     string
		headers map[string]string
		code    s3errors.ErrorCode
	}{
		{
			name: "success",
			rng:  "bytes 0-2/*",
			headers: map[string]string{
				api.IfUnmodifiedSince: created.Format(http.TimeFormat),
				api.IfMatch:           etag,
			},
		},
		{
			name: "invalid range syntax",
			rng:  "bytes 0-2",
			code: s3errors.ErrInvalidRange,
		},
		{
			name: "invalid range length",
			rng:  "bytes 0-5/*",
			code: s3errors.ErrInvalidRangeLength,
		},
		{
			name: "invalid range start",
			rng:  "bytes 20-22/*",
			code: s3errors.ErrRangeOutOfBounds,
		},
		{
			name: "range is too long",
			rng:  "bytes 0-5368709120/*",
			code: s3errors.ErrInvalidRange,
		},
		{
			name: "If-Unmodified-Since precondition is not satisfied",
			rng:  "bytes 0-2/*",
			headers: map[string]string{
				api.IfUnmodifiedSince: created.Add(-24 * time.Hour).Format(http.TimeFormat),
			},
			code: s3errors.ErrPreconditionFailed,
		},
		{
			name: "If-Match precondition is not satisfied",
			rng:  "bytes 0-2/*",
			headers: map[string]string{
				api.IfMatch: "etag",
			},
			code: s3errors.ErrPreconditionFailed,
		},
	} {
		t.Run(tt.name, func(t *testing.T) {
			if tt.code == 0 {
				res := patchObject(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers)
				require.Equal(t, data.Quote(hash), res.Object.ETag)
			} else {
				patchObjectErr(t, tc, bktName, objName, tt.rng, patchPayload, tt.headers, tt.code)
			}
		})
	}
}

func TestPatchWithVersion(t *testing.T) {
	hc := prepareHandlerContextWithMinCache(t)
	bktName, objName := "bucket", "obj"
	createVersionedBucket(hc, bktName)
	objHeader := putObjectContent(hc, bktName, objName, "content")

	putObjectContent(hc, bktName, objName, "some content")

	patchObjectVersion(t, hc, bktName, objName, objHeader.Get(api.AmzVersionID), "bytes 7-14/*", []byte(" updated"))

	res := listObjectsVersions(hc, bktName, "", "", "", "", 3)
	require.False(t, res.IsTruncated)
	require.Len(t, res.Version, 3)

	for _, version := range res.Version {
		content := getObjectVersion(hc, bktName, objName, version.VersionID)
		if version.IsLatest {
			require.Equal(t, []byte("content updated"), content)
			continue
		}
		if version.VersionID == objHeader.Get(api.AmzVersionID) {
			require.Equal(t, []byte("content"), content)
			continue
		}
		require.Equal(t, []byte("some content"), content)
	}
}

func TestPatchEncryptedObject(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket-for-patch-encrypted", "object-for-patch-encrypted"
	createTestBucket(tc, bktName)

	w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
	setEncryptHeaders(r)
	tc.Handler().PutObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	patchObjectErr(t, tc, bktName, objName, "bytes 2-4/*", []byte("new"), nil, s3errors.ErrInternalError)
}

func TestPatchMissingHeaders(t *testing.T) {
	tc := prepareHandlerContext(t)
	bktName, objName := "bucket-for-patch-missing-headers", "object-for-patch-missing-headers"
	createTestBucket(tc, bktName)

	w, r := prepareTestPayloadRequest(tc, bktName, objName, strings.NewReader("object content"))
	setEncryptHeaders(r)
	tc.Handler().PutObjectHandler(w, r)
	assertStatus(t, w, http.StatusOK)

	w = httptest.NewRecorder()
	r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
	tc.Handler().PatchObjectHandler(w, r)
	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentRange))

	w = httptest.NewRecorder()
	r = httptest.NewRequest(http.MethodPatch, defaultURL, strings.NewReader("new"))
	r.Header.Set(api.ContentRange, "bytes 0-2/*")
	tc.Handler().PatchObjectHandler(w, r)
	assertS3Error(t, w, s3errors.GetAPIError(s3errors.ErrMissingContentLength))
}

func TestParsePatchByteRange(t *testing.T) {
	for _, tt := range []struct {
		rng      string
		size     uint64
		expected *layer.RangeParams
		err      bool
	}{
		{
			rng: "bytes 2-7/*",
			expected: &layer.RangeParams{
				Start: 2,
				End:   7,
			},
		},
		{
			rng: "bytes 2-7/3",
			expected: &layer.RangeParams{
				Start: 2,
				End:   7,
			},
		},
		{
			rng:  "bytes 2-/*",
			size: 9,
			expected: &layer.RangeParams{
				Start: 2,
				End:   8,
			},
		},
		{
			rng:  "bytes 2-/3",
			size: 9,
			expected: &layer.RangeParams{
				Start: 2,
				End:   8,
			},
		},
		{
			rng: "",
			err: true,
		},
		{
			rng: "2-7/*",
			err: true,
		},
		{
			rng: "bytes 7-2/*",
			err: true,
		},
		{
			rng: "bytes 2-7",
			err: true,
		},
		{
			rng: "bytes 2/*",
			err: true,
		},
		{
			rng: "bytes a-7/*",
			err: true,
		},
		{
			rng: "bytes 2-a/*",
			err: true,
		},
	} {
		t.Run(fmt.Sprintf("case: %s", tt.rng), func(t *testing.T) {
			rng, err := parsePatchByteRange(tt.rng, tt.size)
			if tt.err {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
				require.Equal(t, tt.expected.Start, rng.Start)
				require.Equal(t, tt.expected.End, rng.End)
			}
		})
	}
}

func patchObject(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string) *PatchObjectResult {
	w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
	assertStatus(t, w, http.StatusOK)

	result := &PatchObjectResult{}
	err := xml.NewDecoder(w.Result().Body).Decode(result)
	require.NoError(t, err)
	return result
}

func patchObjectVersion(t *testing.T, tc *handlerContext, bktName, objName, version, rng string, payload []byte) *PatchObjectResult {
	w := patchObjectBase(tc, bktName, objName, version, rng, payload, nil)
	assertStatus(t, w, http.StatusOK)

	result := &PatchObjectResult{}
	err := xml.NewDecoder(w.Result().Body).Decode(result)
	require.NoError(t, err)
	return result
}

func patchObjectErr(t *testing.T, tc *handlerContext, bktName, objName, rng string, payload []byte, headers map[string]string, code s3errors.ErrorCode) {
	w := patchObjectBase(tc, bktName, objName, "", rng, payload, headers)
	assertS3Error(t, w, s3errors.GetAPIError(code))
}

func patchObjectBase(tc *handlerContext, bktName, objName, version, rng string, payload []byte, headers map[string]string) *httptest.ResponseRecorder {
	query := make(url.Values)
	if len(version) > 0 {
		query.Add(api.QueryVersionID, version)
	}

	w, r := prepareTestRequestWithQuery(tc, bktName, objName, query, payload)
	r.Header.Set(api.ContentRange, rng)
	r.Header.Set(api.ContentLength, strconv.Itoa(len(payload)))
	for k, v := range headers {
		r.Header.Set(k, v)
	}

	tc.Handler().PatchObjectHandler(w, r)
	return w
}

@@ -195,6 +195,15 @@ type PostResponse struct {
	ETag string `xml:"Etag"`
}

type PatchObjectResult struct {
	Object PatchObject `xml:"Object"`
}

type PatchObject struct {
	LastModified string `xml:"LastModified"`
	ETag         string `xml:"ETag"`
}

// MarshalXML -- StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
	tokens := []xml.Token{start}

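As an illustration of the wire format these structs describe, here is a small standalone sketch that marshals a sample result with encoding/xml. The field values are made up, and the root element name shown simply follows the Go type name, which may differ from what the gateway's response encoder actually emits.

package main

import (
	"encoding/xml"
	"fmt"
)

// Local copies of the response structs, mirroring the tags above.
type PatchObjectResult struct {
	Object PatchObject `xml:"Object"`
}

type PatchObject struct {
	LastModified string `xml:"LastModified"`
	ETag         string `xml:"ETag"`
}

func main() {
	res := PatchObjectResult{
		Object: PatchObject{
			LastModified: "2024-08-13T12:00:00Z",               // sample timestamp
			ETag:         "8b9d0cdd6f02e1e2e3aa1b3f29a1d3ec",   // sample ETag value
		},
	}

	out, err := xml.MarshalIndent(res, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// Output:
	// <PatchObjectResult>
	//   <Object>
	//     <LastModified>2024-08-13T12:00:00Z</LastModified>
	//     <ETag>8b9d0cdd6f02e1e2e3aa1b3f29a1d3ec</ETag>
	//   </Object>
	// </PatchObjectResult>
}
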
@@ -194,6 +194,27 @@ type PrmObjectSearch struct {
	FilePrefix string
}

// PrmObjectPatch groups parameters of FrostFS.PatchObject operation.
type PrmObjectPatch struct {
	// Authentication parameters.
	PrmAuth

	// Container of the patched object.
	Container cid.ID

	// Identifier of the patched object.
	Object oid.ID

	// Object patch payload encapsulated in io.Reader primitive.
	Payload io.Reader

	// Object range to patch.
	Range *RangeParams

	// Size of original object payload.
	ObjectSize uint64
}

var (
	// ErrAccessDenied is returned from FrostFS in case of access violation.
	ErrAccessDenied = errors.New("access denied")

@@ -288,6 +309,15 @@ type FrostFS interface {
	// prevented the objects from being selected.
	SearchObjects(context.Context, PrmObjectSearch) ([]oid.ID, error)

	// PatchObject performs object patch in the FrostFS container.
	// It returns the ID of the patched object.
	//
	// It returns ErrAccessDenied on selection access violation.
	//
	// It returns exactly one non-nil value. It returns any error encountered which
	// prevented the objects from being patched.
	PatchObject(context.Context, PrmObjectPatch) (oid.ID, error)

	// TimeToEpoch computes current epoch and the epoch that corresponds to the provided now and future time.
	// Note:
	// * future time must be after the now

@@ -20,6 +20,7 @@ import (
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	oidtest "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id/test"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/session"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/user"
	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"

@@ -404,6 +405,42 @@ func (t *TestFrostFS) SearchObjects(_ context.Context, prm PrmObjectSearch) ([]o
	return res, nil
}

func (t *TestFrostFS) PatchObject(ctx context.Context, prm PrmObjectPatch) (oid.ID, error) {
	obj, err := t.retrieveObject(ctx, prm.Container, prm.Object)
	if err != nil {
		return oid.ID{}, err
	}

	newObj := *obj

	patchBytes, err := io.ReadAll(prm.Payload)
	if err != nil {
		return oid.ID{}, err
	}

	var newPayload []byte
	if prm.Range.Start > 0 {
		newPayload = append(newPayload, obj.Payload()[:prm.Range.Start]...)
	}
	newPayload = append(newPayload, patchBytes...)
	if prm.Range.End < obj.PayloadSize()-1 {
		newPayload = append(newPayload, obj.Payload()[prm.Range.End+1:]...)
	}
	newObj.SetPayload(newPayload)
	newObj.SetPayloadSize(uint64(len(newPayload)))

	var hash checksum.Checksum
	checksum.Calculate(&hash, checksum.SHA256, newPayload)
	newObj.SetPayloadChecksum(hash)

	newID := oidtest.ID()
	newObj.SetID(newID)

	t.objects[newAddress(prm.Container, newID).EncodeToString()] = &newObj

	return newID, nil
}

func (t *TestFrostFS) checkAccess(cnrID cid.ID, owner user.ID) bool {
	cnr, ok := t.containers[cnrID.EncodeToString()]
	if !ok {

@@ -4,6 +4,7 @@ import (
	"context"
	"crypto/ecdsa"
	"crypto/rand"
	"encoding/hex"
	"encoding/json"
	"encoding/xml"
	stderrors "errors"

@@ -157,6 +158,15 @@ type (
		DstEncryption encryption.Params
		CopiesNumbers []uint32
	}

	PatchObjectParams struct {
		Object            *data.ObjectInfo
		BktInfo           *data.BucketInfo
		NewBytes          io.Reader
		Range             *RangeParams
		VersioningEnabled bool
	}

	// CreateBucketParams stores bucket create request parameters.
	CreateBucketParams struct {
		Name string

@@ -531,6 +541,72 @@ func (n *Layer) CopyObject(ctx context.Context, p *CopyObjectParams) (*data.Exte
	})
}

func (n *Layer) PatchObject(ctx context.Context, p *PatchObjectParams) (*data.ExtendedObjectInfo, error) {
	if p.Object.Headers[AttributeDecryptedSize] != "" {
		return nil, fmt.Errorf("patch encrypted object")
	}

	if p.Object.Headers[MultipartObjectSize] != "" {
		// TODO: support multipart object patch
		return nil, fmt.Errorf("patch multipart object")
	}

	prmPatch := PrmObjectPatch{
		Container:  p.BktInfo.CID,
		Object:     p.Object.ID,
		Payload:    p.NewBytes,
		Range:      p.Range,
		ObjectSize: p.Object.Size,
	}
	n.prepareAuthParameters(ctx, &prmPatch.PrmAuth, p.BktInfo.Owner)

	objID, err := n.frostFS.PatchObject(ctx, prmPatch)
	if err != nil {
		return nil, fmt.Errorf("patch object: %w", err)
	}

	prmHead := PrmObjectHead{
		Container: p.BktInfo.CID,
		Object:    objID,
	}
	n.prepareAuthParameters(ctx, &prmHead.PrmAuth, p.BktInfo.Owner)

	obj, err := n.frostFS.HeadObject(ctx, prmHead)
	if err != nil {
		return nil, fmt.Errorf("head object: %w", err)
	}

	payloadChecksum, _ := obj.PayloadChecksum()
	hashSum := hex.EncodeToString(payloadChecksum.Value())
	newVersion := &data.NodeVersion{
		BaseNodeVersion: data.BaseNodeVersion{
			OID:      objID,
			ETag:     hashSum,
			FilePath: p.Object.Name,
			Size:     obj.PayloadSize(),
			Created:  &p.Object.Created,
			Owner:    &n.gateOwner,
			// TODO: Add creation epoch
		},
		IsUnversioned: !p.VersioningEnabled,
		IsCombined:    p.Object.Headers[MultipartObjectSize] != "",
	}

	if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {
		return nil, fmt.Errorf("couldn't add new version to tree service: %w", err)
	}

	p.Object.ID = objID
	p.Object.Size = obj.PayloadSize()
	p.Object.MD5Sum = ""
	p.Object.HashSum = hashSum

	return &data.ExtendedObjectInfo{
		ObjectInfo:  p.Object,
		NodeVersion: newVersion,
	}, nil
}

func getRandomOID() (oid.ID, error) {
	b := [32]byte{}
	if _, err := rand.Read(b[:]); err != nil {

@@ -74,6 +74,7 @@ const (
	AbortMultipartUploadOperation = "AbortMultipartUpload"
	DeleteObjectTaggingOperation  = "DeleteObjectTagging"
	DeleteObjectOperation         = "DeleteObject"
	PatchObjectOperation          = "PatchObject"
)

const (

@@ -357,6 +357,8 @@ func determineObjectOperation(r *http.Request) string {
	switch r.Method {
	case http.MethodOptions:
		return OptionsObjectOperation
	case http.MethodPatch:
		return PatchObjectOperation
	case http.MethodHead:
		return HeadObjectOperation
	case http.MethodGet:

@@ -87,6 +87,7 @@ type (
		AbortMultipartUploadHandler(http.ResponseWriter, *http.Request)
		ListPartsHandler(w http.ResponseWriter, r *http.Request)
		ListMultipartUploadsHandler(http.ResponseWriter, *http.Request)
		PatchObjectHandler(http.ResponseWriter, *http.Request)

		ResolveBucket(ctx context.Context, bucket string) (*data.BucketInfo, error)
		ResolveCID(ctx context.Context, bucket string) (cid.ID, error)

@@ -376,6 +377,8 @@ func objectRouter(h Handler, l *zap.Logger) chi.Router {

	objRouter.Head("/*", named(s3middleware.HeadObjectOperation, h.HeadObjectHandler))

	objRouter.Patch("/*", named(s3middleware.PatchObjectOperation, h.PatchObjectHandler))

	// GET method handlers
	objRouter.Group(func(r chi.Router) {
		r.Method(http.MethodGet, "/*", NewHandlerFilter().

@@ -534,6 +534,10 @@ func (h *handlerMock) ListMultipartUploadsHandler(w http.ResponseWriter, r *http
	h.writeResponse(w, res)
}

func (h *handlerMock) PatchObjectHandler(http.ResponseWriter, *http.Request) {
	panic("implement me")
}

func (h *handlerMock) ResolveBucket(ctx context.Context, name string) (*data.BucketInfo, error) {
	reqInfo := middleware.GetReqInfo(ctx)
	bktInfo, ok := h.buckets[reqInfo.Namespace+name]

go.mod (13 lines changed)
@@ -3,10 +3,10 @@ module git.frostfs.info/TrueCloudLab/frostfs-s3-gw
go 1.21

require (
	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240716113920-f517e3949164
	git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240809081817-47a48969b067
	git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e
	git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6
	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240722121227-fa89999d919c
	git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240813155821-98aabc45a720
	git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240611102930-ac965e8d176a
	git.frostfs.info/TrueCloudLab/zapjournald v0.0.0-20240124114243-cb2e66427d02
	github.com/aws/aws-sdk-go v1.44.6

@@ -32,7 +32,7 @@ require (
	golang.org/x/exp v0.0.0-20240222234643-814bf88cf225
	golang.org/x/net v0.23.0
	golang.org/x/text v0.14.0
	google.golang.org/grpc v1.62.0
	google.golang.org/grpc v1.63.2
	google.golang.org/protobuf v1.33.0
)

@@ -52,7 +52,6 @@ require (
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/go-logr/logr v1.2.4 // indirect
	github.com/go-logr/stdr v1.2.2 // indirect
	github.com/golang/protobuf v1.5.3 // indirect
	github.com/golang/snappy v0.0.1 // indirect
	github.com/gorilla/websocket v1.5.1 // indirect
	github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect

@@ -92,9 +91,9 @@ require (
	golang.org/x/sync v0.6.0 // indirect
	golang.org/x/sys v0.18.0 // indirect
	golang.org/x/term v0.18.0 // indirect
	google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c // indirect
	google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect
	google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
	google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
	gopkg.in/ini.v1 v1.67.0 // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum (26 lines changed)
@@ -36,16 +36,16 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240716113920-f517e3949164 h1:XxvwQKJT/f16qS3df5PBQPRYKkhy0/A7zH6644QpKD0=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240716113920-f517e3949164/go.mod h1:OBDSr+DqV1z4VDouoX3YMleNc4DPBVBWTG3WDT2PK1o=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240809081817-47a48969b067 h1:/da6lloTPujJgEYF/dgqbxY9h6TMaRHclOV9yvCcE8s=
git.frostfs.info/TrueCloudLab/frostfs-api-go/v2 v2.16.1-0.20240809081817-47a48969b067/go.mod h1:mc7j6Cc1GU1tJZNmDwEYiJJ339biNnU1Bz3wZGogMe0=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e h1:kcBqZBiFIUBATUqEuvVigtkJJWQ2Gug/eYXn967o3M4=
git.frostfs.info/TrueCloudLab/frostfs-contract v0.19.3-0.20240621131249-49e5270f673e/go.mod h1:F/fe1OoIDKr5Bz99q4sriuHDuf3aZefZy9ZsCqEtgxc=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSVCB8JNSfPG7Uk4r2oSk=
git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240722121227-fa89999d919c h1:8ZS6eUFnOhzUo9stFqwq1Zyq+Y5YNcYAidCGICcZVL4=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240722121227-fa89999d919c/go.mod h1:vluJ/+yQMcq8ZIZZSA7Te+JKClr0lgtRErjICvb8wto=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240813155821-98aabc45a720 h1:15UXpW42bfshIv/X5kww92jG2o0drHgsdFd+UJ6zD7g=
git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20240813155821-98aabc45a720/go.mod h1:XRX/bBQsDJKr040N/a0YnDhxJqaUv1XyMVj3qxnb5K0=
git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
git.frostfs.info/TrueCloudLab/hrw v1.2.1/go.mod h1:C1Ygde2n843yTZEQ0FP69jYiuaYV0kriLvP4zm8JuvM=
git.frostfs.info/TrueCloudLab/policy-engine v0.0.0-20240611102930-ac965e8d176a h1:Bk1fB4cQASPKgAVGCdlBOEp5ohZfDxqK6fZM8eP+Emo=

@@ -160,8 +160,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=

@@ -654,12 +652,12 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9 h1:9+tzLLstTlPTRyJTh+ah5wIMsBW5c4tQwGTN3thOW9Y=
google.golang.org/genproto v0.0.0-20240213162025-012b6fc9bca9/go.mod h1:mqHbVIp48Muh7Ywss/AD6I5kNVKZMmAa/QEW58Gxp2s=
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014 h1:x9PwdEgd11LgK+orcck69WVRo7DezSO4VUMPI4xpc8A=
google.golang.org/genproto/googleapis/api v0.0.0-20240205150955-31a09d347014/go.mod h1:rbHMSEDyoYX62nRVLOCc4Qt1HbsdytAYoVwgjiOhF3I=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c h1:NUsgEN92SQQqzfA+YtqYNqYmB3DMMYLlIwUZAQFVFbo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240221002015-b0ce06bbee7c/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=

@@ -680,8 +678,8 @@ google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk=
google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

@@ -389,6 +389,38 @@ func (x *FrostFS) SearchObjects(ctx context.Context, prm layer.PrmObjectSearch)
	return buf, handleObjectError("read object list", err)
}

func (x *FrostFS) PatchObject(ctx context.Context, prm layer.PrmObjectPatch) (oid.ID, error) {
	var addr oid.Address
	addr.SetContainer(prm.Container)
	addr.SetObject(prm.Object)

	var prmPatch pool.PrmObjectPatch
	prmPatch.SetAddress(addr)

	var rng object.Range
	rng.SetOffset(prm.Range.Start)
	rng.SetLength(prm.Range.End - prm.Range.Start + 1)
	if prm.Range.End >= prm.ObjectSize {
		rng.SetLength(prm.ObjectSize - prm.Range.Start)
	}

	prmPatch.SetRange(&rng)
	prmPatch.SetPayloadReader(prm.Payload)

	if prm.BearerToken != nil {
		prmPatch.UseBearer(*prm.BearerToken)
	} else {
		prmPatch.UseKey(prm.PrivateKey)
	}

	res, err := x.pool.PatchObject(ctx, prmPatch)
	if err != nil {
		return oid.ID{}, handleObjectError("patch object via connection pool", err)
	}

	return res.ObjectID, nil
}

// ResolverFrostFS represents virtual connection to the FrostFS network.
// It implements resolver.FrostFS.
type ResolverFrostFS struct {