feature/client-cut #192
20 changed files with 113 additions and 19 deletions
@@ -25,6 +25,7 @@ This document outlines major changes between releases.
 - Support multiple version credentials using GSet (#135)
 - Implement chunk uploading (#106)
 - Add new `kludge.bypass_content_encoding_check_in_chunks` config param (#146)
+- Add new `frostfs.client_cut` config param (#192)

 ### Changed
 - Update prometheus to v1.15.0 (#94)
@@ -17,7 +17,7 @@ func TestGetObjectPartsAttributes(t *testing.T) {

 	createTestBucket(hc, bktName)

-	putObject(t, hc, bktName, objName)
+	putObject(hc, bktName, objName)
 	result := getObjectAttributes(hc, bktName, objName, objectParts)
 	require.Nil(t, result.ObjectParts)
@@ -25,7 +25,7 @@ func TestDeleteBucketOnAlreadyRemovedError(t *testing.T) {
 	bktName, objName := "bucket-for-removal", "object-to-delete"
 	bktInfo := createTestBucket(hc, bktName)

-	putObject(t, hc, bktName, objName)
+	putObject(hc, bktName, objName)

 	addr := getAddressOfLastVersion(hc, bktInfo, objName)
 	hc.tp.SetObjectError(addr, &apistatus.ObjectAlreadyRemoved{})
@@ -66,7 +66,7 @@ func TestDeleteBucketOnNotFoundError(t *testing.T) {
 	bktName, objName := "bucket-for-removal", "object-to-delete"
 	bktInfo := createTestBucket(hc, bktName)

-	putObject(t, hc, bktName, objName)
+	putObject(hc, bktName, objName)

 	nodeVersion, err := hc.tree.GetUnversioned(hc.context, bktInfo, objName)
 	require.NoError(t, err)
@@ -98,7 +98,7 @@ func TestDeleteObjectFromSuspended(t *testing.T) {
 	bktName, objName := "bucket-versioned-for-removal", "object-to-delete"

 	createSuspendedBucket(t, tc, bktName)
-	putObject(t, tc, bktName, objName)
+	putObject(tc, bktName, objName)

 	versionID, isDeleteMarker := deleteObject(t, tc, bktName, objName, emptyVersion)
 	require.True(t, isDeleteMarker)
@@ -255,7 +255,7 @@ func TestDeleteMarkerSuspended(t *testing.T) {

 	t.Run("remove last unversioned non delete marker", func(t *testing.T) {
 		objName := "obj3"
-		putObject(t, tc, bktName, objName)
+		putObject(tc, bktName, objName)

 		nodeVersion, err := tc.tree.GetUnversioned(tc.Context(), bktInfo, objName)
 		require.NoError(t, err)
@@ -475,11 +475,11 @@ func getVersion(resp *ListObjectsVersionsResponse, objName string) []*ObjectVers
 	return res
 }

-func putObject(t *testing.T, tc *handlerContext, bktName, objName string) {
+func putObject(hc *handlerContext, bktName, objName string) {
 	body := bytes.NewReader([]byte("content"))
-	w, r := prepareTestPayloadRequest(tc, bktName, objName, body)
-	tc.Handler().PutObjectHandler(w, r)
-	assertStatus(t, w, http.StatusOK)
+	w, r := prepareTestPayloadRequest(hc, bktName, objName, body)
+	hc.Handler().PutObjectHandler(w, r)
+	assertStatus(hc.t, w, http.StatusOK)
 }

 func createSuspendedBucket(t *testing.T, tc *handlerContext, bktName string) *data.BucketInfo {
@@ -186,7 +186,7 @@ func TestGetObject(t *testing.T) {
 	bktName, objName := "bucket", "obj"
 	bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)

-	putObject(hc.t, hc, bktName, objName)
+	putObject(hc, bktName, objName)

 	checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
 	checkFound(hc.t, hc, bktName, objName, emptyVersion)
@@ -38,6 +38,8 @@ type handlerContext struct {
 	tree *tree.Tree
 	context context.Context
 	kludge *kludgeSettingsMock
+
+	layerFeatures *layer.FeatureSettingsMock
 }

 func (hc *handlerContext) Handler() *handler {
@@ -123,11 +125,14 @@ func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
 		cacheCfg = getMinCacheConfig(l)
 	}

+	features := &layer.FeatureSettingsMock{}
+
 	layerCfg := &layer.Config{
 		Caches: cacheCfg,
 		AnonKey: layer.AnonymousKey{Key: key},
 		Resolver: testResolver,
 		TreeService: treeMock,
+		Features: features,
 	}

 	var pp netmap.PlacementPolicy
@@ -154,6 +159,8 @@ func prepareHandlerContextBase(t *testing.T, minCache bool) *handlerContext {
 		tree: treeMock,
 		context: middleware.SetBoxData(context.Background(), newTestAccessBox(t, key)),
 		kludge: kludge,
+
+		layerFeatures: features,
 	}
 }
@@ -114,7 +114,7 @@ func TestHeadObject(t *testing.T) {
 	bktName, objName := "bucket", "obj"
 	bktInfo, objInfo := createVersionedBucketAndObject(hc.t, hc, bktName, objName)

-	putObject(hc.t, hc, bktName, objName)
+	putObject(hc, bktName, objName)

 	checkFound(hc.t, hc, bktName, objName, objInfo.VersionID())
 	checkFound(hc.t, hc, bktName, objName, emptyVersion)
@@ -536,7 +536,7 @@ func TestPutObjectWithLock(t *testing.T) {
 	createTestBucketWithLock(hc, bktName, lockConfig)

 	objDefault := "obj-default-retention"
-	putObject(t, hc, bktName, objDefault)
+	putObject(hc, bktName, objDefault)

 	getObjectRetentionApproximate(hc, bktName, objDefault, governanceMode, time.Now().Add(24*time.Hour))
 	getObjectLegalHold(hc, bktName, objDefault, legalHoldOff)
@@ -587,7 +587,7 @@ func TestPutLockErrors(t *testing.T) {
 	headers[api.AmzObjectLockRetainUntilDate] = "dummy"
 	putObjectWithLockFailed(t, hc, bktName, objName, headers, apiErrors.ErrInvalidRetentionDate)

-	putObject(t, hc, bktName, objName)
+	putObject(hc, bktName, objName)

 	retention := &data.Retention{Mode: governanceMode}
 	putObjectRetentionFailed(t, hc, bktName, objName, retention, apiErrors.ErrMalformedXML)
@@ -23,6 +23,7 @@ import (
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/middleware"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/creds/accessbox"
+	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
 	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/stretchr/testify/require"
 )
@@ -309,3 +310,37 @@ func TestCreateBucket(t *testing.T) {
 	box2, _ := createAccessBox(t)
 	createBucketAssertS3Error(hc, bktName, box2, s3errors.ErrBucketAlreadyExists)
 }
+
+func TestPutObjectClientCut(t *testing.T) {
+	hc := prepareHandlerContext(t)
+	bktName, objName1, objName2 := "bkt-name", "obj-name1", "obj-name2"
+	createTestBucket(hc, bktName)
+
+	putObject(hc, bktName, objName1)
+	obj1 := getObjectFromLayer(hc, objName1)[0]
+	require.Empty(t, getObjectAttribute(obj1, "s3-client-cut"))
+
+	hc.layerFeatures.SetClientCut(true)
+	putObject(hc, bktName, objName2)
+	obj2 := getObjectFromLayer(hc, objName2)[0]
+	require.Equal(t, "true", getObjectAttribute(obj2, "s3-client-cut"))
+}
+
+func getObjectFromLayer(hc *handlerContext, objName string) []*object.Object {
+	var res []*object.Object
+	for _, o := range hc.tp.Objects() {
+		if objName == getObjectAttribute(o, object.AttributeFilePath) {
+			res = append(res, o)
+		}
+	}
+	return res
+}
+
+func getObjectAttribute(obj *object.Object, attrName string) string {
+	for _, attr := range obj.Attributes() {
+		if attr.Key() == attrName {
+			return attr.Value()
+		}
+	}
+	return ""
+}
@@ -111,6 +111,9 @@ type PrmObjectCreate struct {

 	// Number of object copies that is enough to consider put successful.
 	CopiesNumber []uint32
+
+	// Enables client side object preparing.
+	ClientCut bool
 }

 // PrmObjectDelete groups parameters of FrostFS.DeleteObject operation.
@@ -25,6 +25,18 @@ import (
 	"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
 )

+type FeatureSettingsMock struct {
+	clientCut bool
+}
+
+func (k *FeatureSettingsMock) ClientCut() bool {
+	return k.clientCut
+}
+
+func (k *FeatureSettingsMock) SetClientCut(clientCut bool) {
+	k.clientCut = clientCut
+}
+
 type TestFrostFS struct {
 	FrostFS
@@ -222,6 +234,13 @@ func (t *TestFrostFS) CreateObject(_ context.Context, prm PrmObjectCreate) (oid.
 		attrs = append(attrs, *a)
 	}

+	if prm.ClientCut {
+		a := object.NewAttribute()
+		a.SetKey("s3-client-cut")
+		a.SetValue("true")
+		attrs = append(attrs, *a)
+	}
+
 	for i := range prm.Attributes {
 		a := object.NewAttribute()
 		a.SetKey(prm.Attributes[i][0])
@@ -45,6 +45,10 @@ type (
 		Resolve(ctx context.Context, name string) (cid.ID, error)
 	}

+	FeatureSettings interface {
+		ClientCut() bool
+	}
+
 	layer struct {
 		frostFS FrostFS
 		gateOwner user.ID
@@ -54,6 +58,7 @@ type (
 		ncontroller EventListener
 		cache *Cache
 		treeService TreeService
+		features FeatureSettings
 	}

 	Config struct {
@@ -63,6 +68,7 @@ type (
 		AnonKey AnonymousKey
 		Resolver BucketResolver
 		TreeService TreeService
+		Features FeatureSettings
 	}

 	// AnonymousKey contains data for anonymous requests.
@@ -301,6 +307,7 @@ func NewLayer(log *zap.Logger, frostFS FrostFS, config *Config) Client {
 		resolver: config.Resolver,
 		cache: NewCache(config.Caches),
 		treeService: config.TreeService,
+		features: config.Features,
 	}
 }
@@ -458,6 +458,7 @@ func (n *layer) objectDelete(ctx context.Context, bktInfo *data.BucketInfo, idOb
 // Returns object ID and payload sha256 hash.
 func (n *layer) objectPutAndHash(ctx context.Context, prm PrmObjectCreate, bktInfo *data.BucketInfo) (uint64, oid.ID, []byte, error) {
 	n.prepareAuthParameters(ctx, &prm.PrmAuth, bktInfo.Owner)
+	prm.ClientCut = n.features.ClientCut()
 	var size uint64
 	hash := sha256.New()
 	prm.Payload = wrapReader(prm.Payload, 64*1024, func(buf []byte) {
@@ -170,6 +170,7 @@ func prepareContext(t *testing.T, cachesConfig ...*CachesConfig) *testContext {
 		Caches: config,
 		AnonKey: AnonymousKey{Key: key},
 		TreeService: NewTreeService(),
+		Features: &FeatureSettingsMock{},
 	}

 	return &testContext{
@@ -71,6 +71,7 @@ type (
 		xmlDecoder *xml.DecoderProvider
 		maxClient maxClientsConfig
 		bypassContentEncodingInChunks atomic.Bool
+		clientCut atomic.Bool
 	}

 	maxClientsConfig struct {
@@ -144,6 +145,7 @@ func (a *App) initLayer(ctx context.Context) {
 		GateOwner: gateOwner,
 		Resolver: a.bucketResolver,
 		TreeService: tree.NewTree(services.NewPoolWrapper(a.treePool), a.log),
+		Features: a.settings,
 	}

 	// prepare object layer
@@ -176,6 +178,7 @@ func newAppSettings(log *Logger, v *viper.Viper) *appSettings {
 	}

 	settings.setBypassContentEncodingInChunks(v.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
+	settings.setClientCut(v.GetBool(cfgClientCut))

 	return settings
 }
@@ -188,6 +191,14 @@ func (s *appSettings) setBypassContentEncodingInChunks(bypass bool) {
 	s.bypassContentEncodingInChunks.Store(bypass)
 }

+func (s *appSettings) ClientCut() bool {
+	return s.clientCut.Load()
+}
+
+func (s *appSettings) setClientCut(clientCut bool) {
+	s.clientCut.Store(clientCut)
+}
+
 func (a *App) initAPI(ctx context.Context) {
 	a.initLayer(ctx)
 	a.initHandler()
@@ -568,6 +579,7 @@ func (a *App) updateSettings() {

 	a.settings.xmlDecoder.UseDefaultNamespaceForCompleteMultipart(a.cfg.GetBool(cfgKludgeUseDefaultXMLNSForCompleteMultipartUpload))
 	a.settings.setBypassContentEncodingInChunks(a.cfg.GetBool(cfgKludgeBypassContentEncodingCheckInChunks))
+	a.settings.setClientCut(a.cfg.GetBool(cfgClientCut))
 }

 func (a *App) startServices() {
@@ -138,6 +138,8 @@ const ( // Settings.
 	// Configuration of parameters of requests to FrostFS.
 	// Number of the object copies to consider PUT to FrostFS successful.
 	cfgSetCopiesNumber = "frostfs.set_copies_number"
+	// Enabling client side object preparing for PUT operations.
+	cfgClientCut = "frostfs.client_cut"

 	// List of allowed AccessKeyID prefixes.
 	cfgAllowedAccessKeyIDPrefixes = "allowed_access_key_id_prefixes"
@@ -125,6 +125,8 @@ S3_GW_CORS_DEFAULT_MAX_AGE=600
 # to consider PUT to FrostFS successful.
 # `0` or empty list means that object will be processed according to the container's placement policy
 S3_GW_FROSTFS_SET_COPIES_NUMBER=0
+# This flag enables client side object preparing.
+S3_GW_FROSTFS_CLIENT_CUT=false

 # List of allowed AccessKeyID prefixes
 # If not set, S3 GW will accept all AccessKeyIDs
@@ -150,6 +150,8 @@ frostfs:
   # Numbers of the object copies (for each replica) to consider PUT to FrostFS successful.
   # `[0]` or empty list means that object will be processed according to the container's placement policy
   set_copies_number: [0]
+  # This flag enables client side object preparing.
+  client_cut: false

   # List of allowed AccessKeyID prefixes
   # If the parameter is omitted, S3 GW will accept all AccessKeyIDs
@@ -500,17 +500,20 @@ tracing:
 # `frostfs` section

 Contains parameters of requests to FrostFS.
-This value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` (value is comma separated numbers: `1,2,3`)
+
+The `set_copies_number` value can be overridden with `X-Amz-Meta-Frostfs-Copies-Number` (value is comma separated numbers: `1,2,3`)
 header for `PutObject`, `CopyObject`, `CreateMultipartUpload`.

 ```yaml
 frostfs:
   set_copies_number: [0]
+  client_cut: false
 ```

-| Parameter           | Type       | Default value | Description                                                                                                                                                                                                      |
-|---------------------|------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| `set_copies_number` | `[]uint32` | `[0]`         | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy |
+| Parameter           | Type       | SIGHUP reload | Default value | Description                                                                                                                                                                                                      |
+|---------------------|------------|---------------|---------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| `set_copies_number` | `[]uint32` | yes           | `[0]`         | Numbers of the object copies (for each replica) to consider PUT to FrostFS successful. <br/>Default value `[0]` or empty list means that object will be processed according to the container's placement policy |
+| `client_cut`        | `bool`     | yes           | `false`       | This flag enables client side object preparing.                                                                                                                                                                  |

 # `resolve_bucket` section
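For reviewers who want to exercise the documented `X-Amz-Meta-Frostfs-Copies-Number` override end to end, here is a minimal sketch using aws-sdk-go v1 (already a test dependency in this PR); the endpoint, credentials, and bucket/key names are hypothetical, and the SDK is the one adding the `X-Amz-Meta-` prefix to `Metadata` keys on the wire:

```go
package main

import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Hypothetical gateway endpoint and credentials, not part of this PR.
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8080"),
		Region:           aws.String("us-east-1"),
		Credentials:      credentials.NewStaticCredentials("ACCESS_KEY_ID", "SECRET_KEY", ""),
		S3ForcePathStyle: aws.Bool(true),
	}))

	// The SDK sends Metadata keys as X-Amz-Meta-<key>, so this request
	// carries X-Amz-Meta-Frostfs-Copies-Number: 1,2,3 and overrides the
	// gateway's frostfs.set_copies_number for this PutObject call only.
	_, err := s3.New(sess).PutObject(&s3.PutObjectInput{
		Bucket:   aws.String("bkt-name"),
		Key:      aws.String("obj-name"),
		Body:     strings.NewReader("content"),
		Metadata: map[string]*string{"Frostfs-Copies-Number": aws.String("1,2,3")},
	})
	if err != nil {
		panic(err)
	}
}
```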
go.sum
@@ -44,8 +44,6 @@ git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0 h1:FxqFDhQYYgpe41qsIHVOcdzSV
 git.frostfs.info/TrueCloudLab/frostfs-crypto v0.6.0/go.mod h1:RUIKZATQLJ+TaYQa60X2fTDwfuhMfm8Ar60bQ5fr+vU=
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6 h1:aGQ6QaAnTerQ5Dq5b2/f9DUQtSqPkZZ/bkMx/HKuLCo=
 git.frostfs.info/TrueCloudLab/frostfs-observability v0.0.0-20230531082742-c97d21411eb6/go.mod h1:W8Nn08/l6aQ7UlIbpF7FsQou7TVpcRD1ZT1KG4TrFhE=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230821073319-342524159ac3 h1:GBRTOTRrtIvxi2TgxG7z/J7uRXiyb1SxR4247FaYCgU=
-git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230821073319-342524159ac3/go.mod h1:t1akKcUH7iBrFHX8rSXScYMP17k2kYQXMbZooiL5Juw=
 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230821090303-202412230a05 h1:OuViMF54N87FXmaBEpYw3jhzaLrJ/EWOlPL1wUkimE0=
 git.frostfs.info/TrueCloudLab/frostfs-sdk-go v0.0.0-20230821090303-202412230a05/go.mod h1:t1akKcUH7iBrFHX8rSXScYMP17k2kYQXMbZooiL5Juw=
 git.frostfs.info/TrueCloudLab/hrw v1.2.1 h1:ccBRK21rFvY5R1WotI6LNoPlizk7qSvdfD8lNIRudVc=
@@ -243,6 +243,7 @@ func (x *FrostFS) CreateObject(ctx context.Context, prm layer.PrmObjectCreate) (
 	prmPut.SetHeader(*obj)
 	prmPut.SetPayload(prm.Payload)
 	prmPut.SetCopiesNumberVector(prm.CopiesNumber)
+	prmPut.SetClientCut(prm.ClientCut)

 	if prm.BearerToken != nil {
 		prmPut.UseBearer(*prm.BearerToken)
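The hunks above wire `frostfs.client_cut` from config to the SDK call (`appSettings` to `layer.Config.Features` to `PrmObjectCreate.ClientCut` to `prmPut.SetClientCut`). Below is a standalone sketch of that pattern, with stand-in types rather than the repo's actual ones, showing why an `atomic.Bool` behind a read-only interface lets a SIGHUP reload take effect on the next PUT without extra locking:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// featureSettings mirrors the read-only interface the object layer depends on.
type featureSettings interface {
	ClientCut() bool
}

// settings stands in for the gateway settings: the flag lives in an
// atomic.Bool so a config reload can flip it concurrently with requests.
type settings struct {
	clientCut atomic.Bool
}

func (s *settings) ClientCut() bool { return s.clientCut.Load() }

func (s *settings) setClientCut(v bool) { s.clientCut.Store(v) }

// putPrm stands in for per-request PUT parameters; the layer copies the
// current flag into them before calling the FrostFS client.
type putPrm struct{ ClientCut bool }

type objectLayer struct{ features featureSettings }

func (n *objectLayer) putObject(prm putPrm) {
	prm.ClientCut = n.features.ClientCut() // read once per request
	fmt.Println("client cut for this PUT:", prm.ClientCut)
}

func main() {
	s := &settings{} // the flag defaults to false, like frostfs.client_cut
	l := &objectLayer{features: s}

	l.putObject(putPrm{}) // prints: false

	s.setClientCut(true)  // what a SIGHUP-driven settings reload would do
	l.putObject(putPrm{}) // prints: true
}
```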