Sync commits #6

Merged
alexvanin merged 6 commits from sync into master 2022-12-28 14:06:33 +00:00
9 changed files with 186 additions and 24 deletions

View file

@@ -4,25 +4,22 @@ This document outlines major changes between releases.
## [Unreleased]
### Fixed
- Empty bucket policy (#740)
- Big object removal (#749)
## [0.26.0] - 2022-12-28
### Added
- Use client time as `now` in some requests (#726)
- Timeout for individual operations in streaming RPC (#740)
- Reload policies on SIGHUP (#747)
### Added
- Authmate flags for pool timeouts (#760)
- Multiple server listeners (#742)
### Changed
- Placement policy configuration (#568)
- Improved debug logging of CID and OID values (#754)
### Removed
- Deprecated linters (#755)
### Updating from v0.25.0
### Updating from v0.25.1
New config parameters were added, and the old `default_policy` parameter was changed.
```yaml
placement_policy:
@@ -43,6 +40,17 @@ If you configure application using `.yaml` file change:
* `tls.cert_file` -> `server.0.tls.cert_file` (and set `server.0.tls.enabled: true`)
* `tls.key_file` -> `server.0.tls.key_file` (and set `server.0.tls.enabled: true`)
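For illustration, a migrated `server` section could look like the sketch below; the listener `address` key, port and file paths are assumed placeholders, and only the `tls.enabled`, `tls.cert_file` and `tls.key_file` keys come from the notes above:
```yaml
server:
  - address: 0.0.0.0:8080         # assumed listener address, adjust to your deployment
    tls:
      enabled: true               # explicitly enables TLS for this listener
      cert_file: /path/to/tls.crt # was `tls.cert_file`
      key_file: /path/to/tls.key  # was `tls.key_file`
```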
## [0.25.1] - 2022-10-30
### Fixed
- Empty bucket policy (#740)
- Big object removal (#749)
- Checksum panic (#741)
### Added
- Debian packaging (#737)
- Timeout for individual operations in streaming RPC (#750)
## [0.25.0] - 2022-10-31
### Fixed

View file

@@ -1 +1 @@
v0.25.0
v0.26.0

View file

@@ -730,6 +730,9 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
return
}
h.log.Info("bucket is created", zap.String("reqId", reqInfo.RequestID),
zap.String("bucket", reqInfo.BucketName), zap.Stringer("container_id", bktInfo.CID))
if p.ObjectLockEnabled {
sp := &layer.PutSettingsParams{
BktInfo: bktInfo,
@@ -742,8 +745,6 @@ func (h *handler) CreateBucketHandler(w http.ResponseWriter, r *http.Request) {
}
}
h.log.Info("bucket is created", zap.Stringer("container_id", bktInfo.CID))
api.WriteSuccessResponseHeadersOnly(w)
}

View file

@@ -482,11 +482,27 @@ func (n *layer) GetObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.O
// GetExtendedObjectInfo returns meta information and corresponding info from the tree service about the object.
func (n *layer) GetExtendedObjectInfo(ctx context.Context, p *HeadObjectParams) (*data.ExtendedObjectInfo, error) {
var objInfo *data.ExtendedObjectInfo
var err error
if len(p.VersionID) == 0 {
return n.headLastVersionIfNotDeleted(ctx, p.BktInfo, p.Object)
objInfo, err = n.headLastVersionIfNotDeleted(ctx, p.BktInfo, p.Object)
} else {
objInfo, err = n.headVersion(ctx, p.BktInfo, p)
}
if err != nil {
return nil, err
}
return n.headVersion(ctx, p.BktInfo, p)
reqInfo := api.GetReqInfo(ctx)
n.log.Debug("get object",
zap.String("reqId", reqInfo.RequestID),
zap.String("bucket", p.BktInfo.Name),
zap.Stringer("cid", p.BktInfo.CID),
zap.String("object", objInfo.ObjectInfo.Name),
zap.Stringer("oid", objInfo.ObjectInfo.ID))
return objInfo, nil
}
// CopyObject from one bucket into another bucket.
@@ -645,7 +661,12 @@ func (n *layer) CreateBucket(ctx context.Context, p *CreateBucketParams) (*data.
func (n *layer) ResolveBucket(ctx context.Context, name string) (cid.ID, error) {
var cnrID cid.ID
if err := cnrID.DecodeString(name); err != nil {
return n.resolver.Resolve(ctx, name)
if cnrID, err = n.resolver.Resolve(ctx, name); err != nil {
return cid.ID{}, err
}
reqInfo := api.GetReqInfo(ctx)
n.log.Info("resolve bucket", zap.String("reqId", reqInfo.RequestID), zap.String("bucket", name), zap.Stringer("cid", cnrID))
}
return cnrID, nil

View file

@@ -11,6 +11,7 @@ import (
"strings"
"time"
"github.com/TrueCloudLab/frostfs-s3-gw/api"
"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
"github.com/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
@@ -228,6 +229,13 @@ func (n *layer) uploadPart(ctx context.Context, multipartInfo *data.MultipartInf
return nil, err
}
reqInfo := api.GetReqInfo(ctx)
n.log.Debug("upload part",
zap.String("reqId", reqInfo.RequestID),
zap.String("bucket", bktInfo.Name), zap.Stringer("cid", bktInfo.CID),
zap.String("multipart upload", p.Info.UploadID),
zap.Int("part number", p.PartNumber), zap.String("object", p.Info.Key), zap.Stringer("oid", id))
partInfo := &data.PartInfo{
Key: p.Info.Key,
UploadID: p.Info.UploadID,
@@ -609,10 +617,24 @@ func (n *layer) getUploadParts(ctx context.Context, p *UploadInfoParams) (*data.
}
res := make(map[int]*data.PartInfo, len(parts))
for _, part := range parts {
partsNumbers := make([]int, len(parts))
oids := make([]string, len(parts))
for i, part := range parts {
res[part.Number] = part
partsNumbers[i] = part.Number
oids[i] = part.OID.EncodeToString()
}
reqInfo := api.GetReqInfo(ctx)
n.log.Debug("part details",
zap.String("reqId", reqInfo.RequestID),
zap.String("bucket", p.Bkt.Name),
zap.Stringer("cid", p.Bkt.CID),
zap.String("object", p.Key),
zap.String("upload id", p.UploadID),
zap.Ints("part numbers", partsNumbers),
zap.Strings("oids", oids))
return multipartInfo, res, nil
}

View file

@@ -248,6 +248,12 @@ func (n *layer) PutObject(ctx context.Context, p *PutObjectParams) (*data.Extend
return nil, err
}
reqInfo := api.GetReqInfo(ctx)
n.log.Debug("put object",
zap.String("reqId", reqInfo.RequestID),
zap.String("bucket", p.BktInfo.Name), zap.Stringer("cid", p.BktInfo.CID),
zap.String("object", p.Object), zap.Stringer("oid", id))
newVersion.OID = id
newVersion.ETag = hex.EncodeToString(hash)
if newVersion.ID, err = n.treeService.AddVersion(ctx, p.BktInfo, newVersion); err != nil {

View file

@@ -4,11 +4,13 @@ import (
"context"
errorsStd "errors"
"github.com/TrueCloudLab/frostfs-s3-gw/api"
"github.com/TrueCloudLab/frostfs-s3-gw/api/data"
"github.com/TrueCloudLab/frostfs-s3-gw/api/errors"
cid "github.com/TrueCloudLab/frostfs-sdk-go/container/id"
oid "github.com/TrueCloudLab/frostfs-sdk-go/object/id"
"github.com/TrueCloudLab/frostfs-sdk-go/user"
"go.uber.org/zap"
)
type GetObjectTaggingParams struct {
@@ -175,6 +177,14 @@ func (n *layer) getNodeVersion(ctx context.Context, objVersion *ObjectVersion) (
return nil, errors.GetAPIError(errors.ErrNoSuchKey)
}
if err == nil && version != nil && !version.IsDeleteMarker() {
reqInfo := api.GetReqInfo(ctx)
n.log.Debug("target details",
zap.String("reqId", reqInfo.RequestID),
zap.String("bucket", objVersion.BktInfo.Name), zap.Stringer("cid", objVersion.BktInfo.CID),
zap.String("object", objVersion.ObjectName), zap.Stringer("oid", version.OID))
}
return version, err
}

View file

@@ -32,13 +32,25 @@ import (
)
const (
poolConnectTimeout = 5 * time.Second
poolRequestTimeout = 5 * time.Second
poolDialTimeout = 5 * time.Second
poolHealthcheckTimeout = 5 * time.Second
poolRebalanceInterval = 30 * time.Second
poolStreamTimeout = 10 * time.Second
// a month.
defaultLifetime = 30 * 24 * time.Hour
defaultPresignedLifetime = 12 * time.Hour
)
type PoolConfig struct {
Key *ecdsa.PrivateKey
Address string
DialTimeout time.Duration
HealthcheckTimeout time.Duration
StreamTimeout time.Duration
RebalanceInterval time.Duration
}
var (
walletPathFlag string
accountAddressFlag string
@@ -65,6 +77,12 @@ var (
containerPolicies string
awcCliCredFile string
timeoutFlag time.Duration
// pool timeouts flag.
poolDialTimeoutFlag time.Duration
poolHealthcheckTimeoutFlag time.Duration
poolRebalanceIntervalFlag time.Duration
poolStreamTimeoutFlag time.Duration
)
const (
@@ -245,6 +263,34 @@ It will be ceil rounded to the nearest amount of epoch.`,
Required: false,
Destination: &awcCliCredFile,
},
&cli.DurationFlag{
Name: "pool-dial-timeout",
Usage: `Timeout for connection to the node in pool to be established`,
Required: false,
Destination: &poolDialTimeoutFlag,
Value: poolDialTimeout,
},
&cli.DurationFlag{
Name: "pool-healthcheck-timeout",
Usage: `Timeout for request to node to decide if it is alive`,
Required: false,
Destination: &poolHealthcheckTimeoutFlag,
Value: poolHealthcheckTimeout,
},
&cli.DurationFlag{
Name: "pool-rebalance-interval",
Usage: `Interval for updating nodes health status`,
Required: false,
Destination: &poolRebalanceIntervalFlag,
Value: poolRebalanceInterval,
},
&cli.DurationFlag{
Name: "pool-stream-timeout",
Usage: `Timeout for individual operation in streaming RPC`,
Required: false,
Destination: &poolStreamTimeoutFlag,
Value: poolStreamTimeout,
},
},
Action: func(c *cli.Context) error {
ctx, log := prepare()
@@ -258,7 +304,16 @@ It will be ceil rounded to the nearest amount of epoch.`,
ctx, cancel := context.WithCancel(ctx)
defer cancel()
frostFS, err := createFrostFS(ctx, log, &key.PrivateKey, peerAddressFlag)
poolCfg := PoolConfig{
Key: &key.PrivateKey,
Address: peerAddressFlag,
DialTimeout: poolDialTimeoutFlag,
HealthcheckTimeout: poolHealthcheckTimeoutFlag,
StreamTimeout: poolStreamTimeoutFlag,
RebalanceInterval: poolRebalanceIntervalFlag,
}
frostFS, err := createFrostFS(ctx, log, poolCfg)
if err != nil {
return cli.Exit(fmt.Sprintf("failed to create FrostFS component: %s", err), 2)
}
@@ -542,6 +597,34 @@ func obtainSecret() *cli.Command {
Required: true,
Destination: &accessKeyIDFlag,
},
&cli.DurationFlag{
Name: "pool-dial-timeout",
Usage: `Timeout for connection to the node in pool to be established`,
Required: false,
Destination: &poolDialTimeoutFlag,
Value: poolDialTimeout,
},
&cli.DurationFlag{
Name: "pool-healthcheck-timeout",
Usage: `Timeout for request to node to decide if it is alive`,
Required: false,
Destination: &poolHealthcheckTimeoutFlag,
Value: poolHealthcheckTimeout,
},
&cli.DurationFlag{
Name: "pool-rebalance-interval",
Usage: `Interval for updating nodes health status`,
Required: false,
Destination: &poolRebalanceIntervalFlag,
Value: poolRebalanceInterval,
},
&cli.DurationFlag{
Name: "pool-stream-timeout",
Usage: `Timeout for individual operation in streaming RPC`,
Required: false,
Destination: &poolStreamTimeoutFlag,
Value: poolStreamTimeout,
},
},
Action: func(c *cli.Context) error {
ctx, log := prepare()
@@ -555,7 +638,16 @@ func obtainSecret() *cli.Command {
ctx, cancel := context.WithCancel(ctx)
defer cancel()
frostFS, err := createFrostFS(ctx, log, &key.PrivateKey, peerAddressFlag)
poolCfg := PoolConfig{
Key: &key.PrivateKey,
Address: peerAddressFlag,
DialTimeout: poolDialTimeoutFlag,
HealthcheckTimeout: poolHealthcheckTimeoutFlag,
StreamTimeout: poolStreamTimeoutFlag,
RebalanceInterval: poolRebalanceIntervalFlag,
}
frostFS, err := createFrostFS(ctx, log, poolCfg)
if err != nil {
return cli.Exit(fmt.Sprintf("failed to create FrostFS component: %s", err), 2)
}
@@ -591,14 +683,16 @@ func obtainSecret() *cli.Command {
return command
}
func createFrostFS(ctx context.Context, log *zap.Logger, key *ecdsa.PrivateKey, peerAddress string) (authmate.FrostFS, error) {
func createFrostFS(ctx context.Context, log *zap.Logger, cfg PoolConfig) (authmate.FrostFS, error) {
log.Debug("prepare connection pool")
var prm pool.InitParameters
prm.SetKey(key)
prm.SetNodeDialTimeout(poolConnectTimeout)
prm.SetHealthcheckTimeout(poolRequestTimeout)
prm.AddNode(pool.NewNodeParam(1, peerAddress, 1))
prm.SetKey(cfg.Key)
prm.SetNodeDialTimeout(cfg.DialTimeout)
prm.SetHealthcheckTimeout(cfg.HealthcheckTimeout)
prm.SetNodeStreamTimeout(cfg.StreamTimeout)
prm.SetClientRebalanceInterval(cfg.RebalanceInterval)
prm.AddNode(pool.NewNodeParam(1, cfg.Address, 1))
p, err := pool.NewPool(prm)
if err != nil {

View file

@@ -265,7 +265,7 @@ placement_policy:
| Parameter | Type | SIGHUP reload | Default value | Description |
|------------------|----------|---------------|---------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| `default` | `string` | yes | `REP 3` | Default policy for placing containers in FrostFS. If a user sends a `CreateBucket` request without defining a placement policy for the container, the S3 Gateway creates the container with this default policy. |
| `region_mapping` | `string` | yes | | Path to a file that maps AWS `LocationConstraint` values to FrostFS placement policies. Similar to the `--container-policy` flag of the `frostfs-s3-authmate` util. |
| `region_mapping` | `string` | yes | | Path to a file that maps AWS `LocationConstraint` values to FrostFS placement policies. Similar to the `--container-policy` flag of the `frostfs-s3-authmate` util, see the [docs](./authmate.md#containers-policy). |
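For reference, a minimal `placement_policy` section using these parameters might look like the sketch below; the mapping file path is a placeholder, not taken from this repository:
```yaml
placement_policy:
  # policy used when CreateBucket does not specify a location constraint
  default: REP 3
  # placeholder path to a file mapping LocationConstraint values to placement policies
  region_mapping: /path/to/regions.json
```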
The file for `region_mapping` must contain something like this: