forked from TrueCloudLab/frostfs-node

[#972] Use min/max builtins

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>

parent 45fd4e4ff1
commit 89784b2e0a

10 changed files with 11 additions and 53 deletions
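
All ten files make the same move: a multi-line comparison block collapses into a single call to the min or max builtins that Go 1.21 predeclares for every ordered type. As a quick standalone sketch of their semantics (not code from this repository):

package main

import "fmt"

func main() {
	// min and max are predeclared in Go 1.21+; each takes one or more
	// arguments of a common ordered type and returns the smallest or
	// largest value.
	fmt.Println(min(3, 7))     // 3
	fmt.Println(max(2.5, 1.0)) // 2.5
	fmt.Println(min("b", "a")) // a (strings compare lexically)
	fmt.Println(max(1, 5, 2))  // 5 (more than two arguments allowed)
}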

@@ -100,10 +100,7 @@ func registerCandidates(c *helper.InitializeContext) error {
 	// Register candidates in batches in order to overcome the signers amount limit.
 	// See: https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/transaction/transaction.go#L27
 	for i := 0; i < need; i += registerBatchSize {
-		start, end := i, i+registerBatchSize
-		if end > need {
-			end = need
-		}
+		start, end := i, min(i+registerBatchSize, need)
 		// This check is sound because transactions are accepted/rejected atomically.
 		if have >= end {
 			continue
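
The batching idiom this hunk condenses is common enough to sketch on its own; walkBatches below is illustrative and not part of frostfs-node:

package main

import "fmt"

// walkBatches visits [0, n) in windows of at most size, clamping the
// final window with min instead of a three-line if-block.
func walkBatches(n, size int, visit func(start, end int)) {
	for i := 0; i < n; i += size {
		visit(i, min(i+size, n))
	}
}

func main() {
	walkBatches(10, 4, func(start, end int) {
		fmt.Println(start, end) // 0 4, then 4 8, then 8 10
	})
}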

@@ -82,10 +82,7 @@ func (c *cleanupTable) touch(keyString string, now uint64, binNodeInfo []byte) b
 	result := !ok || access.removeFlag || !bytes.Equal(access.binNodeInfo, binNodeInfo)
 
 	access.removeFlag = false // reset remove flag on each touch
-	if now > access.epoch {
-		access.epoch = now
-	}
-
+	access.epoch = max(access.epoch, now)
 	access.binNodeInfo = binNodeInfo // update binary node info
 
 	c.lastAccess[keyString] = access

@@ -56,14 +56,6 @@ func (b *Blobovnicza) iterateBounds(useObjLimitBound bool, f func(uint64, uint64
 	return nil
 }
 
-func max(a, b uint64) uint64 {
-	if a > b {
-		return a
-	}
-
-	return b
-}
-
 // IterationElement represents a unit of elements through which Iterate operation passes.
 type IterationElement struct {
 	addr oid.Address
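
This hunk deletes a helper outright: before Go 1.21 each package needed its own typed max, while the builtin is generic over ordered types. A minimal sketch, assuming Go 1.21+:

package main

import "fmt"

func main() {
	var a, b uint64 = 7, 9
	// The builtin covers uint64 directly, so the per-package helper
	// removed above has no remaining callers.
	fmt.Println(max(a, b)) // 9
}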

@@ -134,10 +134,7 @@ func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPr
 			continue
 		}
 
-		count := prm.count - uint32(len(result))
-		if count > batchSize {
-			count = batchSize
-		}
+		count := min(prm.count-uint32(len(result)), batchSize)
 
 		var shardPrm shard.ListWithCursorPrm
 		shardPrm.WithCount(count)

@@ -343,16 +343,8 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 }
 
 func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
-	workerCount = minExpiredWorkers
-	batchSize = minExpiredBatchSize
-
-	if s.gc.gcCfg.expiredCollectorBatchSize > batchSize {
-		batchSize = s.gc.gcCfg.expiredCollectorBatchSize
-	}
-
-	if s.gc.gcCfg.expiredCollectorWorkerCount > workerCount {
-		workerCount = s.gc.gcCfg.expiredCollectorWorkerCount
-	}
+	workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
+	batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
 	return
 }
 
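
Here max reads as "never below the floor": each parameter is the configured value clamped up to a hard minimum, and an unset (zero) config value falls through to the floor automatically, just as the old if-chains guaranteed. A sketch with invented names:

package main

import "fmt"

const (
	minWorkers = 2
	minBatch   = 100
)

// clampGCParams lifts operator-supplied settings to their floors; the
// names are not from frostfs-node, but the shape mirrors the two max
// calls above.
func clampGCParams(cfgWorkers, cfgBatch int) (workers, batch int) {
	return max(minWorkers, cfgWorkers), max(minBatch, cfgBatch)
}

func main() {
	fmt.Println(clampGCParams(0, 500)) // 2 500
}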

@@ -162,11 +162,7 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin
 		return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err)
 	}
 
-	till := int64(bc + delta)
-	if till < currentTill {
-		till = currentTill
-	}
-
+	till := max(int64(bc+delta), currentTill)
 	return c.depositNotary(amount, till)
 }
 
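
One constraint worth noting: min/max arguments must share a common type, which is why the int64 conversion survives the rewrite. A small illustration with invented values:

package main

import "fmt"

func main() {
	var bc, delta uint32 = 500, 100
	var currentTill int64 = 700
	// max(bc+delta, currentTill) would not compile: uint32 and int64
	// have no common type. The conversion happens first, as before.
	fmt.Println(max(int64(bc+delta), currentTill)) // 700
}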

@@ -343,10 +343,7 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
 		return nil, new(apistatus.ObjectOutOfRange)
 	}
 
-	ln := prm.ln
-	if ln > maxInitialBufferSize {
-		ln = maxInitialBufferSize
-	}
+	ln := min(prm.ln, maxInitialBufferSize)
 
 	w := bytes.NewBuffer(make([]byte, ln))
 	_, err = io.CopyN(w, rdr, int64(prm.ln))

@@ -164,10 +164,7 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error {
 		newResp.SetBody(body)
 	}
 
-	cut := s.addrAmount
-	if cut > ln {
-		cut = ln
-	}
+	cut := min(s.addrAmount, ln)
 
 	body.SetIDList(ids[:cut])
 	newResp.SetMetaHeader(resp.GetMetaHeader())

@@ -388,10 +388,7 @@ func (it *sliceKeySpaceIterator) Next(_ context.Context, size uint32) ([]objectc
 	if it.cur >= len(it.objs) {
 		return nil, engine.ErrEndOfListing
 	}
-	end := it.cur + int(size)
-	if end > len(it.objs) {
-		end = len(it.objs)
-	}
+	end := min(it.cur+int(size), len(it.objs))
 	ret := it.objs[it.cur:end]
 	it.cur = end
 	return ret, nil

@@ -167,9 +167,7 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram
 		merged <- ms[minTimeMoveIndex]
 		height := ms[minTimeMoveIndex].Time
 		if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil {
-			if minStreamedLastHeight > height {
-				minStreamedLastHeight = height
-			}
+			minStreamedLastHeight = min(minStreamedLastHeight, height)
 		}
 	}
 

@@ -203,9 +201,7 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s
 	errGroup.Go(func() error {
 		if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil {
 			heightMtx.Lock()
-			if m.Time < unappliedOperationHeight {
-				unappliedOperationHeight = m.Time
-			}
+			unappliedOperationHeight = min(unappliedOperationHeight, m.Time)
 			heightMtx.Unlock()
 			return err
 		}
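
The last two hunks both track a low-water mark, and in the second one the updates arrive from concurrent goroutines, so min runs under a mutex. A self-contained sketch of that pattern, assuming Go 1.21+ (names and values invented):

package main

import (
	"fmt"
	"math"
	"sync"
)

func main() {
	var (
		mu     sync.Mutex
		lowest uint64 = math.MaxUint64 // nothing recorded yet
		wg     sync.WaitGroup
	)
	for _, h := range []uint64{42, 17, 93} {
		wg.Add(1)
		go func(h uint64) {
			defer wg.Done()
			mu.Lock()
			lowest = min(lowest, h) // same shape as the hunk above
			mu.Unlock()
		}(h)
	}
	wg.Wait()
	fmt.Println(lowest) // 17
}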