forked from TrueCloudLab/frostfs-node

Revert "[#972] Use min/max builtins"

This reverts commit 89784b2e0a.

Signed-off-by: Evgenii Stratonikov <e.stratonikov@yadro.com>

Parent: 4bfc6d29b9
Commit: dad56d2e98

10 changed files with 53 additions and 11 deletions
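Every hunk below has the same shape: a call to the generic min/max builtins (available since Go 1.21) is swapped back for an explicit comparison. A minimal sketch of that equivalence, not taken from the commit itself — the helper name clampEnd, the constant value, and the printed numbers are illustrative only:

// Sketch only: the pattern this revert restores.
// With the Go 1.21 builtin:  end := min(i+batchSize, need)
// Without it, an explicit comparison performs the same clamping.
package main

import "fmt"

const batchSize = 10 // illustrative value, not from the commit

// clampEnd is a hypothetical helper mirroring the first hunk below.
func clampEnd(i, need int) int {
	end := i + batchSize
	if end > need {
		end = need
	}
	return end
}

func main() {
	// Both results are capped at need: prints "10 25".
	fmt.Println(clampEnd(0, 25), clampEnd(20, 25))
}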
@@ -100,7 +100,10 @@ func registerCandidates(c *helper.InitializeContext) error {
 	// Register candidates in batches in order to overcome the signers amount limit.
 	// See: https://github.com/nspcc-dev/neo-go/blob/master/pkg/core/transaction/transaction.go#L27
 	for i := 0; i < need; i += registerBatchSize {
-		start, end := i, min(i+registerBatchSize, need)
+		start, end := i, i+registerBatchSize
+		if end > need {
+			end = need
+		}
 		// This check is sound because transactions are accepted/rejected atomically.
 		if have >= end {
 			continue
@@ -82,7 +82,10 @@ func (c *cleanupTable) touch(keyString string, now uint64, binNodeInfo []byte) b
 	result := !ok || access.removeFlag || !bytes.Equal(access.binNodeInfo, binNodeInfo)

 	access.removeFlag = false // reset remove flag on each touch
-	access.epoch = max(access.epoch, now)
+	if now > access.epoch {
+		access.epoch = now
+	}
+
 	access.binNodeInfo = binNodeInfo // update binary node info

 	c.lastAccess[keyString] = access
@@ -56,6 +56,14 @@ func (b *Blobovnicza) iterateBounds(useObjLimitBound bool, f func(uint64, uint64
 	return nil
 }

+func max(a, b uint64) uint64 {
+	if a > b {
+		return a
+	}
+
+	return b
+}
+
 // IterationElement represents a unit of elements through which Iterate operation passes.
 type IterationElement struct {
 	addr oid.Address
@@ -134,7 +134,10 @@ func (e *StorageEngine) ListWithCursor(ctx context.Context, prm ListWithCursorPr
 			continue
 		}

-		count := min(prm.count-uint32(len(result)), batchSize)
+		count := prm.count - uint32(len(result))
+		if count > batchSize {
+			count = batchSize
+		}

 		var shardPrm shard.ListWithCursorPrm
 		shardPrm.WithCount(count)
@@ -343,8 +343,16 @@ func (s *Shard) removeGarbage(pctx context.Context) (result gcRunResult) {
 }

 func (s *Shard) getExpiredObjectsParameters() (workerCount, batchSize int) {
-	workerCount = max(minExpiredWorkers, s.gc.gcCfg.expiredCollectorWorkerCount)
-	batchSize = max(minExpiredBatchSize, s.gc.gcCfg.expiredCollectorBatchSize)
+	workerCount = minExpiredWorkers
+	batchSize = minExpiredBatchSize
+
+	if s.gc.gcCfg.expiredCollectorBatchSize > batchSize {
+		batchSize = s.gc.gcCfg.expiredCollectorBatchSize
+	}
+
+	if s.gc.gcCfg.expiredCollectorWorkerCount > workerCount {
+		workerCount = s.gc.gcCfg.expiredCollectorWorkerCount
+	}
 	return
 }

@@ -162,7 +162,11 @@ func (c *Client) DepositNotary(amount fixedn.Fixed8, delta uint32) (res util.Uin
 		return util.Uint256{}, fmt.Errorf("can't get previous expiration value: %w", err)
 	}

-	till := max(int64(bc+delta), currentTill)
+	till := int64(bc + delta)
+	if till < currentTill {
+		till = currentTill
+	}
+
 	return c.depositNotary(amount, till)
 }

@@ -343,7 +343,10 @@ func PayloadRange(ctx context.Context, prm PayloadRangePrm) (*PayloadRangeRes, e
 		return nil, new(apistatus.ObjectOutOfRange)
 	}

-	ln := min(prm.ln, maxInitialBufferSize)
+	ln := prm.ln
+	if ln > maxInitialBufferSize {
+		ln = maxInitialBufferSize
+	}

 	w := bytes.NewBuffer(make([]byte, ln))
 	_, err = io.CopyN(w, rdr, int64(prm.ln))
@@ -164,7 +164,10 @@ func (s *searchStreamMsgSizeCtrl) Send(resp *object.SearchResponse) error {
 		newResp.SetBody(body)
 	}

-	cut := min(s.addrAmount, ln)
+	cut := s.addrAmount
+	if cut > ln {
+		cut = ln
+	}

 	body.SetIDList(ids[:cut])
 	newResp.SetMetaHeader(resp.GetMetaHeader())
@@ -388,7 +388,10 @@ func (it *sliceKeySpaceIterator) Next(_ context.Context, size uint32) ([]objectc
 	if it.cur >= len(it.objs) {
 		return nil, engine.ErrEndOfListing
 	}
-	end := min(it.cur+int(size), len(it.objs))
+	end := it.cur + int(size)
+	if end > len(it.objs) {
+		end = len(it.objs)
+	}
 	ret := it.objs[it.cur:end]
 	it.cur = end
 	return ret, nil
@@ -167,7 +167,9 @@ func mergeOperationStreams(streams []chan *pilorama.Move, merged chan<- *piloram
 		merged <- ms[minTimeMoveIndex]
 		height := ms[minTimeMoveIndex].Time
 		if ms[minTimeMoveIndex] = <-streams[minTimeMoveIndex]; ms[minTimeMoveIndex] == nil {
-			minStreamedLastHeight = min(minStreamedLastHeight, height)
+			if minStreamedLastHeight > height {
+				minStreamedLastHeight = height
+			}
 		}
 	}

@@ -201,7 +203,9 @@ func (s *Service) applyOperationStream(ctx context.Context, cid cid.ID, treeID s
 		errGroup.Go(func() error {
 			if err := s.forest.TreeApply(ctx, cid, treeID, m, true); err != nil {
 				heightMtx.Lock()
-				unappliedOperationHeight = min(unappliedOperationHeight, m.Time)
+				if m.Time < unappliedOperationHeight {
+					unappliedOperationHeight = m.Time
+				}
 				heightMtx.Unlock()
 				return err
 			}