diff --git a/api/cache/listmultipart.go b/api/cache/listmultipart.go
new file mode 100644
index 0000000..81acac8
--- /dev/null
+++ b/api/cache/listmultipart.go
@@ -0,0 +1,109 @@
+package cache
+
+import (
+	"fmt"
+	"time"
+
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/internal/logs"
+	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
+	"github.com/bluele/gcache"
+	"go.uber.org/zap"
+)
+
+type (
+	// ListMultipartSessionCache contains cache for list multiparts session (during pagination).
+	ListMultipartSessionCache struct {
+		cache  gcache.Cache
+		logger *zap.Logger
+	}
+
+	// ListMultipartSessionKey is a key to find a ListMultipartSessionCache's entry.
+	ListMultipartSessionKey struct {
+		cid      cid.ID
+		prefix   string
+		marker   string
+		uploadID string
+	}
+)
+
+const (
+	// DefaultListMultipartSessionCacheLifetime is a default lifetime of entries in cache of ListMultipartUploads.
+	DefaultListMultipartSessionCacheLifetime = time.Second * 60
+	// DefaultListMultipartSessionCacheSize is a default size of cache of ListMultipartUploads.
+	DefaultListMultipartSessionCacheSize = 100
+)
+
+// DefaultListMultipartSessionConfig returns new default cache expiration values.
+func DefaultListMultipartSessionConfig(logger *zap.Logger) *Config {
+	return &Config{
+		Size:     DefaultListMultipartSessionCacheSize,
+		Lifetime: DefaultListMultipartSessionCacheLifetime,
+		Logger:   logger,
+	}
+}
+
+// String builds the access-cache key string for this session key.
+// Components are joined with a NUL separator so that different
+// (prefix, marker, uploadID) triples cannot concatenate into the same
+// string (e.g. prefix="a", marker="bc" vs prefix="ab", marker="c").
+func (k *ListMultipartSessionKey) String() string {
+	return k.cid.EncodeToString() + "\x00" + k.prefix + "\x00" + k.marker + "\x00" + k.uploadID
+}
+
+// NewListMultipartSessionCache is a constructor which creates an object of ListObjectsCache with the given lifetime of entries.
+// NewListMultipartSessionCache is a constructor which creates an object of ListMultipartSessionCache with the given lifetime of entries.
+// Evicted sessions that are not currently acquired have their context canceled to avoid leaks.
+func NewListMultipartSessionCache(config *Config) *ListMultipartSessionCache {
+	gc := gcache.New(config.Size).LRU().Expiration(config.Lifetime).EvictedFunc(func(_ interface{}, val interface{}) {
+		session, ok := val.(*data.ListMultipartSession)
+		if !ok {
+			config.Logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", val)),
+				zap.String("expected", fmt.Sprintf("%T", session)))
+			// session is nil here; dereferencing it below would panic
+			return
+		}
+
+		if !session.Acquired.Load() {
+			session.Cancel()
+		}
+	}).Build()
+	return &ListMultipartSessionCache{cache: gc, logger: config.Logger}
+}
+
+// GetListMultipartSession returns a session of ListMultipartUploads request.
+// GetListMultipartSession returns a session of ListMultipartUploads request.
+// It returns nil on cache miss or when the cached entry has an unexpected type
+// (the latter is logged as a warning).
+func (l *ListMultipartSessionCache) GetListMultipartSession(key ListMultipartSessionKey) *data.ListMultipartSession {
+	entry, err := l.cache.Get(key)
+	if err != nil {
+		// cache miss (or expired entry)
+		return nil
+	}
+
+	result, ok := entry.(*data.ListMultipartSession)
+	if !ok {
+		l.logger.Warn(logs.InvalidCacheEntryType, zap.String("actual", fmt.Sprintf("%T", entry)),
+			zap.String("expected", fmt.Sprintf("%T", result)))
+		return nil
+	}
+
+	return result
+}
+
+// PutListMultipartSession puts a ListMultipartUploads session info to cache.
+// PutListMultipartSession puts a ListMultipartUploads session info to cache.
+// If a different session is already stored under the same key and nobody
+// holds it (not acquired), its context is canceled before being overwritten
+// so the underlying stream does not leak.
+func (l *ListMultipartSessionCache) PutListMultipartSession(key ListMultipartSessionKey, session *data.ListMultipartSession) error {
+	s := l.GetListMultipartSession(key)
+	if s != nil && s != session {
+		if !s.Acquired.Load() {
+			s.Cancel()
+		}
+	}
+	return l.cache.Set(key, session)
+}
+
+// DeleteListMultipartSession removes key from cache.
+func (l *ListMultipartSessionCache) DeleteListMultipartSession(key ListMultipartSessionKey) {
+	l.cache.Remove(key)
+}
+
+// CreateListMultipartSessionCacheKey returns ListMultipartSessionKey with the given CID, prefix, marker and uploadID.
+// CreateListMultipartSessionCacheKey returns ListMultipartSessionKey with the given CID, prefix, marker and uploadID.
+func CreateListMultipartSessionCacheKey(cnr cid.ID, prefix, marker, uploadID string) ListMultipartSessionKey {
+	return ListMultipartSessionKey{
+		cid:      cnr,
+		prefix:   prefix,
+		marker:   marker,
+		uploadID: uploadID,
+	}
+}
diff --git a/api/data/listsession.go b/api/data/listsession.go
index a13f2e4..62ea419 100644
--- a/api/data/listsession.go
+++ b/api/data/listsession.go
@@ -9,11 +9,27 @@ type VersionsStream interface {
 	Next(ctx context.Context) (*NodeVersion, error)
 }
 
-type ListSession struct {
-	Next     []*ExtendedNodeVersion
-	Stream   VersionsStream
-	NamesMap map[string]struct{}
+type CommonSession struct {
 	Context  context.Context
 	Cancel   context.CancelFunc
 	Acquired atomic.Bool
 }
+
+type ListSession struct {
+	CommonSession
+	Next     []*ExtendedNodeVersion
+	Stream   VersionsStream
+	NamesMap map[string]struct{}
+}
+
+type MultipartInfoStream interface {
+	Next(marker, uploadID string) (*MultipartInfo, error)
+}
+
+type ListMultipartSession struct {
+	CommonSession
+	Next       *MultipartInfo
+	Stream     MultipartInfoStream
+	RootID     []uint64
+	TailPrefix string
+}
diff --git a/api/data/tree.go b/api/data/tree.go
index c75d936..ef01653 100644
--- a/api/data/tree.go
+++ b/api/data/tree.go
@@ -207,3 +207,21 @@ func (l LockInfo) UntilDate() string {
 func (l LockInfo) IsCompliance() bool {
 	return l.isCompliance
 }
+
+type MultiID []uint64
+
+// Equal reports whether every element of id is present in m.
+// NOTE(review): this is a subset check, not symmetric equality — it ignores
+// element counts and any extra elements of m (e.g. m=[1,2] "equals" id=[1]).
+// The code is moved verbatim from pkg/service/tree, so callers may rely on
+// these set semantics; confirm before tightening.
+func (m MultiID) Equal(id MultiID) bool {
+	seen := make(map[uint64]struct{}, len(m))
+
+	for i := range m {
+		seen[m[i]] = struct{}{}
+	}
+
+	for i := range id {
+		if _, ok := seen[id[i]]; !ok {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/api/handler/handlers_test.go b/api/handler/handlers_test.go
index dc32ff9..82e546f 100644
--- a/api/handler/handlers_test.go
+++ b/api/handler/handlers_test.go
@@ -244,6 +244,7 @@ func getMinCacheConfig(logger *zap.Logger) *layer.CachesConfig {
 		Buckets:       minCacheCfg,
 		System:        minCacheCfg,
 		AccessControl: minCacheCfg,
+		MultipartList: minCacheCfg,
 		NetworkInfo:   &cache.NetworkInfoCacheConfig{Lifetime: minCacheCfg.Lifetime},
 	}
 }
diff --git a/api/handler/multipart_upload.go b/api/handler/multipart_upload.go
index 0a719dc..9c0365e 100644
--- a/api/handler/multipart_upload.go
+++ b/api/handler/multipart_upload.go
@@ -509,7 +509,7 @@ func (h *handler) ListMultipartUploadsHandler(w http.ResponseWriter, r *http.Req
 
 	if maxUploadsStr != "" {
 		val, err := strconv.Atoi(maxUploadsStr)
-		if err != nil || val < 1 || val > 1000 {
+		if err != nil || val < 1 || val > maxObjectList {
 			h.logAndSendError(w, "invalid maxUploads", reqInfo, errors.GetAPIError(errors.ErrInvalidMaxUploads))
 			return
 		}
diff --git a/api/layer/cache.go b/api/layer/cache.go
index c3ceb7c..ecf5ac4 100644
--- a/api/layer/cache.go
+++ b/api/layer/cache.go
@@ -12,15 +12,16 @@ import (
 )
 
 type Cache struct {
-	logger           *zap.Logger
-	listsCache       *cache.ObjectsListCache
-	sessionListCache *cache.ListSessionCache
-	objCache         *cache.ObjectsCache
-	namesCache       *cache.ObjectsNameCache
-	bucketCache      *cache.BucketCache
-	systemCache      *cache.SystemCache
-	accessCache      *cache.AccessControlCache
-	networkInfoCache *cache.NetworkInfoCache
+	logger                *zap.Logger
+	listsCache            *cache.ObjectsListCache
+	sessionListCache      *cache.ListSessionCache
+	objCache              *cache.ObjectsCache
+	namesCache            *cache.ObjectsNameCache
+	bucketCache           *cache.BucketCache
+	systemCache           *cache.SystemCache
+	accessCache           *cache.AccessControlCache
+	networkInfoCache      *cache.NetworkInfoCache
+	sessionMultipartCache *cache.ListMultipartSessionCache
 }
 
 // CachesConfig contains params for caches.
@@ -33,6 +34,7 @@ type CachesConfig struct {
 	Buckets       *cache.Config
 	System        *cache.Config
 	AccessControl *cache.Config
+	MultipartList *cache.Config
 	NetworkInfo   *cache.NetworkInfoCacheConfig
 }
 
@@ -48,20 +50,22 @@ func DefaultCachesConfigs(logger *zap.Logger) *CachesConfig {
 		System:        cache.DefaultSystemConfig(logger),
 		AccessControl: cache.DefaultAccessControlConfig(logger),
 		NetworkInfo:   cache.DefaultNetworkInfoConfig(logger),
+		MultipartList: cache.DefaultListMultipartSessionConfig(logger),
 	}
 }
 
 func NewCache(cfg *CachesConfig) *Cache {
 	return &Cache{
-		logger:           cfg.Logger,
-		listsCache:       cache.NewObjectsListCache(cfg.ObjectsList),
-		sessionListCache: cache.NewListSessionCache(cfg.SessionList),
-		objCache:         cache.New(cfg.Objects),
-		namesCache:       cache.NewObjectsNameCache(cfg.Names),
-		bucketCache:      cache.NewBucketCache(cfg.Buckets),
-		systemCache:      cache.NewSystemCache(cfg.System),
-		accessCache:      cache.NewAccessControlCache(cfg.AccessControl),
-		networkInfoCache: cache.NewNetworkInfoCache(cfg.NetworkInfo),
+		logger:                cfg.Logger,
+		listsCache:            cache.NewObjectsListCache(cfg.ObjectsList),
+		sessionListCache:      cache.NewListSessionCache(cfg.SessionList),
+		objCache:              cache.New(cfg.Objects),
+		namesCache:            cache.NewObjectsNameCache(cfg.Names),
+		bucketCache:           cache.NewBucketCache(cfg.Buckets),
+		systemCache:           cache.NewSystemCache(cfg.System),
+		accessCache:           cache.NewAccessControlCache(cfg.AccessControl),
+		networkInfoCache:      cache.NewNetworkInfoCache(cfg.NetworkInfo),
+		sessionMultipartCache: cache.NewListMultipartSessionCache(cfg.MultipartList),
 	}
 }
 
@@ -161,6 +165,14 @@ func (c *Cache) GetListSession(owner user.ID, key cache.ListSessionKey) *data.Li
 	return c.sessionListCache.GetListSession(key)
 }
 
+// GetListMultipartSession returns a cached ListMultipartUploads session for
+// the given key, but only if the owner passes the access-cache check.
+func (c *Cache) GetListMultipartSession(owner user.ID, key cache.ListMultipartSessionKey) *data.ListMultipartSession {
+	if !c.accessCache.Get(owner, key.String()) {
+		return nil
+	}
+
+	return c.sessionMultipartCache.GetListMultipartSession(key)
+}
+
+
 func (c *Cache) PutListSession(owner user.ID, key cache.ListSessionKey, session *data.ListSession) {
 	if err := c.sessionListCache.PutListSession(key, session); err != nil {
 		c.logger.Warn(logs.CouldntCacheListSession, zap.Error(err))
@@ -176,6 +188,21 @@ func (c *Cache) DeleteListSession(owner user.ID, key cache.ListSessionKey) {
 	c.accessCache.Delete(owner, key.String())
 }
 
+// PutListMultipartSession caches a ListMultipartUploads session and records
+// the owner's access to it; failures are logged, not returned.
+func (c *Cache) PutListMultipartSession(owner user.ID, key cache.ListMultipartSessionKey, session *data.ListMultipartSession) {
+	if err := c.sessionMultipartCache.PutListMultipartSession(key, session); err != nil {
+		c.logger.Warn(logs.CouldntCacheListMultipartSession, zap.Error(err))
+	}
+
+	if err := c.accessCache.Put(owner, key.String()); err != nil {
+		c.logger.Warn(logs.CouldntCacheAccessControlOperation, zap.Error(err))
+	}
+}
+
+// DeleteListMultipartSession removes the session and the owner's access
+// record for the given key.
+func (c *Cache) DeleteListMultipartSession(owner user.ID, key cache.ListMultipartSessionKey) {
+	c.sessionMultipartCache.DeleteListMultipartSession(key)
+	c.accessCache.Delete(owner, key.String())
+}
+
+
 func (c *Cache) GetTagging(owner user.ID, key string) map[string]string {
 	if !c.accessCache.Get(owner, key) {
 		return nil
diff --git a/api/layer/layer.go b/api/layer/layer.go
index 0768c83..ffac46a 100644
--- a/api/layer/layer.go
+++ b/api/layer/layer.go
@@ -814,7 +814,7 @@ func (n *Layer) ResolveBucket(ctx context.Context, zone, name string) (cid.ID, e
 
 func (n *Layer) DeleteBucket(ctx context.Context, p *DeleteBucketParams) error {
 	if !p.SkipCheck {
-		res, _, err := n.getAllObjectsVersions(ctx, commonVersionsListingParams{
+		res, _, err := n.getAllObjectsVersions(ctx, commonListingParams{
 			BktInfo: p.BktInfo,
 			MaxKeys: 1,
 		})
diff --git a/api/layer/listing.go b/api/layer/listing.go
index abc8b7e..c414482 100644
--- a/api/layer/listing.go
+++ b/api/layer/listing.go
@@ -73,17 +73,18 @@ type (
 		VersionIDMarker     string
 	}
 
-	commonVersionsListingParams struct {
+	commonListingParams struct {
 		BktInfo   *data.BucketInfo
 		Delimiter string
 		Prefix    string
 		MaxKeys   int
 		Marker    string
-		Bookmark  string
+		// key to store session in cache
+		Bookmark string
 	}
 
 	commonLatestVersionsListingParams struct {
-		commonVersionsListingParams
+		commonListingParams
 		ListType ListType
 	}
 )
@@ -100,7 +101,7 @@ func (n *Layer) ListObjectsV1(ctx context.Context, p *ListObjectsParamsV1) (*Lis
 	var result ListObjectsInfoV1
 
 	prm := commonLatestVersionsListingParams{
-		commonVersionsListingParams: commonVersionsListingParams{
+		commonListingParams: commonListingParams{
 			BktInfo:   p.BktInfo,
 			Delimiter: p.Delimiter,
 			Prefix:    p.Prefix,
@@ -131,7 +132,7 @@ func (n *Layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*Lis
 	var result ListObjectsInfoV2
 
 	prm := commonLatestVersionsListingParams{
-		commonVersionsListingParams: commonVersionsListingParams{
+		commonListingParams: commonListingParams{
 			BktInfo:   p.BktInfo,
 			Delimiter: p.Delimiter,
 			Prefix:    p.Prefix,
@@ -158,7 +159,7 @@ func (n *Layer) ListObjectsV2(ctx context.Context, p *ListObjectsParamsV2) (*Lis
 }
 
 func (n *Layer) ListObjectVersions(ctx context.Context, p *ListObjectVersionsParams) (*ListObjectVersionsInfo, error) {
-	prm := commonVersionsListingParams{
+	prm := commonListingParams{
 		BktInfo:   p.BktInfo,
 		Delimiter: p.Delimiter,
 		Prefix:    p.Prefix,
@@ -193,13 +194,12 @@ func (n *Layer) getLatestObjectsVersions(ctx context.Context, p commonLatestVers
 		return nil, nil, nil
 	}
 
-	session, err := n.getListLatestVersionsSession(ctx, p)
+	session, err := n.getListVersionsSession(ctx, p.commonListingParams, true)
 	if err != nil {
 		return nil, nil, err
 	}
-
-	generator, errorCh := nodesGeneratorStream(ctx, p.commonVersionsListingParams, session)
-	objOutCh, err := n.initWorkerPool(ctx, 2, p.commonVersionsListingParams, generator)
+	generator, errorCh := nodesGeneratorStream(ctx, p.commonListingParams, session)
+	objOutCh, err := n.initWorkerPool(ctx, 2, p.commonListingParams, generator)
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to init worker pool: %w", err)
 	}
@@ -225,12 +225,12 @@ func (n *Layer) getLatestObjectsVersions(ctx context.Context, p commonLatestVers
 	return
 }
 
-func (n *Layer) getAllObjectsVersions(ctx context.Context, p commonVersionsListingParams) ([]*data.ExtendedNodeVersion, bool, error) {
+func (n *Layer) getAllObjectsVersions(ctx context.Context, p commonListingParams) ([]*data.ExtendedNodeVersion, bool, error) {
 	if p.MaxKeys == 0 {
 		return nil, false, nil
 	}
 
-	session, err := n.getListAllVersionsSession(ctx, p)
+	session, err := n.getListVersionsSession(ctx, p, false)
 	if err != nil {
 		return nil, false, err
 	}
@@ -259,7 +259,7 @@ func (n *Layer) getAllObjectsVersions(ctx context.Context, p commonVersionsListi
 	return allObjects, isTruncated, nil
 }
 
-func handleGeneratedVersions(objOutCh <-chan *data.ExtendedNodeVersion, p commonVersionsListingParams, session *data.ListSession) []*data.ExtendedNodeVersion {
+func handleGeneratedVersions(objOutCh <-chan *data.ExtendedNodeVersion, p commonListingParams, session *data.ListSession) []*data.ExtendedNodeVersion {
 	var lastName string
 	var listRowStartIndex int
 	allObjects := make([]*data.ExtendedNodeVersion, 0, p.MaxKeys)
@@ -301,48 +301,31 @@ func formVersionsListRow(objects []*data.ExtendedNodeVersion, rowStartIndex int,
 	}
 }
 
-func (n *Layer) getListLatestVersionsSession(ctx context.Context, p commonLatestVersionsListingParams) (*data.ListSession, error) {
-	return n.getListVersionsSession(ctx, p.commonVersionsListingParams, true)
-}
-
-func (n *Layer) getListAllVersionsSession(ctx context.Context, p commonVersionsListingParams) (*data.ListSession, error) {
-	return n.getListVersionsSession(ctx, p, false)
-}
-
-func (n *Layer) getListVersionsSession(ctx context.Context, p commonVersionsListingParams, latestOnly bool) (*data.ListSession, error) {
+func (n *Layer) getListVersionsSession(ctx context.Context, p commonListingParams, latestOnly bool) (session *data.ListSession, err error) {
 	owner := n.BearerOwner(ctx)
 
 	cacheKey := cache.CreateListSessionCacheKey(p.BktInfo.CID, p.Prefix, p.Bookmark)
-	session := n.cache.GetListSession(owner, cacheKey)
-	if session == nil {
-		return n.initNewVersionsByPrefixSession(ctx, p, latestOnly)
+	session = n.cache.GetListSession(owner, cacheKey)
+	if session == nil || session.Acquired.Swap(true) {
+		session = n.newSession(ctx)
+		session.Stream, err = n.treeService.InitVersionsByPrefixStream(session.Context, p.BktInfo, p.Prefix, "version", latestOnly)
+		return session, err
 	}
 
-	if session.Acquired.Swap(true) {
-		return n.initNewVersionsByPrefixSession(ctx, p, latestOnly)
-	}
-
-	// after reading next object from stream in session
-	// the current cache value already doesn't match with next token in cache key
 	n.cache.DeleteListSession(owner, cacheKey)
-
 	return session, nil
 }
 
-func (n *Layer) initNewVersionsByPrefixSession(ctx context.Context, p commonVersionsListingParams, latestOnly bool) (session *data.ListSession, err error) {
-	session = &data.ListSession{NamesMap: make(map[string]struct{})}
+func (n *Layer) newSession(ctx context.Context) *data.ListSession {
+	session := &data.ListSession{NamesMap: make(map[string]struct{})}
 	session.Context, session.Cancel = context.WithCancel(context.Background())
 
+	// save access box data for next requests
 	if bd, err := middleware.GetBoxData(ctx); err == nil {
 		session.Context = middleware.SetBox(session.Context, &middleware.Box{AccessBox: bd})
 	}
 
-	session.Stream, err = n.treeService.InitVersionsByPrefixStream(session.Context, p.BktInfo, p.Prefix, latestOnly)
-	if err != nil {
-		return nil, err
-	}
-
-	return session, nil
+	return session
 }
 
 func (n *Layer) putListLatestVersionsSession(ctx context.Context, p commonLatestVersionsListingParams, session *data.ListSession, allObjects []*data.ExtendedNodeVersion) {
@@ -366,7 +349,7 @@ func (n *Layer) putListLatestVersionsSession(ctx context.Context, p commonLatest
 	n.cache.PutListSession(n.BearerOwner(ctx), cacheKey, session)
 }
 
-func (n *Layer) putListAllVersionsSession(ctx context.Context, p commonVersionsListingParams, session *data.ListSession, allObjects []*data.ExtendedNodeVersion) {
+func (n *Layer) putListAllVersionsSession(ctx context.Context, p commonListingParams, session *data.ListSession, allObjects []*data.ExtendedNodeVersion) {
 	if len(allObjects) <= p.MaxKeys {
 		return
 	}
@@ -383,7 +366,7 @@ func (n *Layer) putListAllVersionsSession(ctx context.Context, p commonVersionsL
 	n.cache.PutListSession(n.BearerOwner(ctx), cacheKey, session)
 }
 
-func nodesGeneratorStream(ctx context.Context, p commonVersionsListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
+func nodesGeneratorStream(ctx context.Context, p commonListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
 	nodeCh := make(chan *data.ExtendedNodeVersion, 1000)
 	errCh := make(chan error, 1)
 	existed := stream.NamesMap
@@ -439,7 +422,7 @@ func nodesGeneratorStream(ctx context.Context, p commonVersionsListingParams, st
 	return nodeCh, errCh
 }
 
-func nodesGeneratorVersions(ctx context.Context, p commonVersionsListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
+func nodesGeneratorVersions(ctx context.Context, p commonListingParams, stream *data.ListSession) (<-chan *data.ExtendedNodeVersion, <-chan error) {
 	nodeCh := make(chan *data.ExtendedNodeVersion, 1000)
 	errCh := make(chan error, 1)
 	existed := stream.NamesMap
@@ -498,7 +481,7 @@ func nodesGeneratorVersions(ctx context.Context, p commonVersionsListingParams,
 	return nodeCh, errCh
 }
 
-func (n *Layer) initWorkerPool(ctx context.Context, size int, p commonVersionsListingParams, input <-chan *data.ExtendedNodeVersion) (<-chan *data.ExtendedNodeVersion, error) {
+func (n *Layer) initWorkerPool(ctx context.Context, size int, p commonListingParams, input <-chan *data.ExtendedNodeVersion) (<-chan *data.ExtendedNodeVersion, error) {
 	reqLog := n.reqLogger(ctx)
 	pool, err := ants.NewPool(size, ants.WithLogger(&logWrapper{reqLog}))
 	if err != nil {
@@ -567,7 +550,7 @@ func (n *Layer) initWorkerPool(ctx context.Context, size int, p commonVersionsLi
 	return objCh, nil
 }
 
-func shouldSkip(node *data.ExtendedNodeVersion, p commonVersionsListingParams, existed map[string]struct{}) bool {
+func shouldSkip(node *data.ExtendedNodeVersion, p commonListingParams, existed map[string]struct{}) bool {
 	if node.NodeVersion.IsDeleteMarker {
 		return true
 	}
@@ -598,7 +581,7 @@ func shouldSkip(node *data.ExtendedNodeVersion, p commonVersionsListingParams, e
 	return false
 }
 
-func shouldSkipVersions(node *data.ExtendedNodeVersion, p commonVersionsListingParams, existed map[string]struct{}) bool {
+func shouldSkipVersions(node *data.ExtendedNodeVersion, p commonListingParams, existed map[string]struct{}) bool {
 	filePath := node.NodeVersion.FilePath
 	if node.DirName != "" {
 		filePath = node.DirName
diff --git a/api/layer/multipart_upload.go b/api/layer/multipart_upload.go
index ed7612c..6b725f1 100644
--- a/api/layer/multipart_upload.go
+++ b/api/layer/multipart_upload.go
@@ -16,6 +16,7 @@ import (
 	"time"
 
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/auth"
+	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/cache"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/data"
 	apierr "git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/errors"
 	"git.frostfs.info/TrueCloudLab/frostfs-s3-gw/api/layer/encryption"
@@ -499,47 +500,58 @@ func (n *Layer) ListMultipartUploads(ctx context.Context, p *ListMultipartUpload
 		return &result, nil
 	}
 
-	multipartInfos, err := n.treeService.GetMultipartUploadsByPrefix(ctx, p.Bkt, p.Prefix)
+	session, err := n.getListMultipartUploadsSession(ctx, p)
 	if err != nil {
 		return nil, err
 	}
 
-	uploads := make([]*UploadInfo, 0, len(multipartInfos))
+	uploads := make([]*UploadInfo, 0, p.MaxUploads)
 	uniqDirs := make(map[string]struct{})
+	uploadsCount := 0
+	if session.Next != nil {
+		uploads = append(uploads, uploadInfoFromMultipartInfo(session.Next, p.Prefix, p.Delimiter))
+		uploadsCount++
+	}
 
-	for _, multipartInfo := range multipartInfos {
-		info := uploadInfoFromMultipartInfo(multipartInfo, p.Prefix, p.Delimiter)
-		if info != nil {
-			if info.IsDir {
-				if _, ok := uniqDirs[info.Key]; ok {
+	isTruncated := true
+	var last data.MultipartInfo
+	var info *data.MultipartInfo
+	for uploadsCount <= p.MaxUploads {
+		info, err = session.Stream.Next(p.KeyMarker, p.UploadIDMarker)
+		if err != nil {
+			if err == io.EOF {
+				isTruncated = false
+				break
+			}
+			n.log.Warn(logs.CouldNotGetMultipartUploadInfo, zap.Error(err))
+			continue
+		}
+		if uploadsCount == p.MaxUploads-1 {
+			// last upload from which is used for setting NextKeyMarker and NextUploadIDMarker
+			last = *info
+		}
+		upload := uploadInfoFromMultipartInfo(info, p.Prefix, p.Delimiter)
+		if upload != nil {
+			if upload.IsDir {
+				if _, ok := uniqDirs[upload.Key]; ok {
 					continue
 				}
-				uniqDirs[info.Key] = struct{}{}
+				uniqDirs[upload.Key] = struct{}{}
 			}
-			uploads = append(uploads, info)
+			uploads = append(uploads, upload)
+			uploadsCount++
 		}
 	}
 
-	sort.Slice(uploads, func(i, j int) bool {
-		if uploads[i].Key == uploads[j].Key {
-			return uploads[i].UploadID < uploads[j].UploadID
-		}
-		return uploads[i].Key < uploads[j].Key
-	})
-
-	if p.KeyMarker != "" {
-		if p.UploadIDMarker != "" {
-			uploads = trimAfterUploadIDAndKey(p.KeyMarker, p.UploadIDMarker, uploads)
-		} else {
-			uploads = trimAfterUploadKey(p.KeyMarker, uploads)
-		}
-	}
-
-	if len(uploads) > p.MaxUploads {
-		result.IsTruncated = true
+	if isTruncated {
+		// put to session redundant multipart upload which we read to check for EOF
+		session.Next = info
 		uploads = uploads[:p.MaxUploads]
-		result.NextUploadIDMarker = uploads[len(uploads)-1].UploadID
-		result.NextKeyMarker = uploads[len(uploads)-1].Key
+		result.IsTruncated = true
+		result.NextUploadIDMarker = last.UploadID
+		result.NextKeyMarker = last.Key
+		cacheKey := cache.CreateListMultipartSessionCacheKey(p.Bkt.CID, p.Prefix, last.Key, last.UploadID)
+		n.putListMultipartUploadsSession(ctx, session, cacheKey)
 	}
 
 	for _, ov := range uploads {
@@ -553,6 +565,38 @@ func (n *Layer) ListMultipartUploads(ctx context.Context, p *ListMultipartUpload
 	return &result, nil
 }
 
+// putListMultipartUploadsSession releases the session (clears the acquired
+// flag) and stores it in the cache under cacheKey for the next page request.
+func (n *Layer) putListMultipartUploadsSession(ctx context.Context, session *data.ListMultipartSession, cacheKey cache.ListMultipartSessionKey) {
+	session.Acquired.Store(false)
+	n.cache.PutListMultipartSession(n.BearerOwner(ctx), cacheKey, session)
+}
+
+// getListMultipartUploadsSession returns the cached ListMultipartUploads
+// session for the request markers, or starts a new one (initializing a fresh
+// multipart-info stream) when there is no cached session or the cached one is
+// already in use by a concurrent request.
+func (n *Layer) getListMultipartUploadsSession(ctx context.Context, p *ListMultipartUploadsParams) (session *data.ListMultipartSession, err error) {
+	owner := n.BearerOwner(ctx)
+	cacheKey := cache.CreateListMultipartSessionCacheKey(p.Bkt.CID, p.Prefix, p.KeyMarker, p.UploadIDMarker)
+	session = n.cache.GetListMultipartSession(owner, cacheKey)
+	if session != nil && !session.Acquired.Swap(true) {
+		// after reading next object from stream in session
+		// the current cache value already doesn't match with next token in cache key
+		n.cache.DeleteListMultipartSession(owner, cacheKey)
+		return session, nil
+	}
+
+	// Either no cached session exists or the cached one is already acquired
+	// by a concurrent request: build a fresh session instead of overwriting
+	// the stream/context of a session that is in use.
+	// NOTE(review): this context is derived from the request context, so it
+	// is canceled when the request finishes; the analogous versions-listing
+	// session uses context.Background() — confirm which is intended for a
+	// session cached across requests.
+	sessionCtx, cancel := context.WithCancel(ctx)
+	session = &data.ListMultipartSession{
+		CommonSession: data.CommonSession{
+			Context: sessionCtx,
+			Cancel:  cancel,
+		},
+	}
+	session.Stream, err = n.treeService.GetMultipartUploadsByPrefix(session.Context, p.Bkt, p.Prefix)
+	if err != nil {
+		// release the context we just created so it does not leak
+		cancel()
+		return nil, err
+	}
+
+	return session, nil
+}
+
 func (n *Layer) AbortMultipartUpload(ctx context.Context, p *UploadInfoParams) error {
 	multipartInfo, parts, err := n.getUploadParts(ctx, p)
 	if err != nil {
diff --git a/api/layer/tree/tree_service.go b/api/layer/tree/tree_service.go
index db079b1..05c65cd 100644
--- a/api/layer/tree/tree_service.go
+++ b/api/layer/tree/tree_service.go
@@ -43,7 +43,7 @@ type Service interface {
 
 	GetVersions(ctx context.Context, bktInfo *data.BucketInfo, objectName string) ([]*data.NodeVersion, error)
 	GetLatestVersion(ctx context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error)
-	InitVersionsByPrefixStream(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error)
+	InitVersionsByPrefixStream(ctx context.Context, bktInfo *data.BucketInfo, prefix, treeID string, latestOnly bool) (data.VersionsStream, error)
 	GetUnversioned(ctx context.Context, bktInfo *data.BucketInfo, objectName string) (*data.NodeVersion, error)
 	AddVersion(ctx context.Context, bktInfo *data.BucketInfo, newVersion *data.NodeVersion) (uint64, error)
 	RemoveVersion(ctx context.Context, bktInfo *data.BucketInfo, nodeID uint64) error
@@ -53,7 +53,7 @@ type Service interface {
 
 	CreateMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) error
 	DeleteMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, info *data.MultipartInfo) error
-	GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.MultipartInfo, error)
+	GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) (data.MultipartInfoStream, error)
 	GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error)
 
 	// AddPart puts a node to a system tree as a child of appropriate multipart upload
diff --git a/api/layer/tree_mock.go b/api/layer/tree_mock.go
index 577f406..e4cb3b6 100644
--- a/api/layer/tree_mock.go
+++ b/api/layer/tree_mock.go
@@ -184,7 +184,7 @@ func (t *TreeServiceMock) GetLatestVersion(_ context.Context, bktInfo *data.Buck
 	return nil, tree.ErrNodeNotFound
 }
 
-func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error) {
+func (t *TreeServiceMock) InitVersionsByPrefixStream(_ context.Context, bktInfo *data.BucketInfo, prefix, _ string, latestOnly bool) (data.VersionsStream, error) {
 	cnrVersionsMap, ok := t.versions[bktInfo.CID.EncodeToString()]
 	if !ok {
 		return nil, tree.ErrNodeNotFound
@@ -328,7 +328,7 @@ func (t *TreeServiceMock) CreateMultipartUpload(_ context.Context, bktInfo *data
 	return nil
 }
 
-func (t *TreeServiceMock) GetMultipartUploadsByPrefix(context.Context, *data.BucketInfo, string) ([]*data.MultipartInfo, error) {
+func (t *TreeServiceMock) GetMultipartUploadsByPrefix(context.Context, *data.BucketInfo, string) (data.MultipartInfoStream, error) {
 	panic("implement me")
 }
 
diff --git a/internal/logs/logs.go b/internal/logs/logs.go
index dd7a659..92c46b7 100644
--- a/internal/logs/logs.go
+++ b/internal/logs/logs.go
@@ -8,6 +8,8 @@ const (
 	UpdateAccessCredObjectIntoFrostFS                    = "update access cred object into FrostFS"                                                            // Info in ../../authmate/authmate.go
 	MetricsAreDisabled                                   = "metrics are disabled"                                                                              // Warn in ../../metrics/app.go
 	FoundMoreThanOneUnversionedNode                      = "found more than one unversioned node"                                                              // Debug in ../../pkg/service/tree/tree.go
+	CouldNotParseTreeNode                                = "could not parse tree node"                                                                         // Error in ../../pkg/service/tree/tree.go
+	CouldNotFormFilePath                                 = "could not form file path"                                                                          // Error in ../../pkg/service/tree/tree.go
 	ServiceIsRunning                                     = "service is running"                                                                                // Info in ../../cmd/s3-gw/service.go
 	ServiceCouldntStartOnConfiguredPort                  = "service couldn't start on configured port"                                                         // Warn in ../../cmd/s3-gw/service.go
 	ServiceHasntStartedSinceItsDisabled                  = "service hasn't started since it's disabled"                                                        // Info in ../../cmd/s3-gw/service.go
@@ -67,6 +69,7 @@ const (
 	CouldNotListUserContainers                           = "could not list user containers"                                                                    // Error in ../../api/layer/container.go
 	CouldNotFetchContainerInfo                           = "could not fetch container info"                                                                    // Error in ../../api/layer/container.go
 	MismatchedObjEncryptionInfo                          = "mismatched obj encryptionInfo"                                                                     // Warn in ../../api/layer/multipart_upload.go
+	CouldNotGetMultipartUploadInfo                       = "could not get multipart upload info"                                                               // Warn in ../../api/layer/multipart_upload.go
 	UploadPart                                           = "upload part"                                                                                       // Debug in ../../api/layer/multipart_upload.go
 	CouldntDeleteOldPartObject                           = "couldn't delete old part object"                                                                   // Error in ../../api/layer/multipart_upload.go
 	CouldNotPutCompletedObject                           = "could not put a completed object (multipart upload)"                                               // Error in ../../api/layer/multipart_upload.go
@@ -89,6 +92,7 @@ const (
 	CouldntPutObjAddressToNameCache                      = "couldn't put obj address to name cache"                                                            // Warn in ../../api/layer/cache.go
 	CouldntCacheListOfObjects                            = "couldn't cache list of objects"                                                                    // Warn in ../../api/layer/cache.go
 	CouldntCacheListSession                              = "couldn't cache list session"                                                                       // Warn in ../../api/layer/cache.go
+	CouldntCacheListMultipartSession                     = "couldn't cache list multipart session"                                                             // Warn in ../../api/layer/cache.go
 	CouldntCacheTags                                     = "couldn't cache tags"                                                                               // Error in ../../api/layer/cache.go
 	CouldntCacheLockInfo                                 = "couldn't cache lock info"                                                                          // Error in ../../api/layer/cache.go
 	CouldntCacheBucketSettings                           = "couldn't cache bucket settings"                                                                    // Warn in ../../api/layer/cache.go
diff --git a/pkg/service/tree/tree.go b/pkg/service/tree/tree.go
index 438b129..849c9c7 100644
--- a/pkg/service/tree/tree.go
+++ b/pkg/service/tree/tree.go
@@ -900,28 +900,10 @@ func (s *DummySubTreeStream) Next() (NodeResponse, error) {
 	return s.data, nil
 }
 
-type MultiID []uint64
-
-func (m MultiID) Equal(id MultiID) bool {
-	seen := make(map[uint64]struct{}, len(m))
-
-	for i := range m {
-		seen[m[i]] = struct{}{}
-	}
-
-	for i := range id {
-		if _, ok := seen[id[i]]; !ok {
-			return false
-		}
-	}
-
-	return true
-}
-
 type VersionsByPrefixStreamImpl struct {
 	ctx                context.Context
-	rootID             MultiID
-	intermediateRootID MultiID
+	rootID             data.MultiID
+	intermediateRootID data.MultiID
 	service            ServiceClient
 	bktInfo            *data.BucketInfo
 	mainStream         SubTreeStream
@@ -1079,8 +1061,31 @@ func (s *VersionsByPrefixStreamImpl) parseNodeResponse(node NodeResponse) (res *
 	return nodeVersion, false, err
 }
 
-func (c *Tree) InitVersionsByPrefixStream(ctx context.Context, bktInfo *data.BucketInfo, prefix string, latestOnly bool) (data.VersionsStream, error) {
-	mainStream, tailPrefix, rootID, err := c.getSubTreeByPrefixMainStream(ctx, bktInfo, versionTree, prefix)
+func (c *Tree) InitVersionsByPrefixStream(ctx context.Context, bktInfo *data.BucketInfo, prefix, treeID string, latestOnly bool) (data.VersionsStream, error) {
+	mainStream, tailPrefix, rootID, err := c.getSubTreeByPrefixMainStream(ctx, bktInfo, treeID, prefix)
+	if err != nil {
+		if errors.Is(err, io.EOF) {
+			return &VersionsByPrefixStreamImpl{ended: true}, nil
+		}
+		return nil, err
+	}
+
+	return &VersionsByPrefixStreamImpl{
+		ctx:        ctx,
+		namesMap:   map[uint64]string{},
+		rootID:     rootID,
+		service:    c.service,
+		bktInfo:    bktInfo,
+		mainStream: mainStream,
+		headPrefix: strings.TrimSuffix(prefix, tailPrefix),
+		tailPrefix: tailPrefix,
+		latestOnly: latestOnly,
+		log:        c.reqLogger(ctx),
+	}, nil
+}
+
+func (c *Tree) InitMultipartsByPrefixStream(ctx context.Context, bktInfo *data.BucketInfo, prefix, treeID string, latestOnly bool) (data.VersionsStream, error) {
+	mainStream, tailPrefix, rootID, err := c.getSubTreeByPrefixMainStream(ctx, bktInfo, treeID, prefix)
 	if err != nil {
 		if errors.Is(err, io.EOF) {
 			return &VersionsByPrefixStreamImpl{ended: true}, nil
@@ -1165,58 +1170,28 @@ func (c *Tree) getPrefixNodeID(ctx context.Context, bktInfo *data.BucketInfo, tr
 	return intermediateNodes, nil
 }
 
-func (c *Tree) getSubTreeByPrefix(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string, latestOnly bool) ([]NodeResponse, string, error) {
-	rootID, tailPrefix, err := c.determinePrefixNode(ctx, bktInfo, treeID, prefix)
+func (c *Tree) getSubTreeByPrefixStream(ctx context.Context, bktInfo *data.BucketInfo, treeID, prefix string) (data.MultipartInfoStream, error) {
+	rootID, _, err := c.determinePrefixNode(ctx, bktInfo, treeID, prefix)
 	if err != nil {
 		if errors.Is(err, tree.ErrNodeNotFound) {
-			return nil, "", nil
+			return nil, nil
 		}
-		return nil, "", err
+		return nil, err
 	}
 
-	subTree, err := c.service.GetSubTree(ctx, bktInfo, treeID, rootID, 2, false)
+	stream, err := c.service.GetSubTreeStream(ctx, bktInfo, treeID, rootID, maxGetSubTreeDepth)
 	if err != nil {
 		if errors.Is(err, tree.ErrNodeNotFound) {
-			return nil, "", nil
+			return nil, nil
 		}
-		return nil, "", err
+		return nil, err
 	}
-
-	nodesMap := make(map[string][]NodeResponse, len(subTree))
-	for _, node := range subTree {
-		if MultiID(rootID).Equal(node.GetNodeID()) {
-			continue
-		}
-
-		fileName := getFilename(node)
-		if !strings.HasPrefix(fileName, tailPrefix) {
-			continue
-		}
-
-		nodes := nodesMap[fileName]
-
-		// Add all nodes if flag latestOnly is false.
-		// Add all intermediate nodes
-		// and only latest leaf (object) nodes. To do this store and replace last leaf (object) node in nodes[0]
-		if len(nodes) == 0 {
-			nodes = []NodeResponse{node}
-		} else if !latestOnly || isIntermediate(node) {
-			nodes = append(nodes, node)
-		} else if isIntermediate(nodes[0]) {
-			nodes = append([]NodeResponse{node}, nodes...)
-		} else if getMaxTimestamp(node) > getMaxTimestamp(nodes[0]) {
-			nodes[0] = node
-		}
-
-		nodesMap[fileName] = nodes
+	multipartStream := multipartInfoStream{
+		log:       c.log,
+		nodeNames: make(map[uint64]*treeNode),
+		stream:    stream,
 	}
-
-	result := make([]NodeResponse, 0, len(subTree))
-	for _, nodes := range nodesMap {
-		result = append(result, nodes...)
-	}
-
-	return result, strings.TrimSuffix(prefix, tailPrefix), nil
+	return multipartStream, nil
 }
 
 func getFilename(node NodeResponse) string {
@@ -1253,6 +1228,26 @@ func formFilePath(node NodeResponse, fileName string, namesMap map[uint64]string
 	return filepath, nil
 }
 
+func formFilePathV2(node treeNode, filename string, namesMap map[uint64]*treeNode) (string, error) {
+	var parentPath string
+	curNode := &node
+	for {
+		parentNode, ok := namesMap[curNode.ParentID[0]]
+		if !ok {
+			break
+		}
+		parentFileName, ok := parentNode.FileName()
+		if !ok {
+			return "", fmt.Errorf("couldn't get parent file name")
+		}
+
+		parentPath = parentFileName + separator + parentPath
+		curNode = parentNode
+	}
+
+	return parentPath + filename, nil
+}
+
 func parseTreeNode(node NodeResponse) (*treeNode, string, error) {
 	tNode, err := newTreeNode(node)
 	if err != nil { // invalid OID attribute
@@ -1267,10 +1262,6 @@ func parseTreeNode(node NodeResponse) (*treeNode, string, error) {
 	return tNode, fileName, nil
 }
 
-func formLatestNodeKey(parentID uint64, fileName string) string {
-	return strconv.FormatUint(parentID, 10) + "." + fileName
-}
-
 func (c *Tree) GetUnversioned(ctx context.Context, bktInfo *data.BucketInfo, filepath string) (*data.NodeVersion, error) {
 	return c.getUnversioned(ctx, bktInfo, versionTree, filepath)
 }
@@ -1313,84 +1304,46 @@ func (c *Tree) CreateMultipartUpload(ctx context.Context, bktInfo *data.BucketIn
 	return err
 }
 
-func (c *Tree) GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) ([]*data.MultipartInfo, error) {
-	subTreeNodes, headPrefix, err := c.getSubTreeByPrefix(ctx, bktInfo, systemTree, prefix, false)
-	if err != nil {
-		return nil, err
-	}
+type multipartInfoStream struct {
+	log       *zap.Logger
+	nodeNames map[uint64]*treeNode
+	stream    SubTreeStream
+}
 
-	var result []*data.MultipartInfo
-	for _, node := range subTreeNodes {
-		multipartUploads, err := c.getSubTreeMultipartUploads(ctx, bktInfo, node.GetNodeID(), headPrefix)
+func (m multipartInfoStream) Next(marker, uploadID string) (*data.MultipartInfo, error) {
+	var tNode *treeNode
+	var filename, filepath string
+	for {
+		node, err := m.stream.Next()
 		if err != nil {
 			return nil, err
 		}
-		result = append(result, multipartUploads...)
+		tNode, filename, err = parseTreeNode(node)
+		if err != nil {
+			m.log.Error(logs.CouldNotParseTreeNode, zap.Error(err))
+			continue
+		}
+		m.nodeNames[tNode.ID[0]] = tNode
+		if _, ok := tNode.Meta[finishedKV]; ok {
+			continue
+		}
+		if id, ok := tNode.Meta[uploadIDKV]; ok {
+			filepath, err = formFilePathV2(*tNode, filename, m.nodeNames)
+			if err != nil {
+				m.log.Error(logs.CouldNotFormFilePath, zap.Error(err))
+				continue
+			}
+			if marker == "" || (uploadID == "" && filepath > marker) || (uploadID != "" && filepath == marker && id > uploadID) {
+				break
+			}
+		}
 	}
 
-	return result, nil
+	return newMultipartInfoFromTreeNode(m.log, filepath, tNode)
 }
 
-func (c *Tree) getSubTreeMultipartUploads(ctx context.Context, bktInfo *data.BucketInfo, nodeID []uint64, parentFilePath string) ([]*data.MultipartInfo, error) {
-	// sorting in getSubTree leads to skipping nodes that doesn't have FileName attribute
-	// so when we are only interested in multipart nodes, we can set this flag
-	// (despite we sort multiparts in above layer anyway)
-	// to skip its children (parts) that don't have FileName
-	subTree, err := c.service.GetSubTree(ctx, bktInfo, systemTree, nodeID, maxGetSubTreeDepth, true)
-	if err != nil {
-		return nil, err
-	}
-
-	var parentPrefix string
-	if parentFilePath != "" { // The root of subTree can also have a parent
-		parentPrefix = strings.TrimSuffix(parentFilePath, separator) + separator // To avoid 'foo//bar'
-	}
-
-	var filepath string
-	namesMap := make(map[uint64]string, len(subTree))
-	multiparts := make(map[string][]*data.MultipartInfo, len(subTree))
-
-	for i, node := range subTree {
-		tNode, fileName, err := parseTreeNode(node)
-		if err != nil {
-			continue
-		}
-
-		if i != 0 {
-			if filepath, err = formFilePath(node, fileName, namesMap); err != nil {
-				return nil, fmt.Errorf("invalid node order: %w", err)
-			}
-		} else {
-			filepath = parentPrefix + fileName
-			for _, id := range tNode.ID {
-				namesMap[id] = filepath
-			}
-		}
-
-		multipartInfo, err := newMultipartInfoFromTreeNode(c.reqLogger(ctx), filepath, tNode)
-		if err != nil || multipartInfo.Finished {
-			continue
-		}
-
-		for _, id := range node.GetParentID() {
-			key := formLatestNodeKey(id, fileName)
-			multipartInfos, ok := multiparts[key]
-			if !ok {
-				multipartInfos = []*data.MultipartInfo{multipartInfo}
-			} else {
-				multipartInfos = append(multipartInfos, multipartInfo)
-			}
-
-			multiparts[key] = multipartInfos
-		}
-	}
-
-	result := make([]*data.MultipartInfo, 0, len(multiparts))
-	for _, multipartInfo := range multiparts {
-		result = append(result, multipartInfo...)
-	}
-
-	return result, nil
+func (c *Tree) GetMultipartUploadsByPrefix(ctx context.Context, bktInfo *data.BucketInfo, prefix string) (data.MultipartInfoStream, error) {
+	return c.getSubTreeByPrefixStream(ctx, bktInfo, systemTree, prefix)
 }
 
 func (c *Tree) GetMultipartUpload(ctx context.Context, bktInfo *data.BucketInfo, objectName, uploadID string) (*data.MultipartInfo, error) {
@@ -1446,7 +1399,7 @@ func (c *Tree) AddPart(ctx context.Context, bktInfo *data.BucketInfo, multipartN
 		maxTimestamp uint64
 	)
 
-	multiNodeID := MultiID{multipartNodeID}
+	multiNodeID := data.MultiID{multipartNodeID}
 
 	for _, part := range parts {
 		if multiNodeID.Equal(part.GetNodeID()) {