package engine

import (
	"context"
	"errors"

	"git.frostfs.info/TrueCloudLab/frostfs-node/internal/logs"
	meta "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/metabase"
	"git.frostfs.info/TrueCloudLab/frostfs-node/pkg/local_object_storage/shard"
	tracingPkg "git.frostfs.info/TrueCloudLab/frostfs-node/pkg/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-observability/tracing"
	"git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client"
	apistatus "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/client/status"
	cid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/container/id"
	objectSDK "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object"
	oid "git.frostfs.info/TrueCloudLab/frostfs-sdk-go/object/id"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.uber.org/zap"
)

// InhumePrm encapsulates parameters for inhume operation.
type InhumePrm struct {
	tombstone *oid.Address
	addrs     []oid.Address

	forceRemoval bool
}

// InhumeRes encapsulates results of inhume operation.
type InhumeRes struct{}

// WithTarget sets the list of objects to be inhumed and the tombstone address
// as the reason for the inhume operation.
//
// tombstone must be a valid (non-zero) address, addrs must not be empty.
// Must not be called along with MarkAsGarbage.
func (p *InhumePrm) WithTarget(tombstone oid.Address, addrs ...oid.Address) {
	p.addrs = addrs
	p.tombstone = &tombstone
}

// MarkAsGarbage marks objects to be physically removed from local storage.
//
// Must not be called along with WithTarget.
func (p *InhumePrm) MarkAsGarbage(addrs ...oid.Address) {
	p.addrs = addrs
	p.tombstone = nil
}

// WithForceRemoval inhumes the objects specified via MarkAsGarbage with a GC
// mark, bypassing any restriction checks on those objects.
func (p *InhumePrm) WithForceRemoval() {
	p.forceRemoval = true
	p.tombstone = nil
}
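
// A minimal usage sketch for the options above (illustrative only: the
// engine e, ctx and the tombstoneAddr/objAddr addresses are assumed to
// exist in the caller's scope):
//
//	var prm InhumePrm
//	prm.WithTarget(tombstoneAddr, objAddr) // tombstone-driven removal
//	// ...or, to mark the object with GC and skip restriction checks:
//	// prm.MarkAsGarbage(objAddr)
//	// prm.WithForceRemoval()
//	_, err := e.Inhume(ctx, prm)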

var errInhumeFailure = errors.New("inhume operation failed")

// Inhume calls metabase.Inhume to mark objects as removed. They are not
// removed physically from the shards until the `Delete` operation.
//
// Allows inhuming non-locked objects only. Returns apistatus.ObjectLocked
// if at least one object is locked.
//
// NOTE: Marks any object as removed (despite any prohibitions on operations
// with that object) if the WithForceRemoval option has been provided.
//
// Returns an error if executions are blocked (see BlockExecution).
func (e *StorageEngine) Inhume(ctx context.Context, prm InhumePrm) (res InhumeRes, err error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.Inhume")
	defer span.End()
	defer elapsed("Inhume", e.metrics.AddMethodDuration)()

	err = e.execIfNotBlocked(func() error {
		res, err = e.inhume(ctx, prm)
		return err
	})

	return
}

func (e *StorageEngine) inhume(ctx context.Context, prm InhumePrm) (InhumeRes, error) {
	addrsPerShard, err := e.groupObjectsByShard(ctx, prm.addrs, !prm.forceRemoval)
	if err != nil {
		return InhumeRes{}, err
	}

	var shPrm shard.InhumePrm
	if prm.forceRemoval {
		shPrm.ForceRemoval()
	}

	var errLocked *apistatus.ObjectLocked

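	// Inhume the grouped addresses shard by shard; a failure on any shard
	// aborts the whole operation.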
	for shardID, addrs := range addrsPerShard {
		if prm.tombstone != nil {
			shPrm.SetTarget(*prm.tombstone, addrs...)
		} else {
			shPrm.MarkAsGarbage(addrs...)
		}

		sh, exists := e.shards[shardID]
		if !exists {
			e.log.Warn(ctx, logs.EngineCouldNotInhumeObjectInShard,
				zap.Error(errors.New("this shard was expected to exist")),
				zap.String("shard_id", shardID),
				zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
			)
			return InhumeRes{}, errInhumeFailure
		}

		if _, err := sh.Inhume(ctx, shPrm); err != nil {
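			// Status errors the caller can handle (locked object, forbidden
			// lock object removal, read-only or degraded shard) are returned
			// as-is; any other error is first reported as a shard error.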
			switch {
			case errors.As(err, &errLocked):
			case errors.Is(err, shard.ErrLockObjectRemoval):
			case errors.Is(err, shard.ErrReadOnlyMode):
			case errors.Is(err, shard.ErrDegradedMode):
			default:
				e.reportShardError(ctx, sh, "couldn't inhume object in shard", err)
			}
			return InhumeRes{}, err
		}
	}

	return InhumeRes{}, nil
}

// groupObjectsByShard groups objects based on the shard(s) they are stored on.
//
// If checkLocked is set, [apistatus.ObjectLocked] will be returned if any of
// the objects are locked.
func (e *StorageEngine) groupObjectsByShard(ctx context.Context, addrs []oid.Address, checkLocked bool) (map[string][]oid.Address, error) {
	groups := make(map[string][]oid.Address)

	for _, addr := range addrs {
		ids, err := e.findShards(ctx, addr, checkLocked)
		if err != nil {
			return nil, err
		}
		for _, id := range ids {
			groups[id] = append(groups[id], addr)
		}
	}

	return groups, nil
}

// findShards determines the shard(s) where the object is stored.
//
// If the object is a root (virtual) object, multiple shards may be returned.
//
// If checkLocked is set, [apistatus.ObjectLocked] will be returned if the
// object is locked.
func (e *StorageEngine) findShards(ctx context.Context, addr oid.Address, checkLocked bool) ([]string, error) {
	var (
		ids    []string
		retErr error

		prm shard.ExistsPrm

		siErr *objectSDK.SplitInfoError
		ecErr *objectSDK.ECInfoError

		isRootObject bool
		objectExists bool
	)

	e.iterateOverSortedShards(addr, func(_ int, sh hashedShard) (stop bool) {
		objectExists = false

		prm.Address = addr
		switch res, err := sh.Exists(ctx, prm); {
		case client.IsErrObjectAlreadyRemoved(err) || shard.IsErrObjectExpired(err):
			// NOTE(@a-savchuk): there were some considerations that we can stop
			// immediately if the object is already removed or expired. However,
			// the previous method behavior was:
			// - keep iterating if it's a root object and already removed,
			// - stop iterating if it's not a root object and removed.
			//
			// Since my task was only improving method speed, let's keep the
			// previous method behavior. Continue if it's a root object.
			return !isRootObject
		case errors.As(err, &siErr) || errors.As(err, &ecErr):
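			// A split-info or EC-info error means addr denotes a root
			// (virtual) object whose parts may reside on several shards.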
			isRootObject = true
			objectExists = true
		case err != nil:
			e.reportShardError(
				ctx, sh, "couldn't check for presence in shard",
				err, zap.Stringer("address", addr),
			)
		case res.Exists():
			objectExists = true
		default:
			// The object was not found on this shard.
		}

		if !objectExists {
			// The object is not on this shard; continue to the next one.
			return
		}

		if checkLocked {
			if isLocked, err := sh.IsLocked(ctx, addr); err != nil {
				e.log.Warn(ctx, logs.EngineRemovingAnObjectWithoutFullLockingCheck,
					zap.Error(err),
					zap.Stringer("address", addr),
					zap.String("trace_id", tracingPkg.GetTraceID(ctx)),
				)
			} else if isLocked {
				retErr = new(apistatus.ObjectLocked)
				return true
			}
		}

		ids = append(ids, sh.ID().String())

		// Continue if it's a root object.
		return !isRootObject
	})

	if retErr != nil {
		return nil, retErr
	}
	return ids, nil
}

// IsLocked checks whether an object is locked according to StorageEngine's state.
func (e *StorageEngine) IsLocked(ctx context.Context, addr oid.Address) (bool, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.IsLocked",
		trace.WithAttributes(
			attribute.String("address", addr.EncodeToString()),
		))
	defer span.End()

	var locked bool
	var err error
	var outErr error

	e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
		locked, err = h.Shard.IsLocked(ctx, addr)
		if err != nil {
			e.reportShardError(ctx, h, "can't check object's lockers", err, zap.Stringer("address", addr),
				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
			outErr = err
			return false
		}

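		// Stop the iteration as soon as some shard reports the object locked.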
		return locked
	})

	if locked {
		return locked, nil
	}

	return locked, outErr
}

// GetLocks returns the IDs of lock objects if the object is locked according
// to the StorageEngine's state.
func (e *StorageEngine) GetLocks(ctx context.Context, addr oid.Address) ([]oid.ID, error) {
	ctx, span := tracing.StartSpanFromContext(ctx, "StorageEngine.GetLocks",
		trace.WithAttributes(
			attribute.String("address", addr.EncodeToString()),
		))
	defer span.End()

	var allLocks []oid.ID
	var outErr error

	e.iterateOverUnsortedShards(func(h hashedShard) (stop bool) {
		locks, err := h.Shard.GetLocks(ctx, addr)
		if err != nil {
			e.reportShardError(ctx, h, logs.EngineInterruptGettingLockers, err, zap.Stringer("address", addr),
				zap.String("trace_id", tracingPkg.GetTraceID(ctx)))
			outErr = err
		}
		allLocks = append(allLocks, locks...)
		return false
	})
	if len(allLocks) > 0 {
		return allLocks, nil
	}
	return allLocks, outErr
}

func (e *StorageEngine) processExpiredTombstones(ctx context.Context, addrs []meta.TombstonedObject) {
	e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
		sh.HandleExpiredTombstones(ctx, addrs)

		select {
		case <-ctx.Done():
			return true
		default:
			return false
		}
	})
}

func (e *StorageEngine) processExpiredLocks(ctx context.Context, epoch uint64, lockers []oid.Address) {
	e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
		sh.HandleExpiredLocks(ctx, epoch, lockers)

		select {
		case <-ctx.Done():
			e.log.Info(ctx, logs.EngineInterruptProcessingTheExpiredLocks, zap.Error(ctx.Err()))
			return true
		default:
			return false
		}
	})
}

func (e *StorageEngine) processDeletedLocks(ctx context.Context, lockers []oid.Address) {
	e.iterateOverUnsortedShards(func(sh hashedShard) (stop bool) {
		sh.HandleDeletedLocks(ctx, lockers)

		select {
		case <-ctx.Done():
			e.log.Info(ctx, logs.EngineInterruptProcessingTheDeletedLocks, zap.Error(ctx.Err()))
			return true
		default:
			return false
		}
	})
}

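// processZeroSizeContainers removes the stored size records and metrics for
// containers that are no longer available and hold no data on any shard.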
func (e *StorageEngine) processZeroSizeContainers(ctx context.Context, ids []cid.ID) {
	if len(ids) == 0 {
		return
	}

	idMap, err := e.selectNonExistentIDs(ctx, ids)
	if err != nil {
		return
	}

	if len(idMap) == 0 {
		return
	}

	var failed bool
	var prm shard.ContainerSizePrm
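	// First pass: drop from the candidate set every container that still
	// stores data on some shard.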
	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
		select {
		case <-ctx.Done():
			e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
			failed = true
			return true
		default:
		}

		var drop []cid.ID
		for id := range idMap {
			prm.SetContainerID(id)
			s, err := sh.ContainerSize(prm)
			if err != nil {
				e.log.Warn(ctx, logs.EngineFailedToGetContainerSize, zap.Stringer("container_id", id), zap.Error(err))
				failed = true
				return true
			}
			if s.Size() > 0 {
				drop = append(drop, id)
			}
		}
		for _, id := range drop {
			delete(idMap, id)
		}

		return len(idMap) == 0
	})

	if failed || len(idMap) == 0 {
		return
	}

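	// Second pass: the remaining containers are empty on all shards; delete
	// their size records from every shard.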
	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
		select {
		case <-ctx.Done():
			e.log.Info(ctx, logs.EngineInterruptProcessingZeroSizeContainers, zap.Error(ctx.Err()))
			failed = true
			return true
		default:
		}

		for id := range idMap {
			if err := sh.DeleteContainerSize(ctx, id); err != nil {
				e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
				failed = true
				return true
			}
		}

		return false
	})

	if failed {
		return
	}

	for id := range idMap {
		e.metrics.DeleteContainerSize(id.EncodeToString())
	}
}

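// processZeroCountContainers removes the stored object counters and metrics
// for containers that are no longer available and contain no objects on any
// shard.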
func (e *StorageEngine) processZeroCountContainers(ctx context.Context, ids []cid.ID) {
	if len(ids) == 0 {
		return
	}

	idMap, err := e.selectNonExistentIDs(ctx, ids)
	if err != nil {
		return
	}

	if len(idMap) == 0 {
		return
	}

	var failed bool
	var prm shard.ContainerCountPrm
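	// First pass: drop from the candidate set every container that still
	// has non-zero object counters on some shard.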
	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
		select {
		case <-ctx.Done():
			e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
			failed = true
			return true
		default:
		}

		var drop []cid.ID
		for id := range idMap {
			prm.ContainerID = id
			s, err := sh.ContainerCount(ctx, prm)
			if err != nil {
				e.log.Warn(ctx, logs.EngineFailedToGetContainerCounters, zap.Stringer("container_id", id), zap.Error(err))
				failed = true
				return true
			}
			if s.User > 0 || s.Logic > 0 || s.Phy > 0 {
				drop = append(drop, id)
			}
		}
		for _, id := range drop {
			delete(idMap, id)
		}

		return len(idMap) == 0
	})

	if failed || len(idMap) == 0 {
		return
	}

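	// Second pass: the remaining containers have zero counters on all
	// shards; delete their counter records from every shard.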
	e.iterateOverUnsortedShards(func(sh hashedShard) bool {
		select {
		case <-ctx.Done():
			e.log.Info(ctx, logs.EngineInterruptProcessingZeroCountContainers, zap.Error(ctx.Err()))
			failed = true
			return true
		default:
		}

		for id := range idMap {
			if err := sh.DeleteContainerCount(ctx, id); err != nil {
				e.log.Warn(ctx, logs.EngineFailedToDeleteContainerSize, zap.Stringer("container_id", id), zap.Error(err))
				failed = true
				return true
			}
		}

		return false
	})

	if failed {
		return
	}

	for id := range idMap {
		e.metrics.DeleteContainerCount(id.EncodeToString())
	}
}

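// selectNonExistentIDs filters the given container IDs and returns only
// those whose containers are no longer available in the container source.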
func (e *StorageEngine) selectNonExistentIDs(ctx context.Context, ids []cid.ID) (map[cid.ID]struct{}, error) {
	cs := e.containerSource.Load()

	idMap := make(map[cid.ID]struct{})
	for _, id := range ids {
		isAvailable, err := cs.IsContainerAvailable(ctx, id)
		if err != nil {
			e.log.Warn(ctx, logs.EngineFailedToCheckContainerAvailability, zap.Stringer("container_id", id), zap.Error(err))
			return nil, err
		}
		if isAvailable {
			continue
		}
		idMap[id] = struct{}{}
	}
	return idMap, nil
}