// pkg/kubelet/kuberuntime/kuberuntime_gc.go:226
// evict all containers that are evictable
func (cgc *containerGC) evictContainers(ctx context.Context, gcPolicy kubecontainer.GCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
    // Separate containers by evict units.
    evictUnits, err := cgc.evictableContainers(ctx, gcPolicy.MinAge)
    if err != nil {
        return err
    }

    // Remove deleted pod containers if all sources are ready.
    if allSourcesReady {
        for key, unit := range evictUnits {
            if cgc.podStateProvider.ShouldPodContentBeRemoved(key.uid) || (evictNonDeletedPods && cgc.podStateProvider.ShouldPodRuntimeBeRemoved(key.uid)) {
                cgc.removeOldestN(ctx, unit, len(unit)) // Remove all.
                delete(evictUnits, key)
            }
        }
    }

    // Enforce max containers per evict unit.
    if gcPolicy.MaxPerPodContainer >= 0 {
        cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, gcPolicy.MaxPerPodContainer)
    }

    // Enforce max total number of containers.
    if gcPolicy.MaxContainers >= 0 && evictUnits.NumContainers() > gcPolicy.MaxContainers {
        // Leave an equal number of containers per evict unit (min: 1).
        numContainersPerEvictUnit := gcPolicy.MaxContainers / evictUnits.NumEvictUnits()
        if numContainersPerEvictUnit < 1 {
            numContainersPerEvictUnit = 1
        }
        cgc.enforceMaxContainersPerEvictUnit(ctx, evictUnits, numContainersPerEvictUnit)

        // If we still need to evict, evict oldest first.
        numContainers := evictUnits.NumContainers()
        if numContainers > gcPolicy.MaxContainers {
            flattened := make([]containerGCInfo, 0, numContainers)
            for key := range evictUnits {
                flattened = append(flattened, evictUnits[key]...)
            }
            sort.Sort(byCreated(flattened))
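A rough sketch of the budgeting above, not the kubelet's own code: the MaxContainers budget is split evenly across evict units (floored at one container each), and anything still over budget would be trimmed oldest-first from a flattened, creation-time-sorted list. All names and values here are illustrative.

// Sketch only: approximate the evictContainers budgeting with toy data.
package main

import (
    "fmt"
    "sort"
)

type deadContainer struct {
    pod     string
    created int64 // unix seconds; smaller = older
}

func main() {
    budget := 4 // stand-in for gcPolicy.MaxContainers
    units := map[string][]deadContainer{
        "pod-a": {{"pod-a", 100}, {"pod-a", 200}, {"pod-a", 300}},
        "pod-b": {{"pod-b", 150}, {"pod-b", 250}},
        "pod-c": {{"pod-c", 50}},
    }

    // Equal share per evict unit, never below 1.
    perUnit := budget / len(units)
    if perUnit < 1 {
        perUnit = 1
    }

    var flattened []deadContainer
    for pod, cs := range units {
        // Keep only the newest perUnit containers in this unit.
        sort.Slice(cs, func(i, j int) bool { return cs[i].created > cs[j].created })
        if len(cs) > perUnit {
            cs = cs[:perUnit]
        }
        units[pod] = cs
        flattened = append(flattened, cs...)
    }

    // If the per-unit cap still leaves us over budget, drop oldest-first
    // across all units, mirroring the sort.Sort(byCreated(flattened)) step.
    if len(flattened) > budget {
        sort.Slice(flattened, func(i, j int) bool { return flattened[i].created < flattened[j].created })
        flattened = flattened[len(flattened)-budget:]
    }
    fmt.Println("kept:", flattened)
}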
func (im *realImageGCManager) GarbageCollect(ctx context.Context) error {
    ctx, otelSpan := im.tracer.Start(ctx, "Images/GarbageCollect")
    defer otelSpan.End()

    // Get disk usage on disk holding images.
    fsStats, err := im.statsProvider.ImageFsStats(ctx)
    if err != nil {
        return err
    }

    var capacity, available int64
    if fsStats.CapacityBytes != nil {
        capacity = int64(*fsStats.CapacityBytes)
    }
    if fsStats.AvailableBytes != nil {
        available = int64(*fsStats.AvailableBytes)
    }

    if available > capacity {
        klog.InfoS("Availability is larger than capacity", "available", available, "capacity", capacity)
        available = capacity
    }
// ....
    // If over the max threshold, free enough to place us at the lower threshold.
    usagePercent := 100 - int(available*100/capacity)
    if usagePercent >= im.policy.HighThresholdPercent {
        amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available
        klog.InfoS("Disk usage on image filesystem is over the high threshold, trying to free bytes down to the low threshold",
            "usage", usagePercent, "highThreshold", im.policy.HighThresholdPercent,
            "amountToFree", amountToFree, "lowThreshold", im.policy.LowThresholdPercent)
        freed, err := im.freeSpace(ctx, amountToFree, time.Now())
        if err != nil {
            return err
        }
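Before dropping into freeSpace, a quick back-of-the-envelope check of the threshold arithmetic above, with made-up numbers; the 85/80 high/low thresholds used here are only assumed defaults for illustration.

// Sketch only: reproduce the usage/amountToFree math with illustrative values.
package main

import "fmt"

func main() {
    const gib = int64(1 << 30)
    capacity := 100 * gib // total size of the image filesystem
    available := 10 * gib // free space reported by the stats provider
    highThresholdPercent := 85
    lowThresholdPercent := 80

    usagePercent := 100 - int(available*100/capacity)
    fmt.Println("usage:", usagePercent, "%") // 90

    if usagePercent >= highThresholdPercent {
        // Free enough to bring usage back down to the low threshold:
        // target free space is capacity*(100-low)/100, minus what is
        // already available.
        amountToFree := capacity*int64(100-lowThresholdPercent)/100 - available
        fmt.Println("need to free:", amountToFree/gib, "GiB") // 10 GiB
    }
}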
    // Get all images in eviction order.
    images := make([]evictionInfo, 0, len(im.imageRecords))
    for image, record := range im.imageRecords {
        if isImageUsed(image, imagesInUse) {
            klog.V(5).InfoS("Image ID is being used", "imageID", image)
            continue
        }
        // Check if image is pinned, prevent garbage collection
        if record.pinned {
            klog.V(5).InfoS("Image is pinned, skipping garbage collection", "imageID", image)
            continue
// vendor/k8s.io/apimachinery/pkg/util/wait/backoff.go:210
func BackoffUntil(f func(), backoff BackoffManager, sliding bool, stopCh <-chan struct{}) {
    var t clock.Timer
    for {
        select {
        case <-stopCh:
            return
        default:
        }

        if !sliding {
            t = backoff.Backoff()
        }

        func() {
            defer runtime.HandleCrash()
            f()
        }()

        if sliding {
            t = backoff.Backoff()
        }

        // NOTE: b/c there is no priority selection in golang
        // it is possible for this to race, meaning we could
        // trigger t.C and stopCh, and t.C select falls through.
        // In order to mitigate we re-check stopCh at the beginning
        // of every loop to prevent extra executions of f().
        select {
        case <-stopCh:
            if !t.Stop() {
                <-t.C()
            }
            return
        case <-t.C():
        }
    }
}
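A minimal usage sketch, not taken from the kubelet: most callers reach BackoffUntil indirectly via wait.Until / wait.JitterUntil, but it can also be driven directly with a BackoffManager. The period, jitter factor, and clock below are illustrative, and this assumes the NewJitteredBackoffManager constructor and k8s.io/utils/clock are available in the vendored apimachinery version.

// Sketch only: run f() roughly once per second (with jitter) until stopCh closes.
package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/utils/clock"
)

func main() {
    stopCh := make(chan struct{})
    go func() {
        time.Sleep(3 * time.Second)
        close(stopCh) // ask the loop to exit after a few runs
    }()

    // sliding=true: the next interval is scheduled after f() returns, so a
    // slow f() does not shorten the gap between runs.
    wait.BackoffUntil(func() {
        fmt.Println("tick", time.Now().Format(time.RFC3339))
    }, wait.NewJitteredBackoffManager(time.Second, 0.1, clock.RealClock{}), true, stopCh)
}

The stopCh re-check at the top of the loop is what the NOTE above is about: when the timer and stopCh become ready at roughly the same moment, select picks a ready case at random, so without the re-check f() could run one extra time after shutdown. The !t.Stop() / <-t.C() pair drains a timer that has already fired so its value is not left sitting in the channel.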