An Analysis of the Kubernetes Garbage Collection Mechanism
To reclaim resources on a node, the kubelet provides ImageGC and ContainerGC, which garbage-collect images and containers respectively. This article first walks through the ContainerGC portion of the kubelet code.
The relevant kubelet flags are:
minimum-container-ttl-duration
maximum-dead-containers-per-container
maximum-dead-containers
The corresponding code is:
//pkg/kubelet/container_gc.go
type ContainerGCPolicy struct {
	// Minimum amount of time a dead container is kept on the machine before it can be garbage collected.
	MinAge time.Duration
	// Maximum number of dead containers each pod may keep.
	MaxPerPodContainer int
	// Maximum total number of dead containers to keep on the machine.
	MaxContainers int
}
...
func NewContainerGC(runtime Runtime, policy ContainerGCPolicy, sourcesReadyProvider SourcesReadyProvider) (ContainerGC, error) {
	if policy.MinAge < 0 {
		return nil, fmt.Errorf("invalid minimum garbage collection age: %v", policy.MinAge)
	}

	return &realContainerGC{
		runtime:              runtime,
		policy:               policy,
		sourcesReadyProvider: sourcesReadyProvider,
	}, nil
}

func (cgc *realContainerGC) GarbageCollect() error {
	return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), false)
}

func (cgc *realContainerGC) DeleteAllUnusedContainers() error {
	return cgc.runtime.GarbageCollect(cgc.policy, cgc.sourcesReadyProvider.AllReady(), true)
}
The call inside the GarbageCollect method is where the real ContainerGC logic lives. GarbageCollect is invoked from pkg/kubelet/kubelet.go, once every minute. The runtime GarbageCollect function it delegates to is implemented in pkg/kubelet/kuberuntime/kuberuntime_gc.go.
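For reference, here is a condensed sketch of how kubelet.go wires this up, based on its StartGarbageCollection function; the real code adds event recording and error bookkeeping, and the details may vary between versions (ContainerGCPeriod is one minute and ImageGCPeriod five minutes upstream):

// Condensed sketch of kubelet.go's StartGarbageCollection (error handling trimmed).
func (kl *Kubelet) StartGarbageCollection() {
	go wait.Until(func() {
		if err := kl.containerGC.GarbageCollect(); err != nil {
			glog.Errorf("Container garbage collection failed: %v", err)
		}
	}, ContainerGCPeriod, wait.NeverStop)

	go wait.Until(func() {
		if err := kl.imageManager.GarbageCollect(); err != nil {
			glog.Errorf("Image garbage collection failed: %v", err)
		}
	}, ImageGCPeriod, wait.NeverStop)
}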
//pkg/kubelet/kuberuntime/kuberuntime_gc.go
func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
	// Remove evictable containers
	if err := cgc.evictContainers(gcPolicy, allSourcesReady, evictNonDeletedPods); err != nil {
		return err
	}

	// Remove sandboxes with zero containers
	if err := cgc.evictSandboxes(evictNonDeletedPods); err != nil {
		return err
	}

	// Remove pod sandbox log directory
	return cgc.evictPodLogsDirectories(allSourcesReady)
}
The first step is evicting containers:
func (cgc *containerGC) evictContainers(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
	// Separate containers by evict units.
	evictUnits, err := cgc.evictableContainers(gcPolicy.MinAge)
	if err != nil {
		return err
	}

	// Remove deleted pod containers if all sources are ready.
	if allSourcesReady {
		for key, unit := range evictUnits {
			if cgc.isPodDeleted(key.uid) || evictNonDeletedPods {
				cgc.removeOldestN(unit, len(unit)) // Remove all.
				delete(evictUnits, key)
			}
		}
	}

	// Enforce max containers per evict unit.
	if gcPolicy.MaxPerPodContainer >= 0 {
		cgc.enforceMaxContainersPerEvictUnit(evictUnits, gcPolicy.MaxPerPodContainer)
	}

	// Enforce max total number of containers.
	if gcPolicy.MaxContainers >= 0 && evictUnits.NumContainers() > gcPolicy.MaxContainers {
		// Leave an equal number of containers per evict unit (min: 1).
		numContainersPerEvictUnit := gcPolicy.MaxContainers / evictUnits.NumEvictUnits()
		if numContainersPerEvictUnit < 1 {
			numContainersPerEvictUnit = 1
		}
		cgc.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit)

		// If we still need to evict, evict oldest first.
		numContainers := evictUnits.NumContainers()
		if numContainers > gcPolicy.MaxContainers {
			flattened := make([]containerGCInfo, 0, numContainers)
			for key := range evictUnits {
				flattened = append(flattened, evictUnits[key]...)
			}
			sort.Sort(byCreated(flattened))

			cgc.removeOldestN(flattened, numContainers-gcPolicy.MaxContainers)
		}
	}
	return nil
}
1. First, collect the containers that are already dead and older than MinAge (minimum-container-ttl-duration).
2. If all sources are ready and a pod has already been deleted, remove every dead container belonging to that pod.
3. If MaxPerPodContainer is set (the default is 1), delete any dead containers beyond that per-pod limit, oldest first.
4. If MaxContainers is set (the default is -1, i.e. no global limit), clean up further: divide MaxContainers by the number of evict units (pods) to get a per-pod quota (minimum 1) and re-apply step 3 with that quota. If the node still holds more dead containers than MaxContainers, flatten them into one list, sort it by creation time, and delete the oldest containers until only MaxContainers remain. The enforceMaxContainersPerEvictUnit and removeOldestN helpers used here are sketched below.
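The two helpers are not shown in the listing above. A simplified sketch of them follows; it assumes the surrounding types in kuberuntime_gc.go and omits some bookkeeping from the real implementation:

// enforceMaxContainersPerEvictUnit trims each evict unit (pod) down to at most
// MaxContainers dead containers, deleting the oldest ones first.
func (cgc *containerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) {
	for key := range evictUnits {
		toRemove := len(evictUnits[key]) - MaxContainers
		if toRemove > 0 {
			cgc.removeOldestN(evictUnits[key], toRemove)
		}
	}
}

// removeOldestN deletes the oldest toRemove containers of a newest-first sorted slice.
func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int) {
	numToKeep := len(containers) - toRemove
	for i := numToKeep; i < len(containers); i++ {
		if err := cgc.manager.removeContainer(containers[i].id); err != nil {
			glog.Errorf("Failed to remove container %q: %v", containers[i].id, err)
		}
	}
}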
The next step is cleaning up the sandboxes on the node:
func (cgc *containerGC) evictSandboxes(evictNonDeletedPods bool) error {
	containers, err := cgc.manager.getKubeletContainers(true)
	if err != nil {
		return err
	}

	sandboxes, err := cgc.manager.getKubeletSandboxes(true)
	if err != nil {
		return err
	}

	sandboxesByPod := make(sandboxesByPodUID)
	for _, sandbox := range sandboxes {
		podUID := types.UID(sandbox.Metadata.Uid)
		sandboxInfo := sandboxGCInfo{
			id:         sandbox.Id,
			createTime: time.Unix(0, sandbox.CreatedAt),
		}

		// Set ready sandboxes to be active.
		if sandbox.State == runtimeapi.PodSandboxState_SANDBOX_READY {
			sandboxInfo.active = true
		}

		// Set sandboxes that still have containers to be active.
		hasContainers := false
		sandboxID := sandbox.Id
		for _, container := range containers {
			if container.PodSandboxId == sandboxID {
				hasContainers = true
				break
			}
		}
		if hasContainers {
			sandboxInfo.active = true
		}

		sandboxesByPod[podUID] = append(sandboxesByPod[podUID], sandboxInfo)
	}

	// Sort the sandboxes by age.
	for uid := range sandboxesByPod {
		sort.Sort(sandboxByCreated(sandboxesByPod[uid]))
	}

	for podUID, sandboxes := range sandboxesByPod {
		if cgc.isPodDeleted(podUID) || evictNonDeletedPods {
			// Remove all evictable sandboxes if the pod has been removed.
			// Note that the latest dead sandbox is also removed if there is
			// already an active one.
			cgc.removeOldestNSandboxes(sandboxes, len(sandboxes))
		} else {
			// Keep latest one if the pod still exists.
			cgc.removeOldestNSandboxes(sandboxes, len(sandboxes)-1)
		}
	}
	return nil
}
This first lists all containers and sandboxes on the node. A sandbox whose state is SANDBOX_READY is marked active, and a sandbox that still has containers attached to it is also treated as active. The sandboxes are then sorted by creation time per pod. If the pod a sandbox belongs to has already been deleted, all of that pod's sandboxes are removed; if the pod still exists, the newest sandbox is kept and the rest are removed (see the sketch of removeOldestNSandboxes below).
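removeOldestNSandboxes is also not shown in the listing; roughly, it deletes the oldest non-active sandboxes. A sketch, assuming the types used above:

// removeOldestNSandboxes deletes the oldest toRemove sandboxes of a newest-first
// sorted slice, skipping any sandbox still marked active.
func (cgc *containerGC) removeOldestNSandboxes(sandboxes []sandboxGCInfo, toRemove int) {
	numToKeep := len(sandboxes) - toRemove
	for i := numToKeep; i < len(sandboxes); i++ {
		if !sandboxes[i].active {
			cgc.removeSandbox(sandboxes[i].id)
		}
	}
}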
The final step is cleaning up container and pod logs:
// evictPodLogsDirectories evicts all evictable pod logs directories. Pod logs directories
// are evictable if there are no corresponding pods.
func (cgc *containerGC) evictPodLogsDirectories(allSourcesReady bool) error {
	osInterface := cgc.manager.osInterface
	if allSourcesReady {
		// Only remove pod logs directories when all sources are ready.
		dirs, err := osInterface.ReadDir(podLogsRootDirectory)
		if err != nil {
			return fmt.Errorf("failed to read podLogsRootDirectory %q: %v", podLogsRootDirectory, err)
		}
		for _, dir := range dirs {
			name := dir.Name()
			podUID := types.UID(name)
			if !cgc.isPodDeleted(podUID) {
				continue
			}
			err := osInterface.RemoveAll(filepath.Join(podLogsRootDirectory, name))
			if err != nil {
				glog.Errorf("Failed to remove pod logs directory %q: %v", name, err)
			}
		}
	}

	// Remove dead container log symlinks.
	// TODO(random-liu): Remove this after cluster logging supports CRI container log path.
	logSymlinks, _ := osInterface.Glob(filepath.Join(legacyContainerLogsDir, fmt.Sprintf("*.%s", legacyLogSuffix)))
	for _, logSymlink := range logSymlinks {
		if _, err := osInterface.Stat(logSymlink); os.IsNotExist(err) {
			err := osInterface.Remove(logSymlink)
			if err != nil {
				glog.Errorf("Failed to remove container log dead symlink %q: %v", logSymlink, err)
			}
		}
	}
	return nil
}
This first reads the subdirectories under /var/log/pods, whose names are pod UIDs; if a pod has been deleted, its log directory is removed. It then removes dead log symlinks under /var/log/containers (the path constants involved are noted below).
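The values of the constants referenced in evictPodLogsDirectories are, to the best of my knowledge, the following; they come from the kuberuntime package and are worth double-checking against your kubelet version:

// Assumed values of the constants used above.
const (
	podLogsRootDirectory   = "/var/log/pods"       // one subdirectory per pod UID
	legacyContainerLogsDir = "/var/log/containers" // legacy per-container log symlinks
	legacyLogSuffix        = "log"                 // symlink names end in ".log"
)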
This completes a single ContainerGC pass.
To reclaim storage on the node, Kubernetes also provides ImageGC, which garbage-collects images.
ImageGC likewise defines an image GC manager and a GC policy.
//pkg/kubelet/image/image_gc_manager.go
type ImageGCPolicy struct {
	// Any usage above this threshold will always trigger garbage collection.
	// This is the highest usage we will allow.
	HighThresholdPercent int

	// Any usage below this threshold will never trigger garbage collection.
	// This is the lowest threshold we will try to garbage collect to.
	LowThresholdPercent int

	// Minimum age at which an image can be garbage collected.
	MinAge time.Duration
}

type realImageGCManager struct {
	// Container runtime
	runtime container.Runtime

	// Records of images and their use.
	imageRecords     map[string]*imageRecord
	imageRecordsLock sync.Mutex

	// The image garbage collection policy in use.
	policy ImageGCPolicy

	// cAdvisor instance.
	cadvisor cadvisor.Interface

	// Recorder for Kubernetes events.
	recorder record.EventRecorder

	// Reference to this node.
	nodeRef *v1.ObjectReference

	// Track initialization
	initialized bool

	// imageCache is the cache of latest image list.
	imageCache imageCache
}
The policy has three main parameters (a sketch of how they map to kubelet flags follows this list):
HighThresholdPercent: disk usage above this threshold always triggers garbage collection.
LowThresholdPercent: usage below this threshold never triggers garbage collection; it is the level GC tries to free down to.
MinAge: the minimum age an image must reach before it can be garbage collected.
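These fields are populated from the kubelet flags --image-gc-high-threshold, --image-gc-low-threshold, and --minimum-image-ttl-duration. A rough sketch of the resulting default policy, assuming the ImageGCPolicy type from the file above; the defaults shown (85, 80, 2m) match the upstream kubelet defaults at the time of writing, but verify them for your version:

// Hypothetical construction of the default image GC policy from kubelet flags.
var defaultImageGCPolicy = ImageGCPolicy{
	HighThresholdPercent: 85,              // --image-gc-high-threshold
	LowThresholdPercent:  80,              // --image-gc-low-threshold
	MinAge:               2 * time.Minute, // --minimum-image-ttl-duration
}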
The same file contains a GarbageCollect method, together with the detectImages and freeSpace helpers it relies on:
func (im *realImageGCManager) detectImages(detectTime time.Time) error {
	images, err := im.runtime.ListImages()
	if err != nil {
		return err
	}
	pods, err := im.runtime.GetPods(true)
	if err != nil {
		return err
	}

	// Make a set of images in use by containers.
	imagesInUse := sets.NewString()
	for _, pod := range pods {
		for _, container := range pod.Containers {
			glog.V(5).Infof("Pod %s/%s, container %s uses image %s(%s)", pod.Namespace, pod.Name, container.Name, container.Image, container.ImageID)
			imagesInUse.Insert(container.ImageID)
		}
	}

	// Add new images and record those being used.
	now := time.Now()
	currentImages := sets.NewString()
	im.imageRecordsLock.Lock()
	defer im.imageRecordsLock.Unlock()
	for _, image := range images {
		glog.V(5).Infof("Adding image ID %s to currentImages", image.ID)
		currentImages.Insert(image.ID)

		// New image, set it as detected now.
		if _, ok := im.imageRecords[image.ID]; !ok {
			glog.V(5).Infof("Image ID %s is new", image.ID)
			im.imageRecords[image.ID] = &imageRecord{
				firstDetected: detectTime,
			}
		}

		// Set last used time to now if the image is being used.
		if isImageUsed(image, imagesInUse) {
			glog.V(5).Infof("Setting Image ID %s lastUsed to %v", image.ID, now)
			im.imageRecords[image.ID].lastUsed = now
		}

		glog.V(5).Infof("Image ID %s has size %d", image.ID, image.Size)
		im.imageRecords[image.ID].size = image.Size
	}

	// Remove old images from our records.
	for image := range im.imageRecords {
		if !currentImages.Has(image) {
			glog.V(5).Infof("Image ID %s is no longer present; removing from imageRecords", image)
			delete(im.imageRecords, image)
		}
	}

	return nil
}
func (im *realImageGCManager) GarbageCollect() error {
	// Get disk usage on disk holding images.
	fsInfo, err := im.cadvisor.ImagesFsInfo()
	if err != nil {
		return err
	}
	capacity := int64(fsInfo.Capacity)
	available := int64(fsInfo.Available)
	if available > capacity {
		glog.Warningf("available %d is larger than capacity %d", available, capacity)
		available = capacity
	}

	// Check valid capacity.
	if capacity == 0 {
		err := fmt.Errorf("invalid capacity %d on device %q at mount point %q", capacity, fsInfo.Device, fsInfo.Mountpoint)
		im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.InvalidDiskCapacity, err.Error())
		return err
	}

	// If over the max threshold, free enough to place us at the lower threshold.
	usagePercent := 100 - int(available*100/capacity)
	if usagePercent >= im.policy.HighThresholdPercent {
		amountToFree := capacity*int64(100-im.policy.LowThresholdPercent)/100 - available
		glog.Infof("[imageGCManager]: Disk usage on %q (%s) is at %d%% which is over the high threshold (%d%%). Trying to free %d bytes", fsInfo.Device, fsInfo.Mountpoint, usagePercent, im.policy.HighThresholdPercent, amountToFree)
		freed, err := im.freeSpace(amountToFree, time.Now())
		if err != nil {
			return err
		}

		if freed < amountToFree {
			err := fmt.Errorf("failed to garbage collect required amount of images. Wanted to free %d bytes, but freed %d bytes", amountToFree, freed)
			im.recorder.Eventf(im.nodeRef, v1.EventTypeWarning, events.FreeDiskSpaceFailed, err.Error())
			return err
		}
	}

	return nil
}
func (im *realImageGCManager) DeleteUnusedImages() (int64, error) {
	return im.freeSpace(math.MaxInt64, time.Now())
}

// Tries to free bytesToFree worth of images on the disk.
//
// Returns the number of bytes free and an error if any occurred. The number of
// bytes freed is always returned.
// Note that error may be nil and the number of bytes free may be less
// than bytesToFree.
func (im *realImageGCManager) freeSpace(bytesToFree int64, freeTime time.Time) (int64, error) {
	err := im.detectImages(freeTime)
	if err != nil {
		return 0, err
	}

	im.imageRecordsLock.Lock()
	defer im.imageRecordsLock.Unlock()

	// Get all images in eviction order.
	images := make([]evictionInfo, 0, len(im.imageRecords))
	for image, record := range im.imageRecords {
		images = append(images, evictionInfo{
			id:          image,
			imageRecord: *record,
		})
	}
	sort.Sort(byLastUsedAndDetected(images))

	// Delete unused images until we've freed up enough space.
	var deletionErrors []error
	spaceFreed := int64(0)
	for _, image := range images {
		glog.V(5).Infof("Evaluating image ID %s for possible garbage collection", image.id)
		// Images that are currently in use were given a newer lastUsed.
		if image.lastUsed.Equal(freeTime) || image.lastUsed.After(freeTime) {
			glog.V(5).Infof("Image ID %s has lastUsed=%v which is >= freeTime=%v, not eligible for garbage collection", image.id, image.lastUsed, freeTime)
			break
		}

		// Avoid garbage collect the image if the image is not old enough.
		// In such a case, the image may have just been pulled down, and will be used by a container right away.
		if freeTime.Sub(image.firstDetected) < im.policy.MinAge {
			glog.V(5).Infof("Image ID %s has age %v which is less than the policy's minAge of %v, not eligible for garbage collection", image.id, freeTime.Sub(image.firstDetected), im.policy.MinAge)
			continue
		}

		// Remove image. Continue despite errors.
		glog.Infof("[imageGCManager]: Removing image %q to free %d bytes", image.id, image.size)
		err := im.runtime.RemoveImage(container.ImageSpec{Image: image.id})
		if err != nil {
			deletionErrors = append(deletionErrors, err)
			continue
		}
		delete(im.imageRecords, image.id)
		spaceFreed += image.size

		if spaceFreed >= bytesToFree {
			break
		}
	}

	if len(deletionErrors) > 0 {
		return spaceFreed, fmt.Errorf("wanted to free %d bytes, but freed %d bytes space with errors in image deletion: %v", bytesToFree, spaceFreed, errors.NewAggregate(deletionErrors))
	}
	return spaceFreed, nil
}
The image GarbageCollect method is also invoked from pkg/kubelet/kubelet.go, once every five minutes, and each invocation runs one collection pass. It first asks cAdvisor for the capacity and available space of the filesystem that holds images; when usage is at or above the configured high threshold, collection is triggered and the amount of space that must be freed (enough to bring usage back down to the low threshold) is computed. A worked example of this arithmetic follows.
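To make the threshold arithmetic concrete, here is a small self-contained example with made-up numbers (a 100 GiB image filesystem with 10 GiB available, using the default 85/80 thresholds); the numbers are purely illustrative:

package main

import "fmt"

func main() {
	capacity := int64(100 << 30) // 100 GiB image filesystem
	available := int64(10 << 30) // 10 GiB free

	usagePercent := 100 - int(available*100/capacity)      // 90, which is >= HighThresholdPercent (85)
	amountToFree := capacity*int64(100-80)/100 - available // 20 GiB - 10 GiB = 10 GiB to free

	fmt.Println(usagePercent, amountToFree)
}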
The freeSpace method is where the actual reclamation happens, in two steps:
detecting the images on the node;
deleting old images.
Detecting images:
detectImages lists the images on the node, records the time each image was first detected and its size, and, for images currently used by a container, updates their last-used time. These entries are cached in imageRecords, a map keyed by image ID (its record structure is sketched below).
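The imageRecord entries stored in that map look roughly like this; the field set is inferred from how detectImages and freeSpace use them, so treat it as a sketch:

// imageRecord as used by detectImages/freeSpace (fields inferred from the code above).
type imageRecord struct {
	firstDetected time.Time // when the image was first seen on the node
	lastUsed      time.Time // last time a running container was seen using it
	size          int64     // image size in bytes, as reported by the runtime
}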
Reclaiming images:
freeSpace copies the entries in imageRecords into a slice and sorts them by last-used time, breaking ties by first-detected time (a sketch of this ordering follows). It then walks the images in that order: as soon as it reaches an image whose lastUsed is not older than freeTime, that image and everything after it are still in use, so the loop breaks; an image whose age since first detection is less than MinAge is skipped; everything else is deleted. The loop also stops once the freed space reaches the target amount, which was computed so that usage drops back to LowThresholdPercent.
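The eviction order comes from the byLastUsedAndDetected sort type referenced in sort.Sort above; a sketch of the sort.Interface implementation it is assumed to provide:

// byLastUsedAndDetected orders eviction candidates oldest-used first; ties are
// broken by which image was detected earlier.
type byLastUsedAndDetected []evictionInfo

func (ev byLastUsedAndDetected) Len() int      { return len(ev) }
func (ev byLastUsedAndDetected) Swap(i, j int) { ev[i], ev[j] = ev[j], ev[i] }
func (ev byLastUsedAndDetected) Less(i, j int) bool {
	// Sort by last used, break ties by first detected.
	if ev[i].lastUsed.Equal(ev[j].lastUsed) {
		return ev[i].firstDetected.Before(ev[j].firstDetected)
	}
	return ev[i].lastUsed.Before(ev[j].lastUsed)
}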
Original posts:
https://www.jianshu.com/p/a6a6f6bab4a1
https://www.jianshu.com/p/2531c043cd70