From 5f718908a399270001243666ba6bceaca1fe2c62 Mon Sep 17 00:00:00 2001 From: Ashley Kasim Date: Mon, 23 Sep 2019 15:01:34 -0700 Subject: [PATCH] update tests to reflect terminated containers should not be used in usage calculations --- pkg/kubelet/stats/cri_stats_provider_test.go | 72 ++++++++++++++------ 1 file changed, 52 insertions(+), 20 deletions(-) diff --git a/pkg/kubelet/stats/cri_stats_provider_test.go b/pkg/kubelet/stats/cri_stats_provider_test.go index dd57a77fc027d..74cea550b3368 100644 --- a/pkg/kubelet/stats/cri_stats_provider_test.go +++ b/pkg/kubelet/stats/cri_stats_provider_test.go @@ -80,6 +80,7 @@ const ( cName5 = "container5-name" cName6 = "container6-name" cName7 = "container7-name" + cName8 = "container8-name" ) func TestCRIListPodStats(t *testing.T) { @@ -112,10 +113,14 @@ func TestCRIListPodStats(t *testing.T) { containerStats4 = makeFakeContainerStats(container4, imageFsMountpoint) containerLogStats4 = makeFakeLogStats(4000) + // Running pod with a terminated container and a running container sandbox3 = makeFakePodSandbox("sandbox3-name", "sandbox3-uid", "sandbox3-ns", false) + sandbox3Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox3.PodSandboxStatus.Metadata.Uid)) container5 = makeFakeContainer(sandbox3, cName5, 0, true) containerStats5 = makeFakeContainerStats(container5, imageFsMountpoint) containerLogStats5 = makeFakeLogStats(5000) + container8 = makeFakeContainer(sandbox3, cName8, 0, false) + containerStats8 = makeFakeContainerStats(container8, imageFsMountpoint) // Terminated pod sandbox sandbox4 = makeFakePodSandbox("sandbox1-name", "sandbox1-uid", "sandbox1-ns", true) @@ -156,6 +161,7 @@ func TestCRIListPodStats(t *testing.T) { sandbox2.PodSandboxStatus.Id: getTestContainerInfo(seedSandbox2, pName2, sandbox2.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName), sandbox2Cgroup: getTestContainerInfo(seedSandbox2, "", "", ""), container4.ContainerStatus.Id: getTestContainerInfo(seedContainer3, pName2, 
sandbox2.PodSandboxStatus.Metadata.Namespace, cName3), + sandbox3Cgroup: getTestContainerInfo(seedSandbox3, "", "", ""), } options := cadvisorapiv2.RequestOptions{ @@ -173,10 +179,10 @@ func TestCRIListPodStats(t *testing.T) { sandbox0, sandbox1, sandbox2, sandbox3, sandbox4, sandbox5, }) fakeRuntimeService.SetFakeContainers([]*critest.FakeContainer{ - container0, container1, container2, container3, container4, container5, container6, container7, + container0, container1, container2, container3, container4, container5, container6, container7, container8, }) fakeRuntimeService.SetFakeContainerStats([]*runtimeapi.ContainerStats{ - containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, containerStats5, containerStats6, containerStats7, + containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, containerStats5, containerStats6, containerStats7, containerStats8, }) ephemeralVolumes := makeFakeVolumeStats([]string{"ephVolume1, ephVolumes2"}) @@ -299,14 +305,12 @@ func TestCRIListPodStats(t *testing.T) { assert.Equal(sandbox3.CreatedAt, p3.StartTime.UnixNano()) assert.Equal(1, len(p3.Containers)) - c5 := p3.Containers[0] - assert.Equal(cName5, c5.Name) - assert.Equal(container5.CreatedAt, c5.StartTime.UnixNano()) - assert.NotNil(c5.CPU.Time) - assert.Zero(*c5.CPU.UsageCoreNanoSeconds) - assert.Zero(*c5.CPU.UsageNanoCores) - assert.NotNil(c5.Memory.Time) - assert.Zero(*c5.Memory.WorkingSetBytes) + c8 := p3.Containers[0] + assert.Equal(cName8, c8.Name) + assert.Equal(container8.CreatedAt, c8.StartTime.UnixNano()) + assert.NotNil(c8.CPU.Time) + assert.NotNil(c8.Memory.Time) + checkCRIPodCPUAndMemoryStats(assert, p3, infos[sandbox3Cgroup].Stats[0]) mockCadvisor.AssertExpectations(t) } @@ -336,9 +340,13 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) { container4 = makeFakeContainer(sandbox2, cName3, 1, false) containerStats4 = makeFakeContainerStats(container4, imageFsMountpoint) + // Running pod with a 
terminated container and a running container sandbox3 = makeFakePodSandbox("sandbox3-name", "sandbox3-uid", "sandbox3-ns", false) + sandbox3Cgroup = "/" + cm.GetPodCgroupNameSuffix(types.UID(sandbox3.PodSandboxStatus.Metadata.Uid)) container5 = makeFakeContainer(sandbox3, cName5, 0, true) containerStats5 = makeFakeContainerStats(container5, imageFsMountpoint) + container8 = makeFakeContainer(sandbox3, cName8, 0, false) + containerStats8 = makeFakeContainerStats(container8, imageFsMountpoint) // Terminated pod sandbox sandbox4 = makeFakePodSandbox("sandbox1-name", "sandbox1-uid", "sandbox1-ns", true) @@ -373,6 +381,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) { sandbox2.PodSandboxStatus.Id: getTestContainerInfo(seedSandbox2, pName2, sandbox2.PodSandboxStatus.Metadata.Namespace, leaky.PodInfraContainerName), sandbox2Cgroup: getTestContainerInfo(seedSandbox2, "", "", ""), container4.ContainerStatus.Id: getTestContainerInfo(seedContainer3, pName2, sandbox2.PodSandboxStatus.Metadata.Namespace, cName3), + sandbox3Cgroup: getTestContainerInfo(seedSandbox3, "", "", ""), } options := cadvisorapiv2.RequestOptions{ @@ -387,10 +396,10 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) { sandbox0, sandbox1, sandbox2, sandbox3, sandbox4, sandbox5, }) fakeRuntimeService.SetFakeContainers([]*critest.FakeContainer{ - container0, container1, container2, container3, container4, container5, container6, container7, + container0, container1, container2, container3, container4, container5, container6, container7, container8, }) fakeRuntimeService.SetFakeContainerStats([]*runtimeapi.ContainerStats{ - containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, containerStats5, containerStats6, containerStats7, + containerStats0, containerStats1, containerStats2, containerStats3, containerStats4, containerStats5, containerStats6, containerStats7, containerStats8, }) ephemeralVolumes := makeFakeVolumeStats([]string{"ephVolume1, ephVolumes2"}) @@ -487,14 
+496,12 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) { assert.Equal(sandbox3.CreatedAt, p3.StartTime.UnixNano()) assert.Equal(1, len(p3.Containers)) - c5 := p3.Containers[0] - assert.Equal(cName5, c5.Name) - assert.Equal(container5.CreatedAt, c5.StartTime.UnixNano()) - assert.NotNil(c5.CPU.Time) - assert.Zero(*c5.CPU.UsageCoreNanoSeconds) - assert.Zero(*c5.CPU.UsageNanoCores) - assert.NotNil(c5.Memory.Time) - assert.Zero(*c5.Memory.WorkingSetBytes) + c8 := p3.Containers[0] + assert.Equal(cName8, c8.Name) + assert.Equal(container8.CreatedAt, c8.StartTime.UnixNano()) + assert.NotNil(c8.CPU.Time) + assert.NotNil(c8.Memory.Time) + checkCRIPodCPUAndMemoryStats(assert, p3, infos[sandbox3Cgroup].Stats[0]) mockCadvisor.AssertExpectations(t) } @@ -882,6 +889,31 @@ func TestGetContainerUsageNanoCores(t *testing.T) { }, expected: &value2, }, + { + desc: "should return nil if cpuacct is reset to 0 in a live container", + stats: &runtimeapi.ContainerStats{ + Attributes: &runtimeapi.ContainerAttributes{ + Id: "1", + }, + Cpu: &runtimeapi.CpuUsage{ + Timestamp: 2, + UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + Value: 0, + }, + }, + }, + cpuUsageCache: map[string]*cpuUsageRecord{ + "1": { + stats: &runtimeapi.CpuUsage{ + Timestamp: 1, + UsageCoreNanoSeconds: &runtimeapi.UInt64Value{ + Value: 10000000000, + }, + }, + }, + }, + expected: nil, + }, } for _, test := range tests {