go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-autoscaling\]\sCluster\ssize\sautoscaling\s\[Slow\]\sShould\sbe\sable\sto\sscale\sa\snode\sgroup\sdown\sto\s0\[Feature\:ClusterSizeAutoscalingScaleDown\]$'
test/e2e/autoscaling/cluster_size_autoscaling.go:868 k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 +0x429 k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 +0x95 from junit_01.xml
[BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 12/06/22 16:36:44.171 Dec 6 16:36:44.171: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename autoscaling 12/06/22 16:36:44.172 STEP: Waiting for a default service account to be provisioned in namespace 12/06/22 16:36:44.298 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 12/06/22 16:36:44.379 [BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/autoscaling/cluster_size_autoscaling.go:103 STEP: Initial size of ca-minion-group-1: 1 12/06/22 16:36:50.578 STEP: Initial size of ca-minion-group: 1 12/06/22 16:36:54.053 Dec 6 16:36:54.099: INFO: Cluster has reached the desired number of ready nodes 2 STEP: Initial number of schedulable nodes: 2 12/06/22 16:36:54.145 [It] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] test/e2e/autoscaling/cluster_size_autoscaling.go:877 STEP: Find smallest node group and manually scale it to a single node 12/06/22 16:36:54.145 Dec 6 16:36:54.145: INFO: Skipping dumping logs from cluster Dec 6 16:37:10.637: INFO: Skipping dumping logs from cluster Dec 6 16:37:10.681: INFO: Cluster has reached the desired number of ready nodes 2 STEP: Target node for scale-down: ca-minion-group-88hl 12/06/22 16:37:14.255 STEP: Make the single node unschedulable 12/06/22 16:37:14.255 STEP: Taint node ca-minion-group-88hl 12/06/22 16:37:14.255 STEP: Manually drain the single node 12/06/22 16:37:14.368 I1206 16:37:14.933495 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:37:34.981631 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:37:55.028806 7957 
cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:38:15.076940 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:38:35.121279 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:38:55.166265 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:39:15.211010 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:39:35.256192 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:39:55.300882 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:40:15.347657 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:40:35.393627 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:40:55.439322 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:41:15.483594 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 16:41:35.529127 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 5m9.978s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 5m0.004s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 4m39.78s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 
2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:41:55.573414 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 5m29.983s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 5m20.009s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 4m59.785s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > 
k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:42:15.617509 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 5m49.988s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 5m40.014s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 5m19.79s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:42:35.661594 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 6m9.992s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 6m0.018s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 5m39.794s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:42:55.706578 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 6m29.996s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 6m20.022s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 5m59.798s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:43:15.750221 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 6m49.996s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 6m40.023s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 6m19.799s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:43:35.794287 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 7m10.002s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 7m0.028s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 6m39.804s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:43:55.839362 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 7m30.004s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 7m20.031s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 6m59.807s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:44:15.883008 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 7m50.005s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 7m40.032s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 7m19.808s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:44:35.927323 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 8m10.006s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 8m0.032s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 7m39.809s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:44:55.970924 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 8m30.011s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 8m20.037s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 7m59.813s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:45:16.015813 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 8m50.015s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 8m40.041s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 8m19.817s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:45:36.061024 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 9m10.02s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 9m0.046s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 8m39.822s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:45:56.105218 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 9m30.021s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 9m20.047s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 8m59.824s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:46:16.149849 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 9m50.025s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 9m40.051s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 9m19.828s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:46:36.195014 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 10m10.03s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 10m0.056s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 9m39.832s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:46:56.238783 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 10m30.034s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 10m20.06s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 9m59.836s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:47:16.283787 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 10m50.035s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 10m40.061s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 10m19.838s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:47:36.327989 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 11m10.036s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 11m0.062s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 10m39.839s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:47:56.373202 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 11m30.042s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 11m20.068s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 10m59.844s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:48:16.420225 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 11m50.046s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 11m40.072s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 11m19.848s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:48:36.464549 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 12m10.047s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 12m0.073s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 11m39.849s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:48:56.509673 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 12m30.049s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 12m20.075s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 11m59.852s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:49:16.555854 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 12m50.055s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 12m40.081s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 12m19.857s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:49:36.600748 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 13m10.059s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 13m0.085s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 12m39.861s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:49:56.644915 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 13m30.061s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 13m20.087s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 12m59.863s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:50:16.689818 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 13m50.066s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 13m40.092s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 13m19.868s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:50:36.736782 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 14m10.066s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 14m0.093s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 13m39.869s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:50:56.781941 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 14m30.07s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 14m20.096s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 13m59.872s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:51:16.827231 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 14m50.07s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 14m40.097s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 14m19.873s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:51:36.877853 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 15m10.075s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 15m0.101s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 14m39.877s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:51:56.934735 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 15m30.08s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 15m20.106s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 14m59.882s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:52:16.989424 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 15m50.081s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 15m40.107s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 15m19.883s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:52:37.047886 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 16m10.085s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 16m0.111s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 15m39.887s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:52:57.094539 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 16m30.086s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 16m20.112s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 15m59.888s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:53:17.140984 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 16m50.087s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 16m40.113s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 16m19.889s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:53:37.188754 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 17m10.089s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 17m0.116s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 16m39.892s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:53:57.234959 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 17m30.09s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 17m20.116s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 16m59.893s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:54:17.282131 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 17m50.091s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 17m40.117s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 17m19.894s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:54:37.328410 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 18m10.092s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 18m0.118s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 17m39.895s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:54:57.384469 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 18m30.093s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 18m20.12s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 17m59.896s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:55:17.430640 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 18m50.098s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 18m40.124s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 18m19.9s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:55:37.476039 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 19m10.101s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 19m0.127s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 18m39.903s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:55:57.522101 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 19m30.105s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 19m20.131s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 18m59.908s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:56:17.566873 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 19m50.106s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 19m40.132s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 19m19.909s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:56:37.612515 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 20m10.11s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 20m0.136s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 19m39.913s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 16:56:57.656440 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group down to 0[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 20m30.115s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 In [It] (Node Runtime: 20m20.141s) test/e2e/autoscaling/cluster_size_autoscaling.go:877 At [By Step] Manually drain the single node (Step Runtime: 19m59.917s) test/e2e/autoscaling/cluster_size_autoscaling.go:1465 Spec Goroutine goroutine 2254 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc002bacb60}, 0xc0008abe88, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000477c80}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Dec 6 16:57:17.658: INFO: Unexpected error: <*errors.errorString | 0xc000e61040>: { s: "timeout waiting 20m0s for appropriate cluster size", } Dec 6 16:57:17.658: FAIL: timeout waiting 20m0s for appropriate cluster size Full Stack Trace k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.30() test/e2e/autoscaling/cluster_size_autoscaling.go:868 +0x429 k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.31() test/e2e/autoscaling/cluster_size_autoscaling.go:881 +0x95 [AfterEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/framework/node/init/init.go:32 Dec 6 16:57:17.658: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/autoscaling/cluster_size_autoscaling.go:139 STEP: Restoring initial size of the cluster 12/06/22 16:57:17.707 Dec 6 16:57:25.903: INFO: Cluster has reached the desired number of ready nodes 2 STEP: Remove taint from node ca-master 12/06/22 16:57:25.945 STEP: Remove taint from node ca-minion-group-1-bh26 12/06/22 16:57:25.987 STEP: Remove taint from node ca-minion-group-88hl 12/06/22 16:57:26.029 I1206 16:57:26.125880 7957 cluster_size_autoscaling.go:165] Made nodes schedulable again in 180.301091ms [DeferCleanup (Each)] 
[sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-autoscaling] Cluster size autoscaling [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 12/06/22 16:57:26.126 STEP: Collecting events from namespace "autoscaling-6901". 12/06/22 16:57:26.126 STEP: Found 0 events. 12/06/22 16:57:26.18 Dec 6 16:57:26.221: INFO: POD NODE PHASE GRACE CONDITIONS Dec 6 16:57:26.221: INFO: Dec 6 16:57:26.264: INFO: Logging node info for node ca-master Dec 6 16:57:26.352: INFO: Node Info: &Node{ObjectMeta:{ca-master 8e72891f-3d31-4302-b81e-5940fbd1a2b7 8337 0 2022-12-06 16:14:32 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:ca-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-12-06 16:14:32 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-12-06 16:14:42 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-12-06 16:14:42 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-12-06 16:55:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gci-autoscaling-migs/us-west1-b/ca-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 
DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-12-06 16:14:42 +0000 UTC,LastTransitionTime:2022-12-06 16:14:42 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-12-06 16:55:39 +0000 UTC,LastTransitionTime:2022-12-06 16:14:32 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-12-06 16:55:39 +0000 UTC,LastTransitionTime:2022-12-06 16:14:32 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-12-06 16:55:39 +0000 UTC,LastTransitionTime:2022-12-06 16:14:32 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-12-06 16:55:39 +0000 UTC,LastTransitionTime:2022-12-06 16:14:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:35.230.76.149,},NodeAddress{Type:InternalDNS,Address:ca-master.c.k8s-jkns-gci-autoscaling-migs.internal,},NodeAddress{Type:Hostname,Address:ca-master.c.k8s-jkns-gci-autoscaling-migs.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bac070a384fefb9133ee7878e15673cf,SystemUUID:bac070a3-84fe-fb91-33ee-7878e15673cf,BootID:06caa00c-99ed-4909-986f-2ab86fa8e8fe,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.6.6,KubeletVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,KubeProxyVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 
registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/autoscaling/cluster-autoscaler@sha256:07ab8c89cd0ad296ddb6347febe196d8fe0e1c757656a98f71199860d87cf1a5 registry.k8s.io/autoscaling/cluster-autoscaler:v1.22.0],SizeBytes:24220268,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Dec 6 16:57:26.353: INFO: Logging kubelet events for node ca-master Dec 6 16:57:26.403: INFO: Logging pods the kubelet thinks is on node ca-master Dec 6 16:57:26.477: INFO: kube-controller-manager-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.477: INFO: Container kube-controller-manager ready: true, restart count 1 Dec 6 16:57:26.477: INFO: kube-scheduler-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.477: INFO: Container kube-scheduler ready: true, restart count 0 Dec 6 16:57:26.477: INFO: kube-apiserver-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.477: INFO: Container kube-apiserver ready: true, restart count 0 Dec 6 16:57:26.477: INFO: kube-addon-manager-ca-master started at 2022-12-06 16:14:05 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.477: INFO: Container kube-addon-manager ready: true, restart count 0 Dec 6 16:57:26.477: INFO: cluster-autoscaler-ca-master started at 
2022-12-06 16:14:06 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.477: INFO: Container cluster-autoscaler ready: true, restart count 2 Dec 6 16:57:26.477: INFO: metadata-proxy-v0.1-7j85x started at 2022-12-06 16:14:35 +0000 UTC (0+2 container statuses recorded) Dec 6 16:57:26.477: INFO: Container metadata-proxy ready: true, restart count 0 Dec 6 16:57:26.477: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Dec 6 16:57:26.477: INFO: etcd-server-events-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.477: INFO: Container etcd-container ready: true, restart count 0 Dec 6 16:57:26.477: INFO: etcd-server-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.477: INFO: Container etcd-container ready: true, restart count 0 Dec 6 16:57:26.477: INFO: konnectivity-server-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.477: INFO: Container konnectivity-server-container ready: true, restart count 0 Dec 6 16:57:26.477: INFO: l7-lb-controller-ca-master started at 2022-12-06 16:14:07 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.477: INFO: Container l7-lb-controller ready: true, restart count 2 Dec 6 16:57:26.696: INFO: Latency metrics for node ca-master Dec 6 16:57:26.696: INFO: Logging node info for node ca-minion-group-1-bh26 Dec 6 16:57:26.739: INFO: Node Info: &Node{ObjectMeta:{ca-minion-group-1-bh26 1254a331-fce9-45fd-9e04-493b6a7b7894 8074 0 2022-12-06 16:28:38 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:ca-minion-group-1-bh26 kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 
topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-12-06 16:28:38 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-12-06 16:28:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.4.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-12-06 16:28:50 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-12-06 16:53:46 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-12-06 16:53:58 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.4.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gci-autoscaling-migs/us-west1-b/ca-minion-group-1-bh26,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.4.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 
DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-12-06 16:53:46 +0000 UTC,LastTransitionTime:2022-12-06 16:28:42 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-12-06 16:53:46 +0000 UTC,LastTransitionTime:2022-12-06 16:28:42 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-12-06 16:53:46 +0000 UTC,LastTransitionTime:2022-12-06 16:28:42 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-12-06 16:53:46 +0000 UTC,LastTransitionTime:2022-12-06 16:28:42 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-12-06 16:53:46 +0000 UTC,LastTransitionTime:2022-12-06 16:28:42 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-12-06 16:53:46 +0000 UTC,LastTransitionTime:2022-12-06 16:28:42 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-12-06 16:53:46 +0000 
UTC,LastTransitionTime:2022-12-06 16:28:42 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-12-06 16:28:50 +0000 UTC,LastTransitionTime:2022-12-06 16:28:50 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-12-06 16:53:58 +0000 UTC,LastTransitionTime:2022-12-06 16:28:38 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-12-06 16:53:58 +0000 UTC,LastTransitionTime:2022-12-06 16:28:38 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-12-06 16:53:58 +0000 UTC,LastTransitionTime:2022-12-06 16:28:38 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-12-06 16:53:58 +0000 UTC,LastTransitionTime:2022-12-06 16:28:40 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.6,},NodeAddress{Type:ExternalIP,Address:35.233.150.138,},NodeAddress{Type:InternalDNS,Address:ca-minion-group-1-bh26.c.k8s-jkns-gci-autoscaling-migs.internal,},NodeAddress{Type:Hostname,Address:ca-minion-group-1-bh26.c.k8s-jkns-gci-autoscaling-migs.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:ca593114e6f9068481524427dd317b95,SystemUUID:ca593114-e6f9-0684-8152-4427dd317b95,BootID:9198ec48-a64e-49d7-a53f-6d4eb874d4dd,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.6.6,KubeletVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,KubeProxyVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:67201736,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 
registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Dec 6 16:57:26.739: INFO: Logging kubelet events for node ca-minion-group-1-bh26 Dec 6 16:57:26.793: INFO: Logging pods the kubelet thinks is on node ca-minion-group-1-bh26 Dec 6 16:57:26.870: INFO: volume-snapshot-controller-0 started at 2022-12-06 16:37:14 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.870: INFO: Container volume-snapshot-controller ready: true, restart count 0 Dec 6 16:57:26.870: INFO: coredns-6d97d5ddb-22gh4 started at 2022-12-06 16:32:32 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.870: INFO: Container coredns ready: true, restart count 0 Dec 6 16:57:26.870: INFO: l7-default-backend-8549d69d99-sc6v8 started at 2022-12-06 16:37:14 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.870: INFO: Container default-http-backend ready: true, restart count 0 Dec 6 16:57:26.870: INFO: konnectivity-agent-mb2mb started at 2022-12-06 16:28:50 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.870: INFO: Container konnectivity-agent ready: true, restart count 0 Dec 6 
16:57:26.870: INFO: kube-proxy-ca-minion-group-1-bh26 started at 2022-12-06 16:28:38 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:26.870: INFO: Container kube-proxy ready: true, restart count 0 Dec 6 16:57:26.870: INFO: metadata-proxy-v0.1-vhnbd started at 2022-12-06 16:28:39 +0000 UTC (0+2 container statuses recorded) Dec 6 16:57:26.870: INFO: Container metadata-proxy ready: true, restart count 0 Dec 6 16:57:26.870: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Dec 6 16:57:26.870: INFO: metrics-server-v0.5.2-867b8754b9-z4tjs started at 2022-12-06 16:37:14 +0000 UTC (0+2 container statuses recorded) Dec 6 16:57:26.870: INFO: Container metrics-server ready: true, restart count 1 Dec 6 16:57:26.870: INFO: Container metrics-server-nanny ready: true, restart count 0 Dec 6 16:57:27.145: INFO: Latency metrics for node ca-minion-group-1-bh26 Dec 6 16:57:27.145: INFO: Logging node info for node ca-minion-group-88hl Dec 6 16:57:27.188: INFO: Node Info: &Node{ObjectMeta:{ca-minion-group-88hl d3e51bef-805c-4eb8-bb6c-a3b24a6c32f7 8614 0 2022-12-06 16:14:37 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:ca-minion-group-88hl kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-12-06 16:14:37 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-12-06 16:14:38 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.3.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-12-06 16:14:52 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-12-06 16:54:06 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status} {node-problem-detector Update v1 2022-12-06 16:54:49 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status}]},Spec:NodeSpec{PodCIDR:10.64.3.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gci-autoscaling-migs/us-west1-b/ca-minion-group-88hl,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.3.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: 
{{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-12-06 16:54:49 +0000 UTC,LastTransitionTime:2022-12-06 16:14:41 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-12-06 16:54:49 +0000 UTC,LastTransitionTime:2022-12-06 16:14:41 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-12-06 16:54:49 +0000 UTC,LastTransitionTime:2022-12-06 16:14:41 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-12-06 16:54:49 +0000 UTC,LastTransitionTime:2022-12-06 16:14:41 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-12-06 16:54:49 +0000 UTC,LastTransitionTime:2022-12-06 16:14:41 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-12-06 16:54:49 +0000 UTC,LastTransitionTime:2022-12-06 16:14:41 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-12-06 16:54:49 +0000 UTC,LastTransitionTime:2022-12-06 16:14:41 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-12-06 16:14:52 +0000 UTC,LastTransitionTime:2022-12-06 16:14:52 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-12-06 16:54:06 +0000 
UTC,LastTransitionTime:2022-12-06 16:14:37 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-12-06 16:54:06 +0000 UTC,LastTransitionTime:2022-12-06 16:14:37 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-12-06 16:54:06 +0000 UTC,LastTransitionTime:2022-12-06 16:14:37 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-12-06 16:54:06 +0000 UTC,LastTransitionTime:2022-12-06 16:14:38 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.3,},NodeAddress{Type:ExternalIP,Address:34.105.95.26,},NodeAddress{Type:InternalDNS,Address:ca-minion-group-88hl.c.k8s-jkns-gci-autoscaling-migs.internal,},NodeAddress{Type:Hostname,Address:ca-minion-group-88hl.c.k8s-jkns-gci-autoscaling-migs.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:15139f59386ac9fe21f51f24ca514f9e,SystemUUID:15139f59-386a-c9fe-21f5-1f24ca514f9e,BootID:4e017bd6-594f-47cb-8947-c6e814f74e1e,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.6.6,KubeletVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,KubeProxyVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:67201736,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 
gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Dec 6 16:57:27.189: INFO: Logging kubelet events for node ca-minion-group-88hl Dec 6 16:57:27.353: INFO: Logging pods the kubelet thinks is on node ca-minion-group-88hl Dec 6 16:57:27.576: INFO: kube-proxy-ca-minion-group-88hl started at 2022-12-06 16:14:37 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:27.577: INFO: Container kube-proxy ready: true, restart count 0 Dec 6 16:57:27.577: 
INFO: metadata-proxy-v0.1-q8mx9 started at 2022-12-06 16:37:14 +0000 UTC (0+2 container statuses recorded) Dec 6 16:57:27.577: INFO: Container metadata-proxy ready: true, restart count 0 Dec 6 16:57:27.577: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Dec 6 16:57:27.577: INFO: konnectivity-agent-pm75m started at 2022-12-06 16:57:26 +0000 UTC (0+1 container statuses recorded) Dec 6 16:57:27.577: INFO: Container konnectivity-agent ready: true, restart count 0 Dec 6 16:57:27.753: INFO: Latency metrics for node ca-minion-group-88hl [DeferCleanup (Each)] [sig-autoscaling] Cluster size autoscaling [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "autoscaling-6901" for this suite. 12/06/22 16:57:27.754
Filter through log files | View test history on testgrid
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-autoscaling\]\sCluster\ssize\sautoscaling\s\[Slow\]\sShould\sbe\sable\sto\sscale\sa\snode\sgroup\sup\sfrom\s0\[Feature\:ClusterSizeAutoscalingScaleUp\]$'
test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 +0x7a5from junit_01.xml
[BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 12/06/22 20:41:33.855 Dec 6 20:41:33.855: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename autoscaling 12/06/22 20:41:33.857 STEP: Waiting for a default service account to be provisioned in namespace 12/06/22 20:41:33.982 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 12/06/22 20:41:34.063 [BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/autoscaling/cluster_size_autoscaling.go:103 STEP: Initial size of ca-minion-group-1: 1 12/06/22 20:41:37.723 STEP: Initial size of ca-minion-group: 1 12/06/22 20:41:41.336 Dec 6 20:41:41.380: INFO: Cluster has reached the desired number of ready nodes 2 STEP: Initial number of schedulable nodes: 2 12/06/22 20:41:41.422 [It] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] test/e2e/autoscaling/cluster_size_autoscaling.go:753 STEP: Manually scale smallest node group to 0 12/06/22 20:41:41.422 Dec 6 20:41:41.423: INFO: Skipping dumping logs from cluster Dec 6 20:41:45.867: INFO: Skipping dumping logs from cluster Dec 6 20:41:45.910: INFO: Waiting for ready nodes 1, current ready 2, not ready nodes 0 Dec 6 20:42:05.955: INFO: Waiting for ready nodes 1, current ready 2, not ready nodes 0 Dec 6 20:42:26.026: INFO: Waiting for ready nodes 1, current ready 2, not ready nodes 0 Dec 6 20:42:46.070: INFO: Condition Ready of node ca-minion-group-xqbn is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 20:42:28 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 20:42:33 +0000 UTC}]. 
Failure Dec 6 20:42:46.070: INFO: Waiting for ready nodes 1, current ready 1, not ready nodes 1 Dec 6 20:43:06.118: INFO: Condition Ready of node ca-minion-group-xqbn is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 20:42:28 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 20:42:33 +0000 UTC}]. Failure Dec 6 20:43:06.118: INFO: Waiting for ready nodes 1, current ready 1, not ready nodes 1 Dec 6 20:43:26.163: INFO: Condition Ready of node ca-minion-group-xqbn is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 20:42:28 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 20:42:33 +0000 UTC}]. Failure Dec 6 20:43:26.163: INFO: Waiting for ready nodes 1, current ready 1, not ready nodes 1 Dec 6 20:43:46.218: INFO: Condition Ready of node ca-minion-group-xqbn is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 20:42:28 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 20:42:33 +0000 UTC}]. 
Failure Dec 6 20:43:46.218: INFO: Waiting for ready nodes 1, current ready 1, not ready nodes 1 Dec 6 20:44:06.262: INFO: Cluster has reached the desired number of ready nodes 1 STEP: Make remaining nodes unschedulable 12/06/22 20:44:06.262 STEP: Taint node ca-minion-group-1-zjdh 12/06/22 20:44:06.305 STEP: Run a scale-up test 12/06/22 20:44:06.396 STEP: Running RC which reserves 100 MB of memory 12/06/22 20:44:06.396 STEP: creating replication controller memory-reservation in namespace autoscaling-50 12/06/22 20:44:06.396 I1206 20:44:06.442837 7957 runners.go:193] Created replication controller with name: memory-reservation, namespace: autoscaling-50, replica count: 1 I1206 20:44:16.494301 7957 runners.go:193] memory-reservation Pods: 1 out of 1 created, 0 running, 0 pending, 1 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I1206 20:44:26.495369 7957 runners.go:193] memory-reservation Pods: 1 out of 1 created, 0 running, 0 pending, 1 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I1206 20:44:26.538294 7957 runners.go:193] Pod memory-reservation-7rzqj Pending <nil> I1206 20:44:26.580883 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 I1206 20:44:46.625865 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 I1206 20:45:06.669138 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 I1206 20:45:26.713379 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 I1206 20:45:46.760219 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 I1206 20:46:06.809723 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 I1206 20:46:26.856126 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready 
nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 5m7.568s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 In [It] (Node Runtime: 5m0s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 At [By Step] creating replication controller memory-reservation in namespace autoscaling-50 (Step Runtime: 2m35.026s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 10377 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc000b07a70, 0x45d964b800, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a99080}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 20:46:46.900895 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 5m27.569s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 In [It] (Node Runtime: 5m20.002s) 
test/e2e/autoscaling/cluster_size_autoscaling.go:753 At [By Step] creating replication controller memory-reservation in namespace autoscaling-50 (Step Runtime: 2m55.027s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 10377 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc000b07a70, 0x45d964b800, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a99080}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 20:47:06.944909 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 5m47.571s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 In [It] (Node Runtime: 5m40.003s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 At [By Step] creating replication controller memory-reservation in namespace autoscaling-50 (Step Runtime: 3m15.029s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 10377 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > 
k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc000b07a70, 0x45d964b800, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a99080}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 20:47:26.990309 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 6m7.573s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 In [It] (Node Runtime: 6m0.005s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 At [By Step] creating replication controller memory-reservation in namespace autoscaling-50 (Step Runtime: 3m35.031s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 10377 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc000b07a70, 0x45d964b800, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a99080}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 20:47:47.038590 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 6m27.574s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 In [It] (Node Runtime: 6m20.006s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 At [By Step] creating replication controller memory-reservation in namespace autoscaling-50 (Step Runtime: 3m55.032s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 10377 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc000b07a70, 0x45d964b800, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a99080}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 20:48:07.086333 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 6m47.576s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 In [It] (Node Runtime: 6m40.009s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 At [By Step] creating replication controller memory-reservation in namespace autoscaling-50 (Step Runtime: 4m15.034s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 10377 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc000b07a70, 0x45d964b800, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a99080}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 20:48:27.132510 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 7m7.578s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 In [It] (Node Runtime: 7m0.01s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 At [By Step] creating replication controller memory-reservation in namespace autoscaling-50 (Step Runtime: 4m35.036s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 10377 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc000b07a70, 0x45d964b800, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a99080}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 20:48:47.178345 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 7m27.579s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 In [It] (Node Runtime: 7m20.012s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 At [By Step] creating replication controller memory-reservation in namespace autoscaling-50 (Step Runtime: 4m55.038s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 10377 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc000b07a70, 0x45d964b800, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a99080}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 20:49:07.223545 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 1, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] Should be able to scale a node group up from 0[Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 7m47.581s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 In [It] (Node Runtime: 7m40.013s) test/e2e/autoscaling/cluster_size_autoscaling.go:753 At [By Step] creating replication controller memory-reservation in namespace autoscaling-50 (Step Runtime: 5m15.039s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 10377 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc000b07a70, 0x45d964b800, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a99080}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Dec 6 20:49:27.223: INFO: Unexpected error: <*errors.errorString | 0xc0010ad160>: { s: "timeout waiting 5m0s for appropriate cluster size", } Dec 6 20:49:27.224: FAIL: timeout waiting 5m0s for appropriate cluster size Full Stack Trace k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.28() test/e2e/autoscaling/cluster_size_autoscaling.go:801 +0x7a5 STEP: deleting ReplicationController memory-reservation in namespace autoscaling-50, will wait for the garbage collector to delete the pods 12/06/22 20:49:27.224 Dec 6 20:49:27.366: INFO: Deleting ReplicationController memory-reservation took: 48.067302ms Dec 6 20:49:27.467: INFO: Terminating ReplicationController memory-reservation pods took: 100.490665ms STEP: Remove taint from node ca-minion-group-1-zjdh 12/06/22 20:49:27.467 [AfterEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/framework/node/init/init.go:32 Dec 6 20:49:27.567: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/autoscaling/cluster_size_autoscaling.go:139 STEP: Restoring initial size of the cluster 12/06/22 20:49:27.619 STEP: Setting size of ca-minion-group to 1 12/06/22 20:49:31.62 Dec 6 20:49:31.620: INFO: Skipping dumping logs from cluster Dec 6 20:49:36.358: INFO: Skipping dumping logs from cluster Dec 6 20:49:40.444: INFO: 
Waiting for ready nodes 2, current ready 1, not ready nodes 0 Dec 6 20:50:00.488: INFO: Waiting for ready nodes 2, current ready 1, not ready nodes 0 Dec 6 20:50:20.544: INFO: Waiting for ready nodes 2, current ready 1, not ready nodes 0 Dec 6 20:50:40.588: INFO: Waiting for ready nodes 2, current ready 1, not ready nodes 0 Dec 6 20:51:00.631: INFO: Cluster has reached the desired number of ready nodes 2 STEP: Remove taint from node ca-master 12/06/22 20:51:00.675 STEP: Remove taint from node ca-minion-group-1-zjdh 12/06/22 20:51:00.717 STEP: Remove taint from node ca-minion-group-gjlm 12/06/22 20:51:00.759 I1206 20:51:00.801565 7957 cluster_size_autoscaling.go:165] Made nodes schedulable again in 126.080175ms [DeferCleanup (Each)] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-autoscaling] Cluster size autoscaling [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 12/06/22 20:51:00.801 STEP: Collecting events from namespace "autoscaling-50". 12/06/22 20:51:00.801 STEP: Found 3 events. 12/06/22 20:51:00.842 Dec 6 20:51:00.842: INFO: At 2022-12-06 20:44:06 +0000 UTC - event for memory-reservation: {replication-controller } SuccessfulCreate: Created pod: memory-reservation-7rzqj Dec 6 20:51:00.843: INFO: At 2022-12-06 20:44:06 +0000 UTC - event for memory-reservation-7rzqj: {default-scheduler } FailedScheduling: 0/2 nodes are available: 1 node(s) had untolerated taint {DisabledForAutoscalingTest: DisabledForTest}, 1 node(s) had untolerated taint {node-role.kubernetes.io/master: }, 1 node(s) were unschedulable. preemption: 0/2 nodes are available: 2 Preemption is not helpful for scheduling.. 
Dec 6 20:51:00.843: INFO: At 2022-12-06 20:49:27 +0000 UTC - event for memory-reservation-7rzqj: {default-scheduler } FailedScheduling: skip schedule deleting pod: autoscaling-50/memory-reservation-7rzqj Dec 6 20:51:00.884: INFO: POD NODE PHASE GRACE CONDITIONS Dec 6 20:51:00.884: INFO: Dec 6 20:51:00.927: INFO: Logging node info for node ca-master Dec 6 20:51:00.969: INFO: Node Info: &Node{ObjectMeta:{ca-master 8e72891f-3d31-4302-b81e-5940fbd1a2b7 54076 0 2022-12-06 16:14:32 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:ca-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-12-06 16:14:32 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-12-06 16:14:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 
2022-12-06 16:14:42 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-12-06 20:50:21 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gci-autoscaling-migs/us-west1-b/ca-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-12-06 16:14:42 +0000 UTC,LastTransitionTime:2022-12-06 16:14:42 +0000 UTC,Reason:RouteCreated,Message:RouteController created a 
route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-12-06 20:50:21 +0000 UTC,LastTransitionTime:2022-12-06 16:14:32 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-12-06 20:50:21 +0000 UTC,LastTransitionTime:2022-12-06 16:14:32 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-12-06 20:50:21 +0000 UTC,LastTransitionTime:2022-12-06 16:14:32 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-12-06 20:50:21 +0000 UTC,LastTransitionTime:2022-12-06 16:14:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:35.230.76.149,},NodeAddress{Type:InternalDNS,Address:ca-master.c.k8s-jkns-gci-autoscaling-migs.internal,},NodeAddress{Type:Hostname,Address:ca-master.c.k8s-jkns-gci-autoscaling-migs.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bac070a384fefb9133ee7878e15673cf,SystemUUID:bac070a3-84fe-fb91-33ee-7878e15673cf,BootID:06caa00c-99ed-4909-986f-2ab86fa8e8fe,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from 
Google,ContainerRuntimeVersion:containerd://1.6.6,KubeletVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,KubeProxyVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/autoscaling/cluster-autoscaler@sha256:07ab8c89cd0ad296ddb6347febe196d8fe0e1c757656a98f71199860d87cf1a5 registry.k8s.io/autoscaling/cluster-autoscaler:v1.22.0],SizeBytes:24220268,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a 
registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Dec 6 20:51:00.969: INFO: Logging kubelet events for node ca-master Dec 6 20:51:01.016: INFO: Logging pods the kubelet thinks is on node ca-master Dec 6 20:51:01.083: INFO: kube-controller-manager-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.083: INFO: Container kube-controller-manager ready: true, restart count 1 Dec 6 20:51:01.083: INFO: kube-scheduler-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.083: INFO: Container kube-scheduler ready: true, restart count 0 Dec 6 20:51:01.083: INFO: kube-apiserver-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.083: INFO: Container kube-apiserver ready: true, restart count 0 Dec 6 20:51:01.083: INFO: kube-addon-manager-ca-master started at 2022-12-06 16:14:05 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.083: INFO: Container kube-addon-manager ready: true, restart count 0 Dec 6 20:51:01.083: INFO: cluster-autoscaler-ca-master started at 2022-12-06 16:14:06 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.083: INFO: Container cluster-autoscaler ready: true, restart count 2 Dec 6 20:51:01.083: INFO: metadata-proxy-v0.1-7j85x started at 2022-12-06 16:14:35 +0000 UTC (0+2 container statuses recorded) Dec 6 20:51:01.083: INFO: Container metadata-proxy ready: true, restart count 0 Dec 6 20:51:01.083: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Dec 6 20:51:01.083: INFO: etcd-server-events-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.083: INFO: Container etcd-container ready: true, 
restart count 0 Dec 6 20:51:01.083: INFO: etcd-server-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.083: INFO: Container etcd-container ready: true, restart count 0 Dec 6 20:51:01.083: INFO: konnectivity-server-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.083: INFO: Container konnectivity-server-container ready: true, restart count 0 Dec 6 20:51:01.083: INFO: l7-lb-controller-ca-master started at 2022-12-06 16:14:07 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.083: INFO: Container l7-lb-controller ready: true, restart count 2 Dec 6 20:51:01.264: INFO: Latency metrics for node ca-master Dec 6 20:51:01.264: INFO: Logging node info for node ca-minion-group-1-zjdh Dec 6 20:51:01.306: INFO: Node Info: &Node{ObjectMeta:{ca-minion-group-1-zjdh e5f29fc6-3a36-4cb9-819b-ea70e28064e4 54098 0 2022-12-06 20:38:24 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:ca-minion-group-1-zjdh kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-12-06 20:38:24 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-12-06 20:38:26 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.48.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-12-06 20:38:39 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-12-06 20:48:30 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-12-06 20:50:30 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.48.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gci-autoscaling-migs/us-west1-b/ca-minion-group-1-zjdh,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.48.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 
DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-12-06 20:48:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:28 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-12-06 20:48:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:28 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-12-06 20:48:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:28 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-12-06 20:48:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:28 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-12-06 20:48:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:28 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-12-06 20:48:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:28 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-12-06 20:48:30 +0000 
UTC,LastTransitionTime:2022-12-06 20:38:28 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-12-06 20:38:39 +0000 UTC,LastTransitionTime:2022-12-06 20:38:39 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-12-06 20:50:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:24 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-12-06 20:50:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:24 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-12-06 20:50:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:24 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-12-06 20:50:30 +0000 UTC,LastTransitionTime:2022-12-06 20:38:26 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.50,},NodeAddress{Type:ExternalIP,Address:35.233.150.138,},NodeAddress{Type:InternalDNS,Address:ca-minion-group-1-zjdh.c.k8s-jkns-gci-autoscaling-migs.internal,},NodeAddress{Type:Hostname,Address:ca-minion-group-1-zjdh.c.k8s-jkns-gci-autoscaling-migs.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:1ae8b22ae4b79f877097bf98801fd2a2,SystemUUID:1ae8b22a-e4b7-9f87-7097-bf98801fd2a2,BootID:d317f63a-238e-47bc-9e4b-3de2ed7a5e2a,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.6.6,KubeletVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,KubeProxyVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:67201736,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/metrics-server/metrics-server@sha256:6385aec64bb97040a5e692947107b81e178555c7a5b71caa90d733e4130efc10 registry.k8s.io/metrics-server/metrics-server:v0.5.2],SizeBytes:26023008,},ContainerImage{Names:[registry.k8s.io/sig-storage/snapshot-controller@sha256:823c75d0c45d1427f6d850070956d9ca657140a7bbf828381541d1d808475280 registry.k8s.io/sig-storage/snapshot-controller:v6.1.0],SizeBytes:22620891,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/autoscaling/addon-resizer@sha256:43f129b81d28f0fdd54de6d8e7eacd5728030782e03db16087fc241ad747d3d6 
registry.k8s.io/autoscaling/addon-resizer:1.8.14],SizeBytes:10153852,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64@sha256:7eb7b3cee4d33c10c49893ad3c386232b86d4067de5251294d4c620d6e072b93 registry.k8s.io/networking/ingress-gce-404-server-with-metrics-amd64:v1.10.11],SizeBytes:6463068,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Dec 6 20:51:01.307: INFO: Logging kubelet events for node ca-minion-group-1-zjdh Dec 6 20:51:01.355: INFO: Logging pods the kubelet thinks is on node ca-minion-group-1-zjdh Dec 6 20:51:01.418: INFO: kube-proxy-ca-minion-group-1-zjdh started at 2022-12-06 20:38:24 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.418: INFO: Container kube-proxy ready: true, restart count 0 Dec 6 20:51:01.418: INFO: metadata-proxy-v0.1-6rtlb started at 2022-12-06 20:38:25 +0000 UTC (0+2 container statuses recorded) Dec 6 20:51:01.418: INFO: Container metadata-proxy ready: true, restart count 0 Dec 6 20:51:01.418: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Dec 6 20:51:01.418: INFO: konnectivity-agent-llwkx started at 2022-12-06 20:38:39 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.418: INFO: Container konnectivity-agent ready: true, restart count 0 Dec 6 20:51:01.418: INFO: metrics-server-v0.5.2-867b8754b9-v424v started at 2022-12-06 20:49:27 +0000 UTC (0+2 container statuses recorded) Dec 
6 20:51:01.418: INFO: Container metrics-server ready: true, restart count 0 Dec 6 20:51:01.418: INFO: Container metrics-server-nanny ready: true, restart count 0 Dec 6 20:51:01.418: INFO: l7-default-backend-8549d69d99-xjlxs started at 2022-12-06 20:49:27 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.418: INFO: Container default-http-backend ready: true, restart count 0 Dec 6 20:51:01.418: INFO: volume-snapshot-controller-0 started at 2022-12-06 20:49:27 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.418: INFO: Container volume-snapshot-controller ready: true, restart count 0 Dec 6 20:51:01.418: INFO: coredns-6d97d5ddb-4m5nn started at 2022-12-06 20:49:27 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.418: INFO: Container coredns ready: true, restart count 0 Dec 6 20:51:01.592: INFO: Latency metrics for node ca-minion-group-1-zjdh Dec 6 20:51:01.592: INFO: Logging node info for node ca-minion-group-gjlm Dec 6 20:51:01.635: INFO: Node Info: &Node{ObjectMeta:{ca-minion-group-gjlm 68f7253b-0522-4306-9b7d-d11563ac80fd 54189 0 2022-12-06 20:50:48 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:ca-minion-group-gjlm kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kube-controller-manager Update v1 2022-12-06 20:50:48 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.50.0/24\"":{}}}} } {kubelet Update v1 2022-12-06 20:50:48 +0000 UTC FieldsV1 
{"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kubelet Update v1 2022-12-06 20:50:48 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"Ready\"}":{"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-12-06 20:50:53 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kube-controller-manager Update v1 
2022-12-06 20:50:57 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status}]},Spec:NodeSpec{PodCIDR:10.64.50.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gci-autoscaling-migs/us-west1-b/ca-minion-group-gjlm,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.50.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-12-06 20:50:53 +0000 UTC,LastTransitionTime:2022-12-06 20:50:52 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-12-06 20:50:53 +0000 UTC,LastTransitionTime:2022-12-06 20:50:52 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-12-06 20:50:53 +0000 UTC,LastTransitionTime:2022-12-06 20:50:52 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-12-06 20:50:53 +0000 UTC,LastTransitionTime:2022-12-06 20:50:52 +0000 
UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-12-06 20:50:53 +0000 UTC,LastTransitionTime:2022-12-06 20:50:52 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-12-06 20:50:53 +0000 UTC,LastTransitionTime:2022-12-06 20:50:52 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-12-06 20:50:53 +0000 UTC,LastTransitionTime:2022-12-06 20:50:52 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-12-06 20:50:57 +0000 UTC,LastTransitionTime:2022-12-06 20:50:57 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-12-06 20:50:48 +0000 UTC,LastTransitionTime:2022-12-06 20:50:48 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-12-06 20:50:48 +0000 UTC,LastTransitionTime:2022-12-06 20:50:48 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-12-06 20:50:48 +0000 UTC,LastTransitionTime:2022-12-06 20:50:48 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-12-06 20:50:48 +0000 UTC,LastTransitionTime:2022-12-06 20:50:48 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.52,},NodeAddress{Type:ExternalIP,Address:35.247.99.131,},NodeAddress{Type:InternalDNS,Address:ca-minion-group-gjlm.c.k8s-jkns-gci-autoscaling-migs.internal,},NodeAddress{Type:Hostname,Address:ca-minion-group-gjlm.c.k8s-jkns-gci-autoscaling-migs.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:af8d7d741bb05df1c5d2e56e7c39a344,SystemUUID:af8d7d74-1bb0-5df1-c5d2-e56e7c39a344,BootID:6bdbfa19-5155-4291-a593-d44be5c2517a,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.6.6,KubeletVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,KubeProxyVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:67201736,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Dec 6 20:51:01.635: INFO: Logging kubelet events for node ca-minion-group-gjlm Dec 6 20:51:01.682: INFO: Logging pods the kubelet thinks is on node ca-minion-group-gjlm Dec 6 20:51:01.744: INFO: kube-proxy-ca-minion-group-gjlm started at 2022-12-06 20:50:48 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.744: INFO: Container kube-proxy ready: true, restart count 0 Dec 6 20:51:01.744: INFO: metadata-proxy-v0.1-4ndpc started at 2022-12-06 20:50:49 +0000 UTC (0+2 container statuses recorded) Dec 6 20:51:01.744: INFO: Container metadata-proxy ready: true, restart count 0 Dec 6 20:51:01.744: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Dec 6 20:51:01.744: INFO: konnectivity-agent-4l6bt started at 2022-12-06 20:50:57 +0000 UTC (0+1 container statuses recorded) Dec 6 20:51:01.744: INFO: Container konnectivity-agent ready: true, restart count 0 Dec 6 20:51:01.940: INFO: Latency metrics for node 
ca-minion-group-gjlm [DeferCleanup (Each)] [sig-autoscaling] Cluster size autoscaling [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "autoscaling-50" for this suite. 12/06/22 20:51:01.94
Filter through log files | View test history on testgrid
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-autoscaling\]\sCluster\ssize\sautoscaling\s\[Slow\]\sshould\sbe\sable\sto\sscale\sdown\sby\sdraining\ssystem\spods\swith\spdb\[Feature\:ClusterSizeAutoscalingScaleDown\]$'
test/e2e/autoscaling/cluster_size_autoscaling.go:748 k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 +0x94 k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 +0x842 k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 +0x57from junit_01.xml
[BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 12/06/22 19:23:10.193 Dec 6 19:23:10.193: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename autoscaling 12/06/22 19:23:10.195 STEP: Waiting for a default service account to be provisioned in namespace 12/06/22 19:23:10.321 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 12/06/22 19:23:10.401 [BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/autoscaling/cluster_size_autoscaling.go:103 STEP: Initial size of ca-minion-group-1: 1 12/06/22 19:23:13.981 STEP: Initial size of ca-minion-group: 1 12/06/22 19:23:17.353 Dec 6 19:23:17.401: INFO: Cluster has reached the desired number of ready nodes 2 STEP: Initial number of schedulable nodes: 2 12/06/22 19:23:17.446 [It] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] test/e2e/autoscaling/cluster_size_autoscaling.go:745 STEP: Manually increase cluster size 12/06/22 19:23:17.446 STEP: Setting size of ca-minion-group-1 to 3 12/06/22 19:23:20.995 Dec 6 19:23:20.995: INFO: Skipping dumping logs from cluster Dec 6 19:23:25.537: INFO: Skipping dumping logs from cluster STEP: Setting size of ca-minion-group to 3 12/06/22 19:23:28.815 Dec 6 19:23:28.815: INFO: Skipping dumping logs from cluster Dec 6 19:23:33.209: INFO: Skipping dumping logs from cluster STEP: Setting size of ca-minion-group to 3 12/06/22 19:23:40.011 Dec 6 19:23:40.011: INFO: Skipping dumping logs from cluster Dec 6 19:23:44.280: INFO: Skipping dumping logs from cluster W1206 19:23:44.280178 7957 cluster_size_autoscaling.go:1758] Unexpected node group size while waiting for cluster resize. Setting size to target again. 
I1206 19:23:44.280194 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 I1206 19:24:13.823831 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 0 Dec 6 19:24:33.871: INFO: Condition Ready of node ca-minion-group-1-3q9h is false instead of true. Reason: KubeletNotReady, message: [container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: cni plugin not initialized, CSINode is not yet initialized] I1206 19:24:33.871539 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 3, not ready nodes 1 I1206 19:24:53.919964 7957 cluster_size_autoscaling.go:1381] Cluster has reached the desired size STEP: Run a pod on each node 12/06/22 19:24:53.966 STEP: Taint node ca-minion-group-1-3q9h 12/06/22 19:24:53.966 STEP: Taint node ca-minion-group-1-f6sn 12/06/22 19:24:54.059 STEP: Taint node ca-minion-group-1-whwm 12/06/22 19:24:54.149 STEP: Taint node ca-minion-group-9m6s 12/06/22 19:24:54.24 STEP: Taint node ca-minion-group-qbln 12/06/22 19:24:54.332 STEP: Taint node ca-minion-group-wp8w 12/06/22 19:24:54.423 STEP: creating replication controller reschedulable-pods in namespace kube-system 12/06/22 19:24:54.517 I1206 19:24:54.562615 7957 runners.go:193] Created replication controller with name: reschedulable-pods, namespace: kube-system, replica count: 0 STEP: Remove taint from node ca-minion-group-1-3q9h 12/06/22 19:24:54.655 STEP: Taint node ca-minion-group-1-3q9h 12/06/22 19:24:59.878 STEP: Remove taint from node ca-minion-group-1-f6sn 12/06/22 19:24:59.971 STEP: Taint node ca-minion-group-1-f6sn 12/06/22 19:25:05.2 STEP: Remove taint from node ca-minion-group-1-whwm 12/06/22 19:25:05.291 STEP: Taint node ca-minion-group-1-whwm 12/06/22 19:25:10.519 STEP: Remove taint from node ca-minion-group-9m6s 12/06/22 19:25:10.612 STEP: Taint node ca-minion-group-9m6s 12/06/22 19:25:15.846 
STEP: Remove taint from node ca-minion-group-qbln 12/06/22 19:25:15.937 STEP: Taint node ca-minion-group-qbln 12/06/22 19:25:21.165 STEP: Remove taint from node ca-minion-group-wp8w 12/06/22 19:25:21.259 STEP: Taint node ca-minion-group-wp8w 12/06/22 19:25:26.491 STEP: Remove taint from node ca-minion-group-wp8w 12/06/22 19:25:26.587 STEP: Remove taint from node ca-minion-group-qbln 12/06/22 19:25:26.682 STEP: Remove taint from node ca-minion-group-9m6s 12/06/22 19:25:26.781 STEP: Remove taint from node ca-minion-group-1-whwm 12/06/22 19:25:26.876 STEP: Remove taint from node ca-minion-group-1-f6sn 12/06/22 19:25:26.969 STEP: Remove taint from node ca-minion-group-1-3q9h 12/06/22 19:25:27.077 STEP: Create a PodDisruptionBudget 12/06/22 19:25:27.182 STEP: Some node should be removed 12/06/22 19:25:27.227 I1206 19:25:27.276565 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 I1206 19:25:47.330032 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 I1206 19:26:07.379388 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 I1206 19:26:27.432504 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 I1206 19:26:47.485474 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 I1206 19:27:07.539277 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 I1206 19:27:27.589880 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 I1206 19:27:47.640739 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 I1206 19:28:07.690669 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically 
polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 5m7.253s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 5m0s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 2m50.22s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:28:27.743128 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale 
down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 5m27.254s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 5m20.001s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 3m10.221s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:28:47.794306 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec 
Runtime: 5m47.255s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 5m40.003s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 3m30.222s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep, 2 minutes] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:29:07.845200 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 6m7.258s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In 
[It] (Node Runtime: 6m0.006s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 3m50.225s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:29:27.890625 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 6m27.261s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 6m20.008s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At 
[By Step] Some node should be removed (Step Runtime: 4m10.227s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:29:47.938327 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 6m47.263s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 6m40.01s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 4m30.23s) 
test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:30:07.987703 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 7m7.264s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 7m0.011s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 4m50.231s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 
[sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:30:28.038755 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 7m27.267s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 7m20.014s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 5m10.233s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > 
k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:30:48.091422 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 7m47.268s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 7m40.015s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 5m30.234s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep, 2 minutes] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > 
k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:31:08.141655 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 8m7.274s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 8m0.021s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 5m50.241s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 
0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:31:28.191059 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 8m27.277s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 8m20.024s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 6m10.244s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:31:48.240874 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 8m47.28s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 8m40.027s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 6m30.247s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > 
k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:32:08.290066 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 9m7.283s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 9m0.03s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 6m50.25s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:32:28.339794 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 9m27.286s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 9m20.033s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 7m10.253s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:32:48.390257 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 9m47.288s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 9m40.035s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 7m30.254s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:33:08.442780 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 10m7.289s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 10m0.036s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 7m50.256s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:33:28.492286 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 10m27.29s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 10m20.037s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 8m10.257s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:33:48.545080 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 10m47.291s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 10m40.038s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 8m30.258s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:34:08.597045 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 11m7.293s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 11m0.041s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 8m50.26s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:34:28.649590 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 11m27.297s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 11m20.044s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 9m10.263s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:34:48.700279 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 11m47.298s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 11m40.045s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 9m30.264s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:35:08.752290 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 12m7.301s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 12m0.048s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 9m50.267s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:35:28.802781 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 12m27.303s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 12m20.05s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 10m10.27s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:35:48.853444 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 12m47.304s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 12m40.051s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 10m30.27s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:36:08.905989 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 13m7.305s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 13m0.052s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 10m50.272s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:36:28.955390 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 13m27.308s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 13m20.055s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 11m10.274s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:36:49.004450 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 13m47.31s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 13m40.057s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 11m30.277s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:37:09.053039 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 14m7.311s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 14m0.058s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 11m50.278s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:37:29.103509 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 14m27.312s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 14m20.059s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 12m10.279s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:37:49.155386 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 14m47.315s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 14m40.062s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 12m30.282s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:38:09.208049 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 15m7.316s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 15m0.063s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 12m50.283s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:38:29.260859 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 15m27.317s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 15m20.064s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 13m10.284s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:38:49.311371 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 15m47.32s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 15m40.067s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 13m30.287s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:39:09.366324 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 16m7.323s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 16m0.07s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 13m50.29s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:39:29.415077 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 16m27.324s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 16m20.071s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 14m10.291s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:39:49.467962 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 16m47.327s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 16m40.074s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 14m30.294s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:40:09.518747 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 17m7.329s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 17m0.077s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 14m50.296s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:40:29.571517 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 17m27.332s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 17m20.079s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 15m10.299s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:40:49.622485 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 17m47.335s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 17m40.082s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 15m30.302s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:41:09.685592 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 18m7.339s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 18m0.086s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 15m50.306s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:41:29.742001 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 18m27.343s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 18m20.09s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 16m10.31s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:41:49.796232 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 18m47.347s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 18m40.094s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 16m30.314s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:42:09.849599 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 19m7.354s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 19m0.101s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 16m50.321s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:42:29.899753 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 19m27.356s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 19m20.104s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 17m10.323s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:42:49.956357 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 19m47.358s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 19m40.105s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 17m30.324s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:43:10.006683 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 20m7.359s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 20m0.106s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 17m50.326s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:43:30.059978 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 20m27.362s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 20m20.109s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 18m10.329s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:43:50.112660 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 20m47.363s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 20m40.11s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 18m30.33s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:44:10.165636 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 21m7.365s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 21m0.112s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 18m50.331s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:44:30.215703 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 21m27.366s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 21m20.113s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 19m10.332s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:44:50.264882 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 21m47.367s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 21m40.114s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 19m30.333s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 19:45:10.314303 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 6, not ready nodes 0 ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should be able to scale down by draining system pods with pdb[Feature:ClusterSizeAutoscalingScaleDown] (Spec Runtime: 22m7.369s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 In [It] (Node Runtime: 22m0.117s) test/e2e/autoscaling/cluster_size_autoscaling.go:745 At [By Step] Some node should be removed (Step Runtime: 19m50.336s) test/e2e/autoscaling/cluster_size_autoscaling.go:747 Spec Goroutine goroutine 7656 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFuncWithUnready({0x801df68, 0xc004160680}, 0xc00252bba0, 0x1176592e000, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1364 > k8s.io/kubernetes/test/e2e/autoscaling.WaitForClusterSizeFunc(...) 
test/e2e/autoscaling/cluster_size_autoscaling.go:1359 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 > k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc000a60600}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ Dec 6 19:45:30.315: INFO: Unexpected error: <*errors.errorString | 0xc0010ac5d0>: { s: "timeout waiting 20m0s for appropriate cluster size", } Dec 6 19:45:30.315: FAIL: timeout waiting 20m0s for appropriate cluster size Full Stack Trace k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27.1(0x6) test/e2e/autoscaling/cluster_size_autoscaling.go:748 +0x94 k8s.io/kubernetes/test/e2e/autoscaling.runDrainTest(0xc000416c30, 0x7fa3fc0?, {0x75cea57, 0xb}, 0x2, 0x1, 0xc000b07f58) test/e2e/autoscaling/cluster_size_autoscaling.go:1061 +0x842 k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.27() test/e2e/autoscaling/cluster_size_autoscaling.go:746 +0x57 STEP: deleting ReplicationController reschedulable-pods in namespace kube-system, will wait for the garbage collector to delete the pods 12/06/22 19:45:30.36 Dec 6 19:45:30.497: INFO: Deleting ReplicationController reschedulable-pods took: 45.375127ms Dec 6 19:45:30.598: INFO: Terminating ReplicationController reschedulable-pods pods took: 100.600869ms [AfterEach] [sig-autoscaling] Cluster size autoscaling [Slow] 
test/e2e/framework/node/init/init.go:32 Dec 6 19:45:31.899: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready [AfterEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/autoscaling/cluster_size_autoscaling.go:139 STEP: Restoring initial size of the cluster 12/06/22 19:45:31.948 STEP: Setting size of ca-minion-group-1 to 1 12/06/22 19:45:35.494 Dec 6 19:45:35.494: INFO: Skipping dumping logs from cluster Dec 6 19:45:39.964: INFO: Skipping dumping logs from cluster STEP: Setting size of ca-minion-group to 1 12/06/22 19:45:43.368 Dec 6 19:45:43.368: INFO: Skipping dumping logs from cluster Dec 6 19:45:47.812: INFO: Skipping dumping logs from cluster Dec 6 19:45:47.861: INFO: Waiting for ready nodes 2, current ready 6, not ready nodes 0 Dec 6 19:46:07.912: INFO: Waiting for ready nodes 2, current ready 6, not ready nodes 0 Dec 6 19:46:27.962: INFO: Condition Ready of node ca-minion-group-1-3q9h is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 19:46:20 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 19:46:25 +0000 UTC}]. Failure Dec 6 19:46:27.962: INFO: Condition Ready of node ca-minion-group-1-f6sn is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:46:27.962: INFO: Condition Ready of node ca-minion-group-9m6s is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:46:27.962: INFO: Waiting for ready nodes 2, current ready 3, not ready nodes 3 Dec 6 19:46:48.012: INFO: Condition Ready of node ca-minion-group-1-3q9h is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 19:46:20 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 19:46:25 +0000 UTC}]. Failure Dec 6 19:46:48.012: INFO: Condition Ready of node ca-minion-group-1-f6sn is false instead of true. 
Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:46:48.012: INFO: Condition Ready of node ca-minion-group-9m6s is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:46:48.012: INFO: Condition Ready of node ca-minion-group-qbln is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:46:48.012: INFO: Waiting for ready nodes 2, current ready 2, not ready nodes 4 Dec 6 19:47:08.067: INFO: Condition Ready of node ca-minion-group-1-3q9h is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 19:46:20 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 19:46:25 +0000 UTC}]. Failure Dec 6 19:47:08.067: INFO: Condition Ready of node ca-minion-group-1-f6sn is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:47:08.067: INFO: Condition Ready of node ca-minion-group-9m6s is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:47:08.067: INFO: Condition Ready of node ca-minion-group-qbln is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:47:08.067: INFO: Waiting for ready nodes 2, current ready 2, not ready nodes 4 Dec 6 19:47:28.124: INFO: Condition Ready of node ca-minion-group-1-3q9h is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 19:46:20 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 19:46:25 +0000 UTC}]. Failure Dec 6 19:47:28.124: INFO: Condition Ready of node ca-minion-group-1-f6sn is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:47:28.124: INFO: Condition Ready of node ca-minion-group-9m6s is false instead of true. 
Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:47:28.124: INFO: Condition Ready of node ca-minion-group-qbln is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:47:28.124: INFO: Waiting for ready nodes 2, current ready 2, not ready nodes 4 Dec 6 19:47:48.173: INFO: Condition Ready of node ca-minion-group-1-3q9h is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 19:46:20 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 19:46:25 +0000 UTC}]. Failure Dec 6 19:47:48.174: INFO: Condition Ready of node ca-minion-group-1-f6sn is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:47:48.174: INFO: Condition Ready of node ca-minion-group-9m6s is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:47:48.174: INFO: Condition Ready of node ca-minion-group-qbln is false instead of true. Reason: NodeStatusUnknown, message: Kubelet stopped posting node status. Dec 6 19:47:48.174: INFO: Waiting for ready nodes 2, current ready 2, not ready nodes 4 Dec 6 19:48:08.221: INFO: Cluster has reached the desired number of ready nodes 2 STEP: Remove taint from node ca-master 12/06/22 19:48:08.265 STEP: Remove taint from node ca-minion-group-1-whwm 12/06/22 19:48:08.308 STEP: Remove taint from node ca-minion-group-wp8w 12/06/22 19:48:08.35 I1206 19:48:08.393075 7957 cluster_size_autoscaling.go:165] Made nodes schedulable again in 127.335307ms [DeferCleanup (Each)] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/framework/metrics/init/init.go:33 [DeferCleanup (Each)] [sig-autoscaling] Cluster size autoscaling [Slow] dump namespaces | framework.go:196 STEP: dump namespace information after failure 12/06/22 19:48:08.393 STEP: Collecting events from namespace "autoscaling-3052". 
12/06/22 19:48:08.393 STEP: Found 0 events. 12/06/22 19:48:08.434 Dec 6 19:48:08.475: INFO: POD NODE PHASE GRACE CONDITIONS Dec 6 19:48:08.475: INFO: Dec 6 19:48:08.519: INFO: Logging node info for node ca-master Dec 6 19:48:08.562: INFO: Node Info: &Node{ObjectMeta:{ca-master 8e72891f-3d31-4302-b81e-5940fbd1a2b7 41230 0 2022-12-06 16:14:32 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-1 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:ca-master kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-1 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-12-06 16:14:32 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{},"f:unschedulable":{}}} } {kube-controller-manager Update v1 2022-12-06 16:14:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.1.0/24\"":{}},"f:taints":{}}} } {kube-controller-manager Update v1 2022-12-06 16:14:42 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {kubelet Update v1 2022-12-06 19:44:04 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.1.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gci-autoscaling-migs/us-west1-b/ca-master,Unschedulable:true,Taints:[]Taint{Taint{Key:node-role.kubernetes.io/master,Value:,Effect:NoSchedule,TimeAdded:<nil>,},Taint{Key:node.kubernetes.io/unschedulable,Value:,Effect:NoSchedule,TimeAdded:<nil>,},},ConfigSource:nil,PodCIDRs:[10.64.1.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{16656896000 0} {<nil>} 16266500Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3858366464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{1 0} {<nil>} 1 DecimalSI},ephemeral-storage: {{14991206376 0} {<nil>} 14991206376 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{3596222464 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-12-06 16:14:42 +0000 UTC,LastTransitionTime:2022-12-06 16:14:42 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-12-06 
19:44:04 +0000 UTC,LastTransitionTime:2022-12-06 16:14:32 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-12-06 19:44:04 +0000 UTC,LastTransitionTime:2022-12-06 16:14:32 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-12-06 19:44:04 +0000 UTC,LastTransitionTime:2022-12-06 16:14:32 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-12-06 19:44:04 +0000 UTC,LastTransitionTime:2022-12-06 16:14:34 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.2,},NodeAddress{Type:ExternalIP,Address:35.230.76.149,},NodeAddress{Type:InternalDNS,Address:ca-master.c.k8s-jkns-gci-autoscaling-migs.internal,},NodeAddress{Type:Hostname,Address:ca-master.c.k8s-jkns-gci-autoscaling-migs.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:bac070a384fefb9133ee7878e15673cf,SystemUUID:bac070a3-84fe-fb91-33ee-7878e15673cf,BootID:06caa00c-99ed-4909-986f-2ab86fa8e8fe,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.6.6,KubeletVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,KubeProxyVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-apiserver-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:135160275,},ContainerImage{Names:[registry.k8s.io/kube-controller-manager-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:124990265,},ContainerImage{Names:[registry.k8s.io/etcd@sha256:dd75ec974b0a2a6f6bb47001ba09207976e625db898d1b16735528c009cb171c 
registry.k8s.io/etcd:3.5.6-0],SizeBytes:102542580,},ContainerImage{Names:[registry.k8s.io/kube-scheduler-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:57660216,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64@sha256:5db27383add6d9f4ebdf0286409ac31f7f5d273690204b341a4e37998917693b gcr.io/k8s-ingress-image-push/ingress-gce-glbc-amd64:v1.20.1],SizeBytes:36598135,},ContainerImage{Names:[registry.k8s.io/addon-manager/kube-addon-manager@sha256:49cc4e6e4a3745b427ce14b0141476ab339bb65c6bc05033019e046c8727dcb0 registry.k8s.io/addon-manager/kube-addon-manager:v9.1.6],SizeBytes:30464183,},ContainerImage{Names:[registry.k8s.io/autoscaling/cluster-autoscaler@sha256:07ab8c89cd0ad296ddb6347febe196d8fe0e1c757656a98f71199860d87cf1a5 registry.k8s.io/autoscaling/cluster-autoscaler:v1.22.0],SizeBytes:24220268,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-server@sha256:2c111f004bec24888d8cfa2a812a38fb8341350abac67dcd0ac64e709dfe389c registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33],SizeBytes:22020129,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Dec 6 19:48:08.562: INFO: Logging kubelet events for node ca-master Dec 6 19:48:08.608: INFO: Logging pods the kubelet thinks is on node ca-master Dec 6 19:48:08.671: INFO: cluster-autoscaler-ca-master started at 2022-12-06 16:14:06 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:08.671: INFO: Container cluster-autoscaler ready: true, 
restart count 2 Dec 6 19:48:08.671: INFO: metadata-proxy-v0.1-7j85x started at 2022-12-06 16:14:35 +0000 UTC (0+2 container statuses recorded) Dec 6 19:48:08.671: INFO: Container metadata-proxy ready: true, restart count 0 Dec 6 19:48:08.671: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Dec 6 19:48:08.671: INFO: kube-controller-manager-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:08.671: INFO: Container kube-controller-manager ready: true, restart count 1 Dec 6 19:48:08.671: INFO: kube-scheduler-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:08.671: INFO: Container kube-scheduler ready: true, restart count 0 Dec 6 19:48:08.671: INFO: kube-apiserver-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:08.671: INFO: Container kube-apiserver ready: true, restart count 0 Dec 6 19:48:08.671: INFO: kube-addon-manager-ca-master started at 2022-12-06 16:14:05 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:08.671: INFO: Container kube-addon-manager ready: true, restart count 0 Dec 6 19:48:08.671: INFO: etcd-server-events-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:08.671: INFO: Container etcd-container ready: true, restart count 0 Dec 6 19:48:08.671: INFO: etcd-server-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:08.671: INFO: Container etcd-container ready: true, restart count 0 Dec 6 19:48:08.671: INFO: konnectivity-server-ca-master started at 2022-12-06 16:13:48 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:08.671: INFO: Container konnectivity-server-container ready: true, restart count 0 Dec 6 19:48:08.671: INFO: l7-lb-controller-ca-master started at 2022-12-06 16:14:07 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:08.671: INFO: Container l7-lb-controller ready: true, 
restart count 2 Dec 6 19:48:08.885: INFO: Latency metrics for node ca-master Dec 6 19:48:08.885: INFO: Logging node info for node ca-minion-group-1-whwm Dec 6 19:48:08.928: INFO: Node Info: &Node{ObjectMeta:{ca-minion-group-1-whwm 63b5491f-d73a-43da-bdfd-38b5117d315e 41421 0 2022-12-06 18:53:32 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:ca-minion-group-1-whwm kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-12-06 18:53:32 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-12-06 18:53:44 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.27.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-12-06 18:53:44 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} 
status} {node-problem-detector Update v1 2022-12-06 19:43:41 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-12-06 19:45:07 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.27.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gci-autoscaling-migs/us-west1-b/ca-minion-group-1-whwm,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.27.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 
98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7815438336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553294336 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-12-06 19:43:41 +0000 UTC,LastTransitionTime:2022-12-06 18:53:36 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-12-06 19:43:41 +0000 UTC,LastTransitionTime:2022-12-06 18:53:36 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-12-06 19:43:41 +0000 UTC,LastTransitionTime:2022-12-06 18:53:36 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-12-06 19:43:41 +0000 UTC,LastTransitionTime:2022-12-06 18:53:36 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-12-06 19:43:41 +0000 UTC,LastTransitionTime:2022-12-06 18:53:36 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-12-06 19:43:41 +0000 UTC,LastTransitionTime:2022-12-06 18:53:36 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning 
properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-12-06 19:43:41 +0000 UTC,LastTransitionTime:2022-12-06 18:53:36 +0000 UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-12-06 18:53:44 +0000 UTC,LastTransitionTime:2022-12-06 18:53:44 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-12-06 19:45:07 +0000 UTC,LastTransitionTime:2022-12-06 18:53:32 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-12-06 19:45:07 +0000 UTC,LastTransitionTime:2022-12-06 18:53:32 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-12-06 19:45:07 +0000 UTC,LastTransitionTime:2022-12-06 18:53:32 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-12-06 19:45:07 +0000 UTC,LastTransitionTime:2022-12-06 18:53:32 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.29,},NodeAddress{Type:ExternalIP,Address:34.83.4.245,},NodeAddress{Type:InternalDNS,Address:ca-minion-group-1-whwm.c.k8s-jkns-gci-autoscaling-migs.internal,},NodeAddress{Type:Hostname,Address:ca-minion-group-1-whwm.c.k8s-jkns-gci-autoscaling-migs.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:af20298bf265c586d9b7871ec9ea02da,SystemUUID:af20298b-f265-c586-d9b7-871ec9ea02da,BootID:48b8e1ab-b393-4aaa-b3da-55390d7afbd3,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.6.6,KubeletVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,KubeProxyVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:67201736,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/coredns/coredns@sha256:8e352a029d304ca7431c6507b56800636c321cb52289686a581ab70aaa8a2e2a registry.k8s.io/coredns/coredns:v1.9.3],SizeBytes:14837849,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Dec 6 
19:48:08.928: INFO: Logging kubelet events for node ca-minion-group-1-whwm Dec 6 19:48:08.975: INFO: Logging pods the kubelet thinks is on node ca-minion-group-1-whwm Dec 6 19:48:09.037: INFO: coredns-6d97d5ddb-gdv8l started at 2022-12-06 19:24:09 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:09.037: INFO: Container coredns ready: true, restart count 0 Dec 6 19:48:09.037: INFO: kube-proxy-ca-minion-group-1-whwm started at 2022-12-06 18:53:32 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:09.037: INFO: Container kube-proxy ready: true, restart count 0 Dec 6 19:48:09.037: INFO: metadata-proxy-v0.1-nv5st started at 2022-12-06 18:53:33 +0000 UTC (0+2 container statuses recorded) Dec 6 19:48:09.037: INFO: Container metadata-proxy ready: true, restart count 0 Dec 6 19:48:09.037: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Dec 6 19:48:09.037: INFO: konnectivity-agent-w6bpc started at 2022-12-06 18:53:44 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:09.037: INFO: Container konnectivity-agent ready: true, restart count 0 Dec 6 19:48:09.203: INFO: Latency metrics for node ca-minion-group-1-whwm Dec 6 19:48:09.203: INFO: Logging node info for node ca-minion-group-wp8w Dec 6 19:48:09.245: INFO: Node Info: &Node{ObjectMeta:{ca-minion-group-wp8w 9a4a925d-0e80-496c-853d-893ba4db69a3 41622 0 2022-12-06 19:24:42 +0000 UTC <nil> <nil> map[beta.kubernetes.io/arch:amd64 beta.kubernetes.io/instance-type:n1-standard-2 beta.kubernetes.io/os:linux cloud.google.com/metadata-proxy-ready:true failure-domain.beta.kubernetes.io/region:us-west1 failure-domain.beta.kubernetes.io/zone:us-west1-b kubernetes.io/arch:amd64 kubernetes.io/hostname:ca-minion-group-wp8w kubernetes.io/os:linux node.kubernetes.io/instance-type:n1-standard-2 topology.kubernetes.io/region:us-west1 topology.kubernetes.io/zone:us-west1-b] map[node.alpha.kubernetes.io/ttl:0 volumes.kubernetes.io/controller-managed-attach-detach:true] [] [] [{kubelet Update v1 2022-12-06 
19:24:42 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{".":{},"f:volumes.kubernetes.io/controller-managed-attach-detach":{}},"f:labels":{".":{},"f:beta.kubernetes.io/arch":{},"f:beta.kubernetes.io/instance-type":{},"f:beta.kubernetes.io/os":{},"f:cloud.google.com/metadata-proxy-ready":{},"f:failure-domain.beta.kubernetes.io/region":{},"f:failure-domain.beta.kubernetes.io/zone":{},"f:kubernetes.io/arch":{},"f:kubernetes.io/hostname":{},"f:kubernetes.io/os":{},"f:node.kubernetes.io/instance-type":{},"f:topology.kubernetes.io/region":{},"f:topology.kubernetes.io/zone":{}}},"f:spec":{"f:providerID":{}}} } {kube-controller-manager Update v1 2022-12-06 19:24:51 +0000 UTC FieldsV1 {"f:metadata":{"f:annotations":{"f:node.alpha.kubernetes.io/ttl":{}}},"f:spec":{"f:podCIDR":{},"f:podCIDRs":{".":{},"v:\"10.64.36.0/24\"":{}}}} } {kube-controller-manager Update v1 2022-12-06 19:24:51 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"NetworkUnavailable\"}":{"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{}}}}} status} {node-problem-detector Update v1 2022-12-06 19:44:49 +0000 UTC FieldsV1 
{"f:status":{"f:conditions":{"k:{\"type\":\"CorruptDockerOverlay2\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentContainerdRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentDockerRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentKubeletRestart\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"FrequentUnregisterNetDevice\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"KernelDeadlock\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}},"k:{\"type\":\"ReadonlyFilesystem\"}":{".":{},"f:lastHeartbeatTime":{},"f:lastTransitionTime":{},"f:message":{},"f:reason":{},"f:status":{},"f:type":{}}}}} status} {kubelet Update v1 2022-12-06 19:45:40 +0000 UTC FieldsV1 {"f:status":{"f:conditions":{"k:{\"type\":\"DiskPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"MemoryPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"PIDPressure\"}":{"f:lastHeartbeatTime":{}},"k:{\"type\":\"Ready\"}":{"f:lastHeartbeatTime":{},"f:message":{},"f:reason":{},"f:status":{}}},"f:images":{}}} status}]},Spec:NodeSpec{PodCIDR:10.64.36.0/24,DoNotUseExternalID:,ProviderID:gce://k8s-jkns-gci-autoscaling-migs/us-west1-b/ca-minion-group-wp8w,Unschedulable:false,Taints:[]Taint{},ConfigSource:nil,PodCIDRs:[10.64.36.0/24],},Status:NodeStatus{Capacity:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{101203873792 0} {<nil>} 98831908Ki BinarySI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} 
{<nil>} 0 DecimalSI},memory: {{7815430144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Allocatable:ResourceList{attachable-volumes-gce-pd: {{127 0} {<nil>} 127 DecimalSI},cpu: {{2 0} {<nil>} 2 DecimalSI},ephemeral-storage: {{91083486262 0} {<nil>} 91083486262 DecimalSI},hugepages-1Gi: {{0 0} {<nil>} 0 DecimalSI},hugepages-2Mi: {{0 0} {<nil>} 0 DecimalSI},memory: {{7553286144 0} {<nil>} BinarySI},pods: {{110 0} {<nil>} 110 DecimalSI},},Phase:,Conditions:[]NodeCondition{NodeCondition{Type:FrequentContainerdRestart,Status:False,LastHeartbeatTime:2022-12-06 19:44:49 +0000 UTC,LastTransitionTime:2022-12-06 19:24:46 +0000 UTC,Reason:NoFrequentContainerdRestart,Message:containerd is functioning properly,},NodeCondition{Type:CorruptDockerOverlay2,Status:False,LastHeartbeatTime:2022-12-06 19:44:49 +0000 UTC,LastTransitionTime:2022-12-06 19:24:46 +0000 UTC,Reason:NoCorruptDockerOverlay2,Message:docker overlay2 is functioning properly,},NodeCondition{Type:KernelDeadlock,Status:False,LastHeartbeatTime:2022-12-06 19:44:49 +0000 UTC,LastTransitionTime:2022-12-06 19:24:46 +0000 UTC,Reason:KernelHasNoDeadlock,Message:kernel has no deadlock,},NodeCondition{Type:ReadonlyFilesystem,Status:False,LastHeartbeatTime:2022-12-06 19:44:49 +0000 UTC,LastTransitionTime:2022-12-06 19:24:46 +0000 UTC,Reason:FilesystemIsNotReadOnly,Message:Filesystem is not read-only,},NodeCondition{Type:FrequentUnregisterNetDevice,Status:False,LastHeartbeatTime:2022-12-06 19:44:49 +0000 UTC,LastTransitionTime:2022-12-06 19:24:46 +0000 UTC,Reason:NoFrequentUnregisterNetDevice,Message:node is functioning properly,},NodeCondition{Type:FrequentKubeletRestart,Status:False,LastHeartbeatTime:2022-12-06 19:44:49 +0000 UTC,LastTransitionTime:2022-12-06 19:24:46 +0000 UTC,Reason:NoFrequentKubeletRestart,Message:kubelet is functioning properly,},NodeCondition{Type:FrequentDockerRestart,Status:False,LastHeartbeatTime:2022-12-06 19:44:49 +0000 UTC,LastTransitionTime:2022-12-06 19:24:46 +0000 
UTC,Reason:NoFrequentDockerRestart,Message:docker is functioning properly,},NodeCondition{Type:NetworkUnavailable,Status:False,LastHeartbeatTime:2022-12-06 19:24:51 +0000 UTC,LastTransitionTime:2022-12-06 19:24:51 +0000 UTC,Reason:RouteCreated,Message:RouteController created a route,},NodeCondition{Type:MemoryPressure,Status:False,LastHeartbeatTime:2022-12-06 19:45:40 +0000 UTC,LastTransitionTime:2022-12-06 19:24:42 +0000 UTC,Reason:KubeletHasSufficientMemory,Message:kubelet has sufficient memory available,},NodeCondition{Type:DiskPressure,Status:False,LastHeartbeatTime:2022-12-06 19:45:40 +0000 UTC,LastTransitionTime:2022-12-06 19:24:42 +0000 UTC,Reason:KubeletHasNoDiskPressure,Message:kubelet has no disk pressure,},NodeCondition{Type:PIDPressure,Status:False,LastHeartbeatTime:2022-12-06 19:45:40 +0000 UTC,LastTransitionTime:2022-12-06 19:24:42 +0000 UTC,Reason:KubeletHasSufficientPID,Message:kubelet has sufficient PID available,},NodeCondition{Type:Ready,Status:True,LastHeartbeatTime:2022-12-06 19:45:40 +0000 UTC,LastTransitionTime:2022-12-06 19:24:42 +0000 UTC,Reason:KubeletReady,Message:kubelet is posting ready status. 
AppArmor enabled,},},Addresses:[]NodeAddress{NodeAddress{Type:InternalIP,Address:10.138.0.39,},NodeAddress{Type:ExternalIP,Address:34.168.80.138,},NodeAddress{Type:InternalDNS,Address:ca-minion-group-wp8w.c.k8s-jkns-gci-autoscaling-migs.internal,},NodeAddress{Type:Hostname,Address:ca-minion-group-wp8w.c.k8s-jkns-gci-autoscaling-migs.internal,},},DaemonEndpoints:NodeDaemonEndpoints{KubeletEndpoint:DaemonEndpoint{Port:10250,},},NodeInfo:NodeSystemInfo{MachineID:e3397f79672743d290c62d2339ddaab3,SystemUUID:e3397f79-6727-43d2-90c6-2d2339ddaab3,BootID:505b62ed-f1f4-4200-9073-b71a935d17e4,KernelVersion:5.10.123+,OSImage:Container-Optimized OS from Google,ContainerRuntimeVersion:containerd://1.6.6,KubeletVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,KubeProxyVersion:v1.27.0-alpha.0.58+17bf864c1fc20e,OperatingSystem:linux,Architecture:amd64,},Images:[]ContainerImage{ContainerImage{Names:[registry.k8s.io/kube-proxy-amd64:v1.27.0-alpha.0.58_17bf864c1fc20e],SizeBytes:67201736,},ContainerImage{Names:[gke.gcr.io/prometheus-to-sd@sha256:e739643c3939ba0b161425f45a1989eedfc4a3b166db9a7100863296b4c70510 gke.gcr.io/prometheus-to-sd:v0.11.1-gke.1],SizeBytes:48742566,},ContainerImage{Names:[registry.k8s.io/kas-network-proxy/proxy-agent@sha256:48f2a4ec3e10553a81b8dd1c6fa5fe4bcc9617f78e71c1ca89c6921335e2d7da registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33],SizeBytes:8512162,},ContainerImage{Names:[registry.k8s.io/metadata-proxy@sha256:e914645f22e946bce5165737e1b244e0a296ad1f0f81a9531adc57af2780978a registry.k8s.io/metadata-proxy:v0.1.12],SizeBytes:5301657,},ContainerImage{Names:[registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097 registry.k8s.io/pause:3.9],SizeBytes:321520,},},VolumesInUse:[],VolumesAttached:[]AttachedVolume{},Config:nil,},} Dec 6 19:48:09.246: INFO: Logging kubelet events for node ca-minion-group-wp8w Dec 6 19:48:09.292: INFO: Logging pods the kubelet thinks is on node ca-minion-group-wp8w Dec 6 19:48:09.353: INFO: 
konnectivity-agent-t4f5f started at 2022-12-06 19:24:51 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:09.353: INFO: Container konnectivity-agent ready: true, restart count 0 Dec 6 19:48:09.353: INFO: kube-proxy-ca-minion-group-wp8w started at 2022-12-06 19:24:42 +0000 UTC (0+1 container statuses recorded) Dec 6 19:48:09.353: INFO: Container kube-proxy ready: true, restart count 0 Dec 6 19:48:09.353: INFO: metadata-proxy-v0.1-r7sjh started at 2022-12-06 19:24:43 +0000 UTC (0+2 container statuses recorded) Dec 6 19:48:09.353: INFO: Container metadata-proxy ready: true, restart count 0 Dec 6 19:48:09.353: INFO: Container prometheus-to-sd-exporter ready: true, restart count 0 Dec 6 19:48:09.520: INFO: Latency metrics for node ca-minion-group-wp8w [DeferCleanup (Each)] [sig-autoscaling] Cluster size autoscaling [Slow] tear down framework | framework.go:193 STEP: Destroying namespace "autoscaling-3052" for this suite. 12/06/22 19:48:09.52
Filter through log files | View test history on testgrid
go run hack/e2e.go -v --test --test_args='--ginkgo.focus=Kubernetes\se2e\ssuite\s\[It\]\s\[sig\-autoscaling\]\sCluster\ssize\sautoscaling\s\[Slow\]\sshould\sincrease\scluster\ssize\sif\spending\spods\sare\ssmall\sand\sone\snode\sis\sbroken\s\[Feature\:ClusterSizeAutoscalingScaleUp\]$'
test/e2e/autoscaling/cluster_size_autoscaling.go:205 k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.4(0xacfc400?) test/e2e/autoscaling/cluster_size_autoscaling.go:205 +0x1bc k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10.1() test/e2e/autoscaling/cluster_size_autoscaling.go:331 +0x22 k8s.io/kubernetes/test/e2e/framework/network.TestUnderTemporaryNetworkFailure({0x801df68, 0xc003cea4e0}, {0x7fa3fc0?, 0xc0002898d0?}, 0xc000d4f600, 0xc002535f58) test/e2e/framework/network/utils.go:1103 +0x49f k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10() test/e2e/autoscaling/cluster_size_autoscaling.go:331 +0x76
[BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] set up framework | framework.go:178 STEP: Creating a kubernetes client 12/06/22 18:40:56.324 Dec 6 18:40:56.324: INFO: >>> kubeConfig: /workspace/.kube/config STEP: Building a namespace api object, basename autoscaling 12/06/22 18:40:56.326 STEP: Waiting for a default service account to be provisioned in namespace 12/06/22 18:40:56.45 STEP: Waiting for kube-root-ca.crt to be provisioned in namespace 12/06/22 18:40:56.53 [BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/framework/metrics/init/init.go:31 [BeforeEach] [sig-autoscaling] Cluster size autoscaling [Slow] test/e2e/autoscaling/cluster_size_autoscaling.go:103 STEP: Initial size of ca-minion-group-1: 1 12/06/22 18:41:00.56 STEP: Initial size of ca-minion-group: 1 12/06/22 18:41:04.306 Dec 6 18:41:04.350: INFO: Cluster has reached the desired number of ready nodes 2 STEP: Initial number of schedulable nodes: 2 12/06/22 18:41:04.392 [It] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] test/e2e/autoscaling/cluster_size_autoscaling.go:329 Dec 6 18:41:04.435: INFO: Getting external IP address for ca-minion-group-1-v846 STEP: block network traffic from node ca-minion-group-1-v846 to the control plane 12/06/22 18:41:04.476 Dec 6 18:41:04.476: INFO: Waiting 2m0s to ensure node ca-minion-group-1-v846 is ready before beginning test... Dec 6 18:41:04.476: INFO: Waiting up to 2m0s for node ca-minion-group-1-v846 condition Ready to be true Dec 6 18:41:04.518: INFO: block network traffic from 34.105.60.48:22 to 35.230.76.149 Dec 6 18:41:05.038: INFO: Waiting 2m0s for node ca-minion-group-1-v846 to be not ready after simulated network failure Dec 6 18:41:05.038: INFO: Waiting up to 2m0s for node ca-minion-group-1-v846 condition Ready to be false Dec 6 18:41:05.084: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. 
Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:07.130: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:09.172: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:11.215: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:13.258: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:15.303: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:17.350: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:19.399: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:21.442: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:23.487: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:25.530: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:27.572: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. 
Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:29.614: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:31.657: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:33.699: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:35.742: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:37.786: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:39.828: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. AppArmor enabled Dec 6 18:41:41.872: INFO: Condition Ready of node ca-minion-group-1-v846 is true instead of false. Reason: KubeletReady, message: kubelet is posting ready status. 
AppArmor enabled STEP: Running RC which reserves 14406 MB of memory 12/06/22 18:41:43.915 STEP: creating replication controller memory-reservation in namespace autoscaling-8765 12/06/22 18:41:43.915 I1206 18:41:43.960792 7957 runners.go:193] Created replication controller with name: memory-reservation, namespace: autoscaling-8765, replica count: 100 I1206 18:41:54.011540 7957 runners.go:193] memory-reservation Pods: 100 out of 100 created, 0 running, 47 pending, 53 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I1206 18:42:04.012186 7957 runners.go:193] memory-reservation Pods: 100 out of 100 created, 0 running, 47 pending, 53 waiting, 0 inactive, 0 terminating, 0 unknown, 0 runningButNotReady I1206 18:42:04.061262 7957 runners.go:193] Pod memory-reservation-2snlb ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061348 7957 runners.go:193] Pod memory-reservation-4l48n Pending <nil> I1206 18:42:04.061360 7957 runners.go:193] Pod memory-reservation-4npvn ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061369 7957 runners.go:193] Pod memory-reservation-4vfxl Pending <nil> I1206 18:42:04.061378 7957 runners.go:193] Pod memory-reservation-56jm8 Pending <nil> I1206 18:42:04.061386 7957 runners.go:193] Pod memory-reservation-5jc2l Pending <nil> I1206 18:42:04.061394 7957 runners.go:193] Pod memory-reservation-5npsx ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061403 7957 runners.go:193] Pod memory-reservation-5wdwk ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061411 7957 runners.go:193] Pod memory-reservation-5xkrr ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061419 7957 runners.go:193] Pod memory-reservation-62wdb Pending <nil> I1206 18:42:04.061427 7957 runners.go:193] Pod memory-reservation-66vsj ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061436 7957 runners.go:193] Pod memory-reservation-72pdz Pending <nil> I1206 18:42:04.061444 7957 runners.go:193] Pod memory-reservation-748d9 Pending <nil> I1206 18:42:04.061452 7957 
runners.go:193] Pod memory-reservation-7mx95 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061461 7957 runners.go:193] Pod memory-reservation-7qk5r ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061469 7957 runners.go:193] Pod memory-reservation-8fxbw Pending <nil> I1206 18:42:04.061477 7957 runners.go:193] Pod memory-reservation-8qqpr Pending <nil> I1206 18:42:04.061485 7957 runners.go:193] Pod memory-reservation-8r9cv Pending <nil> I1206 18:42:04.061493 7957 runners.go:193] Pod memory-reservation-8vqg6 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061501 7957 runners.go:193] Pod memory-reservation-96ngf Pending <nil> I1206 18:42:04.061509 7957 runners.go:193] Pod memory-reservation-b4mr6 Pending <nil> I1206 18:42:04.061517 7957 runners.go:193] Pod memory-reservation-bf7jr Pending <nil> I1206 18:42:04.061526 7957 runners.go:193] Pod memory-reservation-bkp5l ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061534 7957 runners.go:193] Pod memory-reservation-bsh6h Pending <nil> I1206 18:42:04.061542 7957 runners.go:193] Pod memory-reservation-bwfjv Pending <nil> I1206 18:42:04.061550 7957 runners.go:193] Pod memory-reservation-bzjsm ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061558 7957 runners.go:193] Pod memory-reservation-c76g4 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061566 7957 runners.go:193] Pod memory-reservation-cstng Pending <nil> I1206 18:42:04.061574 7957 runners.go:193] Pod memory-reservation-cwvl6 Pending <nil> I1206 18:42:04.061582 7957 runners.go:193] Pod memory-reservation-d278d Pending <nil> I1206 18:42:04.061594 7957 runners.go:193] Pod memory-reservation-dfqqt Pending <nil> I1206 18:42:04.061603 7957 runners.go:193] Pod memory-reservation-fhv8n Pending <nil> I1206 18:42:04.061611 7957 runners.go:193] Pod memory-reservation-fjs7d Pending <nil> I1206 18:42:04.061619 7957 runners.go:193] Pod memory-reservation-fnnzt ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061627 7957 runners.go:193] Pod memory-reservation-fszr7 
Pending <nil> I1206 18:42:04.061635 7957 runners.go:193] Pod memory-reservation-gfcvl Pending <nil> I1206 18:42:04.061643 7957 runners.go:193] Pod memory-reservation-gfqdk Pending <nil> I1206 18:42:04.061651 7957 runners.go:193] Pod memory-reservation-h97lf Pending <nil> I1206 18:42:04.061659 7957 runners.go:193] Pod memory-reservation-hsgn9 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061666 7957 runners.go:193] Pod memory-reservation-j4bhd Pending <nil> I1206 18:42:04.061675 7957 runners.go:193] Pod memory-reservation-jtwd9 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061682 7957 runners.go:193] Pod memory-reservation-jx2bm ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061694 7957 runners.go:193] Pod memory-reservation-kjfkj ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061702 7957 runners.go:193] Pod memory-reservation-kqlhz ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061710 7957 runners.go:193] Pod memory-reservation-kt7hv ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061718 7957 runners.go:193] Pod memory-reservation-kxzvm ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061726 7957 runners.go:193] Pod memory-reservation-l6zth Pending <nil> I1206 18:42:04.061734 7957 runners.go:193] Pod memory-reservation-l86rg Pending <nil> I1206 18:42:04.061742 7957 runners.go:193] Pod memory-reservation-lgjfs Pending <nil> I1206 18:42:04.061749 7957 runners.go:193] Pod memory-reservation-m98lv Pending <nil> I1206 18:42:04.061758 7957 runners.go:193] Pod memory-reservation-mk2nr Pending <nil> I1206 18:42:04.061766 7957 runners.go:193] Pod memory-reservation-mtxsr ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061774 7957 runners.go:193] Pod memory-reservation-mzwcs ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061791 7957 runners.go:193] Pod memory-reservation-nff6c Pending <nil> I1206 18:42:04.061799 7957 runners.go:193] Pod memory-reservation-nmlg2 Pending <nil> I1206 18:42:04.061807 7957 runners.go:193] Pod memory-reservation-nnkgp 
ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061816 7957 runners.go:193] Pod memory-reservation-pj9g4 Pending <nil> I1206 18:42:04.061825 7957 runners.go:193] Pod memory-reservation-pvnps ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061834 7957 runners.go:193] Pod memory-reservation-pz4p7 Pending <nil> I1206 18:42:04.061843 7957 runners.go:193] Pod memory-reservation-q94fq ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061850 7957 runners.go:193] Pod memory-reservation-qg89c Pending <nil> I1206 18:42:04.061859 7957 runners.go:193] Pod memory-reservation-qvlx5 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061866 7957 runners.go:193] Pod memory-reservation-r2bch Pending <nil> I1206 18:42:04.061880 7957 runners.go:193] Pod memory-reservation-rfvhv Pending <nil> I1206 18:42:04.061889 7957 runners.go:193] Pod memory-reservation-rgftw Pending <nil> I1206 18:42:04.061897 7957 runners.go:193] Pod memory-reservation-rmtxd Pending <nil> I1206 18:42:04.061905 7957 runners.go:193] Pod memory-reservation-rr44q Pending <nil> I1206 18:42:04.061914 7957 runners.go:193] Pod memory-reservation-s2xtc ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061922 7957 runners.go:193] Pod memory-reservation-s75ff ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061930 7957 runners.go:193] Pod memory-reservation-scxrt ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061939 7957 runners.go:193] Pod memory-reservation-sgl84 Pending <nil> I1206 18:42:04.061947 7957 runners.go:193] Pod memory-reservation-sm6hh ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061955 7957 runners.go:193] Pod memory-reservation-sn29r ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061963 7957 runners.go:193] Pod memory-reservation-tr2x2 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061972 7957 runners.go:193] Pod memory-reservation-txbwh Pending <nil> I1206 18:42:04.061980 7957 runners.go:193] Pod memory-reservation-vc2lq Pending <nil> I1206 18:42:04.061988 7957 runners.go:193] Pod 
memory-reservation-vc6n6 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.061996 7957 runners.go:193] Pod memory-reservation-vnmt6 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062004 7957 runners.go:193] Pod memory-reservation-w524l ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062012 7957 runners.go:193] Pod memory-reservation-w65q7 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062020 7957 runners.go:193] Pod memory-reservation-w7w8v Pending <nil> I1206 18:42:04.062028 7957 runners.go:193] Pod memory-reservation-wbntx ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062038 7957 runners.go:193] Pod memory-reservation-wc5d2 Pending <nil> I1206 18:42:04.062048 7957 runners.go:193] Pod memory-reservation-wcpjm ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062056 7957 runners.go:193] Pod memory-reservation-wkx6s ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062064 7957 runners.go:193] Pod memory-reservation-wpd2k Pending <nil> I1206 18:42:04.062072 7957 runners.go:193] Pod memory-reservation-xdn5b Pending <nil> I1206 18:42:04.062080 7957 runners.go:193] Pod memory-reservation-xfmvf ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062088 7957 runners.go:193] Pod memory-reservation-xgw5b Pending <nil> I1206 18:42:04.062096 7957 runners.go:193] Pod memory-reservation-xj5h6 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062105 7957 runners.go:193] Pod memory-reservation-xvmjb ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062118 7957 runners.go:193] Pod memory-reservation-xxwm4 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062130 7957 runners.go:193] Pod memory-reservation-z2cj2 Pending <nil> I1206 18:42:04.062141 7957 runners.go:193] Pod memory-reservation-z428q ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062153 7957 runners.go:193] Pod memory-reservation-z5t4g Pending <nil> I1206 18:42:04.062164 7957 runners.go:193] Pod memory-reservation-zd9vq Pending <nil> I1206 18:42:04.062176 7957 runners.go:193] Pod memory-reservation-zgmtn 
ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062187 7957 runners.go:193] Pod memory-reservation-zhscr ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062199 7957 runners.go:193] Pod memory-reservation-zjsq6 ca-minion-group-pg0w Pending <nil> I1206 18:42:04.062210 7957 runners.go:193] Pod memory-reservation-zwc9v Pending <nil> Dec 6 18:42:04.106: INFO: Condition Ready of node ca-minion-group-1-v846 is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 18:41:43 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 18:41:48 +0000 UTC}]. Failure I1206 18:42:04.106282 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 1 Dec 6 18:42:24.149: INFO: Condition Ready of node ca-minion-group-1-v846 is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 18:41:43 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 18:41:48 +0000 UTC}]. Failure I1206 18:42:24.149760 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 1 Dec 6 18:42:44.197: INFO: Condition Ready of node ca-minion-group-1-v846 is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 18:41:43 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 18:41:48 +0000 UTC}]. Failure I1206 18:42:44.198000 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 1 Dec 6 18:43:04.241: INFO: Condition Ready of node ca-minion-group-1-v846 is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 18:41:43 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 18:41:48 +0000 UTC}]. 
Failure I1206 18:43:04.242025 7957 cluster_size_autoscaling.go:1384] Waiting for cluster with func, current size 2, not ready nodes 1 Dec 6 18:43:24.287: INFO: Condition Ready of node ca-minion-group-1-v846 is false, but Node is tainted by NodeController with [{node.kubernetes.io/unreachable NoSchedule 2022-12-06 18:41:43 +0000 UTC} {node.kubernetes.io/unreachable NoExecute 2022-12-06 18:41:48 +0000 UTC}]. Failure I1206 18:43:24.287160 7957 cluster_size_autoscaling.go:1381] Cluster has reached the desired size I1206 18:43:24.336921 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-4l48n memory-reservation-4vfxl memory-reservation-56jm8 memory-reservation-5jc2l memory-reservation-62wdb memory-reservation-72pdz memory-reservation-748d9 memory-reservation-8fxbw memory-reservation-8qqpr memory-reservation-8r9cv memory-reservation-96ngf memory-reservation-b4mr6 memory-reservation-bf7jr memory-reservation-bsh6h memory-reservation-bwfjv memory-reservation-cstng memory-reservation-cwvl6 memory-reservation-d278d memory-reservation-dfqqt memory-reservation-fhv8n memory-reservation-fjs7d memory-reservation-fszr7 memory-reservation-gfcvl memory-reservation-gfqdk memory-reservation-h97lf memory-reservation-j4bhd memory-reservation-l6zth memory-reservation-l86rg memory-reservation-lgjfs memory-reservation-m98lv memory-reservation-mk2nr memory-reservation-nff6c memory-reservation-nmlg2 memory-reservation-pj9g4 memory-reservation-pz4p7 memory-reservation-qg89c memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rgftw memory-reservation-rmtxd memory-reservation-rr44q memory-reservation-sgl84 memory-reservation-txbwh memory-reservation-vc2lq memory-reservation-w7w8v memory-reservation-wc5d2 memory-reservation-wpd2k memory-reservation-xdn5b memory-reservation-xgw5b memory-reservation-z2cj2 memory-reservation-z5t4g memory-reservation-zd9vq memory-reservation-zwc9v] I1206 18:43:44.394387 7957 cluster_size_autoscaling.go:1417] 
Too many pods are not ready yet: [memory-reservation-4l48n memory-reservation-4vfxl memory-reservation-56jm8 memory-reservation-5jc2l memory-reservation-62wdb memory-reservation-72pdz memory-reservation-8fxbw memory-reservation-8qqpr memory-reservation-8r9cv memory-reservation-96ngf memory-reservation-b4mr6 memory-reservation-bf7jr memory-reservation-bsh6h memory-reservation-cstng memory-reservation-cwvl6 memory-reservation-d278d memory-reservation-dfqqt memory-reservation-fhv8n memory-reservation-fjs7d memory-reservation-fszr7 memory-reservation-gfcvl memory-reservation-gfqdk memory-reservation-h97lf memory-reservation-j4bhd memory-reservation-l86rg memory-reservation-m98lv memory-reservation-mk2nr memory-reservation-nff6c memory-reservation-nmlg2 memory-reservation-pj9g4 memory-reservation-pz4p7 memory-reservation-qg89c memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rgftw memory-reservation-rmtxd memory-reservation-rr44q memory-reservation-sgl84 memory-reservation-txbwh memory-reservation-vc2lq memory-reservation-wc5d2 memory-reservation-xdn5b memory-reservation-xgw5b memory-reservation-z2cj2 memory-reservation-z5t4g memory-reservation-zd9vq memory-reservation-zwc9v] I1206 18:44:04.450262 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] I1206 18:44:24.502733 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] I1206 18:44:44.558549 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] I1206 18:45:04.611742 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] I1206 
18:45:24.664665 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] I1206 18:45:44.716327 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 5m8.073s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 In [It] (Node Runtime: 5m0.004s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 At [By Step] creating replication controller memory-reservation in namespace autoscaling-8765 (Step Runtime: 4m20.481s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 6550 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.waitForCaPodsReadyInNamespace(0xc000416c30, {0x801df68, 0xc003cea4e0}, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1391 > k8s.io/kubernetes/test/e2e/autoscaling.waitForAllCaPodsReadyInNamespace(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1428 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.4(0xacfc400?) 
test/e2e/autoscaling/cluster_size_autoscaling.go:205 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10.1() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/test/e2e/framework/network.TestUnderTemporaryNetworkFailure({0x801df68, 0xc003cea4e0}, {0x7fa3fc0?, 0xc0002898d0?}, 0xc000d4f600, 0xc002535f58) test/e2e/framework/network/utils.go:1103 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00073de00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 18:46:04.768354 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 5m28.077s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 In [It] (Node Runtime: 5m20.009s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 At [By Step] creating replication controller memory-reservation in namespace autoscaling-8765 (Step Runtime: 4m40.486s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 6550 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.waitForCaPodsReadyInNamespace(0xc000416c30, {0x801df68, 0xc003cea4e0}, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1391 > 
k8s.io/kubernetes/test/e2e/autoscaling.waitForAllCaPodsReadyInNamespace(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1428 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.4(0xacfc400?) test/e2e/autoscaling/cluster_size_autoscaling.go:205 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10.1() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/test/e2e/framework/network.TestUnderTemporaryNetworkFailure({0x801df68, 0xc003cea4e0}, {0x7fa3fc0?, 0xc0002898d0?}, 0xc000d4f600, 0xc002535f58) test/e2e/framework/network/utils.go:1103 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00073de00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 18:46:24.821686 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 5m48.082s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 In [It] (Node Runtime: 5m40.013s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 At [By Step] creating replication controller memory-reservation in namespace autoscaling-8765 (Step Runtime: 5m0.491s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 6550 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > 
k8s.io/kubernetes/test/e2e/autoscaling.waitForCaPodsReadyInNamespace(0xc000416c30, {0x801df68, 0xc003cea4e0}, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1391 > k8s.io/kubernetes/test/e2e/autoscaling.waitForAllCaPodsReadyInNamespace(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1428 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.4(0xacfc400?) test/e2e/autoscaling/cluster_size_autoscaling.go:205 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10.1() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/test/e2e/framework/network.TestUnderTemporaryNetworkFailure({0x801df68, 0xc003cea4e0}, {0x7fa3fc0?, 0xc0002898d0?}, 0xc000d4f600, 0xc002535f58) test/e2e/framework/network/utils.go:1103 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00073de00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 18:46:44.875217 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 6m8.086s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 In [It] (Node Runtime: 6m0.017s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 At [By Step] creating replication controller memory-reservation in namespace 
autoscaling-8765 (Step Runtime: 5m20.494s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 6550 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.waitForCaPodsReadyInNamespace(0xc000416c30, {0x801df68, 0xc003cea4e0}, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1391 > k8s.io/kubernetes/test/e2e/autoscaling.waitForAllCaPodsReadyInNamespace(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1428 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.4(0xacfc400?) test/e2e/autoscaling/cluster_size_autoscaling.go:205 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10.1() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/test/e2e/framework/network.TestUnderTemporaryNetworkFailure({0x801df68, 0xc003cea4e0}, {0x7fa3fc0?, 0xc0002898d0?}, 0xc000d4f600, 0xc002535f58) test/e2e/framework/network/utils.go:1103 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00073de00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 18:47:04.926033 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 6m28.09s) 
test/e2e/autoscaling/cluster_size_autoscaling.go:329 In [It] (Node Runtime: 6m20.022s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 At [By Step] creating replication controller memory-reservation in namespace autoscaling-8765 (Step Runtime: 5m40.499s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 6550 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.waitForCaPodsReadyInNamespace(0xc000416c30, {0x801df68, 0xc003cea4e0}, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1391 > k8s.io/kubernetes/test/e2e/autoscaling.waitForAllCaPodsReadyInNamespace(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1428 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.4(0xacfc400?) test/e2e/autoscaling/cluster_size_autoscaling.go:205 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10.1() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/test/e2e/framework/network.TestUnderTemporaryNetworkFailure({0x801df68, 0xc003cea4e0}, {0x7fa3fc0?, 0xc0002898d0?}, 0xc000d4f600, 0xc002535f58) test/e2e/framework/network/utils.go:1103 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00073de00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 18:47:24.979781 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] ------------------------------ Automatically polling progress: [sig-autoscaling] 
Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 6m48.091s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 In [It] (Node Runtime: 6m40.023s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 At [By Step] creating replication controller memory-reservation in namespace autoscaling-8765 (Step Runtime: 6m0.5s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 6550 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.waitForCaPodsReadyInNamespace(0xc000416c30, {0x801df68, 0xc003cea4e0}, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1391 > k8s.io/kubernetes/test/e2e/autoscaling.waitForAllCaPodsReadyInNamespace(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1428 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.4(0xacfc400?) test/e2e/autoscaling/cluster_size_autoscaling.go:205 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10.1() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/test/e2e/framework/network.TestUnderTemporaryNetworkFailure({0x801df68, 0xc003cea4e0}, {0x7fa3fc0?, 0xc0002898d0?}, 0xc000d4f600, 0xc002535f58) test/e2e/framework/network/utils.go:1103 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00073de00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 18:47:45.032277 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: 
[memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 7m8.096s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 In [It] (Node Runtime: 7m0.027s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 At [By Step] creating replication controller memory-reservation in namespace autoscaling-8765 (Step Runtime: 6m20.505s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 6550 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.waitForCaPodsReadyInNamespace(0xc000416c30, {0x801df68, 0xc003cea4e0}, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1391 > k8s.io/kubernetes/test/e2e/autoscaling.waitForAllCaPodsReadyInNamespace(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1428 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.4(0xacfc400?) 
test/e2e/autoscaling/cluster_size_autoscaling.go:205 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10.1() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/test/e2e/framework/network.TestUnderTemporaryNetworkFailure({0x801df68, 0xc003cea4e0}, {0x7fa3fc0?, 0xc0002898d0?}, 0xc000d4f600, 0xc002535f58) test/e2e/framework/network/utils.go:1103 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00073de00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 18:48:05.099814 7957 cluster_size_autoscaling.go:1417] Too many pods are not ready yet: [memory-reservation-j4bhd memory-reservation-r2bch memory-reservation-rfvhv memory-reservation-rmtxd] ------------------------------ Automatically polling progress: [sig-autoscaling] Cluster size autoscaling [Slow] should increase cluster size if pending pods are small and one node is broken [Feature:ClusterSizeAutoscalingScaleUp] (Spec Runtime: 7m28.097s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 In [It] (Node Runtime: 7m20.029s) test/e2e/autoscaling/cluster_size_autoscaling.go:329 At [By Step] creating replication controller memory-reservation in namespace autoscaling-8765 (Step Runtime: 6m40.506s) test/e2e/framework/rc/rc_utils.go:85 Spec Goroutine goroutine 6550 [sleep] time.Sleep(0x4a817c800) /usr/local/go/src/runtime/time.go:195 > k8s.io/kubernetes/test/e2e/autoscaling.waitForCaPodsReadyInNamespace(0xc000416c30, {0x801df68, 0xc003cea4e0}, 0x0) test/e2e/autoscaling/cluster_size_autoscaling.go:1391 > 
k8s.io/kubernetes/test/e2e/autoscaling.waitForAllCaPodsReadyInNamespace(...) test/e2e/autoscaling/cluster_size_autoscaling.go:1428 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.4(0xacfc400?) test/e2e/autoscaling/cluster_size_autoscaling.go:205 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10.1() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/test/e2e/framework/network.TestUnderTemporaryNetworkFailure({0x801df68, 0xc003cea4e0}, {0x7fa3fc0?, 0xc0002898d0?}, 0xc000d4f600, 0xc002535f58) test/e2e/framework/network/utils.go:1103 > k8s.io/kubernetes/test/e2e/autoscaling.glob..func3.10() test/e2e/autoscaling/cluster_size_autoscaling.go:331 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.extractBodyFunction.func3({0x2d591ce, 0xc00073de00}) vendor/github.com/onsi/ginkgo/v2/internal/node.go:449 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode.func2() vendor/github.com/onsi/ginkgo/v2/internal/suite.go:750 k8s.io/kubernetes/vendor/github.com/onsi/ginkgo/v2/internal.(*Suite).runNode vendor/github.com/onsi/ginkgo/v2/internal/suite.go:738 ------------------------------ I1206 18:48:25.100715 7957 cluster_size_autoscaling.go:1419] Timeout on waiting for pods being ready Dec 6 18:48:25.100: INFO: Running '/workspace/kubernetes/platforms/linux/amd64/kubectl --server=https://35.230.76.149 --kubeconfig=/workspace/.kube/config --namespace=autoscaling-8765 get pods -o json --all-namespaces' Dec 6 18:48:25.651: INFO: stderr: "" Dec 6 18:48:25.652: INFO: stdout: "{\n \"apiVersion\": \"v1\",\n \"items\": [\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:43Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-2snlb\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n 
\"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28892\",\n \"uid\": \"2106fb3a-d560-48ac-8d96-40fb5c168eb6\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-cm5vh\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-cm5vh\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n 
\"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:04Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:04Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:43Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://3976be17bb982e647cde81e84ac135f1c4819eca267cd40bb3376951464141d3\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:03Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.18\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.18\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-4l48n\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29349\",\n \"uid\": 
\"9572c3bb-4ede-4298-9514-9e79d57323c4\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-9l2ct\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-9l2ct\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n 
\"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:31Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:31Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://ce5361b0a44d47a6046a3317c058bc6f838811fa5fb833042b7dae61fcf28c6e\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:31Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.20\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.20\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-4npvn\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28863\",\n \"uid\": \"50648d8b-b21e-4c62-b92e-739143cc6a90\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": 
\"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-6kjk6\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-6kjk6\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:04Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n 
},\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:04Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://5c7c1cdd8706362ffce44b75b9f4a35dfaaeda0a2636814d301641b7e1659ef0\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:03Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.21\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.21\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-4vfxl\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29398\",\n \"uid\": \"d9aaa64e-29d3-4061-a09a-4c543374edf8\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": 
\"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-ncngp\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-ncngp\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": 
\"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://fc5a01b4012c6f5cbec6d774635be91ea11d3c97f74608f0a5c5343cc41e99fc\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:35Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.38\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.38\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-56jm8\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29406\",\n \"uid\": \"818ffb76-de64-4dbf-b728-1cc5fb9f3980\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-z6789\",\n 
\"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-z6789\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:32Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:32Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": 
\"containerd://bb2a8002a114b0ecc651b6274a273435969ee36be9c2e545e4e86dc2af13f194\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:31Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.17\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.17\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-5jc2l\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29325\",\n \"uid\": \"0d99b046-78e0-49d8-aef1-16f3766f29e4\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-gsjjv\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": 
\"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-gsjjv\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:30Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:30Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://53a06be7a712412b29c3b53490898e3d83a5590ec66d024e4a5df31644ac7808\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": 
\"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:30Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.15\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.15\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-5npsx\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28949\",\n \"uid\": \"d5a1d47c-8c8b-4f84-bde3-a2c011e6f9d5\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-6scm7\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": 
\"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-6scm7\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:04Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:04Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://f9da8a9a09a3a128ddf37a753c5dddb5a6e2134cecdea38d0211bba8ba7fad28\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n 
\"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:03Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.23\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.23\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:43Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-5wdwk\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28837\",\n \"uid\": \"0b640ac5-1181-45cf-850d-f9728824d050\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-nsk84\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n 
\"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-nsk84\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:43Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:00Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:00Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:43Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://bdcaac65282abc68245d552184e3facdbd330c38ba8aaa01b1ce900b0d753cd5\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:41:59Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n 
\"podIP\": \"10.64.18.15\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.15\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:43Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:43Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-5xkrr\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28827\",\n \"uid\": \"5d1093dd-ffd3-4557-a742-c53f9d8ae77a\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-5624s\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": 
\"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-5624s\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:59Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:59Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://816ff6da56ce7690211ce86d6683b27602a1a56cdaa5430a08156c0e367117ae\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:41:58Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.12\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.12\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": 
\"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-62wdb\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29342\",\n \"uid\": \"dd806a31-71f0-4c55-bab5-de74faef44b9\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-qxhfc\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-qxhfc\",\n \"projected\": {\n \"defaultMode\": 
420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://01797728b5efb990961c730f2c875b24f7ac7f66548ff96940b7663d8b8791ea\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:35Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.33\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.33\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n 
\"name\": \"memory-reservation-66vsj\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28956\",\n \"uid\": \"ff0ec051-3891-40c7-bd40-debf434d604e\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-jzfxk\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-jzfxk\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n 
}\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://ab1a902cf3f0020ce99403ae57413cc31c1e1fa8254bc4b9e93a5a94fad995df\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:08Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.46\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.46\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-72pdz\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": 
\"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29355\",\n \"uid\": \"0c727556-06b6-49bf-b8a3-4520373505f4\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-t8gwn\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-t8gwn\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": 
\"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://949d6e8092105a8fb065f80e28bbe5a3516cbe2848060075e160331c75a8d503\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.31\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.31\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-748d9\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29263\",\n \"uid\": 
\"233afad8-e4f1-47ae-8e95-ef6ee703bb1e\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-8kmsc\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-8kmsc\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n 
\"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:20Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:20Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://cc19114cbb1c4158e437ace77cf423ca4390f818a79a015c6d090aa373fd1e25\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:20Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.7\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.7\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-7mx95\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28962\",\n \"uid\": \"f328bd75-6358-4536-bc43-78f968902828\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": 
\"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-jndpt\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-jndpt\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n 
},\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://d7baa92c450c378f2b024f2c99f93da5157851efee32cdfaab4e87ad00f7372d\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:08Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.41\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.41\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-7qk5r\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28920\",\n \"uid\": \"fa28dd15-62ed-4bde-b4fb-ccc86a39ed73\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": 
\"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-gbtd5\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-gbtd5\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:45Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": 
\"2022-12-06T18:41:45Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://4649a76495cf0e266ecbcb36e5305205239de6e45be421d14f41855c3967108b\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:09Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.58\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.58\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:45Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-8fxbw\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29360\",\n \"uid\": \"2970c7bf-4027-4ff5-b028-437af8257992\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-ffhcd\",\n 
\"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-ffhcd\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:27Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:27Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": 
\"containerd://8ea6efcf1f912f7c85f2d5910f4b3e81cedbe157e5d8591d174f9328324d4add\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:27Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.10\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.10\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-8qqpr\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29432\",\n \"uid\": \"dd7c8e23-60a6-462d-921a-670f7f27a8a0\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-sbgw8\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": 
\"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-sbgw8\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://4aaec671379dc24d863be0aa8798c5ce62eab68e99eb6a0dbb8f2a831375620d\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": 
\"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.39\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.39\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-8r9cv\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29416\",\n \"uid\": \"3b78b9b1-c360-436a-a462-97a42820ffd6\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-khsxl\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": 
\"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-khsxl\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://0279b508fae5c49831030db55b17914de514fa2c43e75236baca4aa327bc3540\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n 
\"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.46\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.46\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-8vqg6\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28899\",\n \"uid\": \"d993acdc-53fd-4bf2-9990-493f73655e20\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-qlwb6\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n 
\"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-qlwb6\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:06Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:06Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://7e17101d042eed3516234da9500b08534b0160d583cc6dd3b524478f073e1017\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:06Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n 
\"podIP\": \"10.64.18.30\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.30\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-96ngf\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29382\",\n \"uid\": \"c0e4e547-7ab1-43be-a382-2ce43b7c62fc\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-k7w78\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": 
\"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-k7w78\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:33Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:33Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://583f82d9e9a179dc1ff92401acec31b370f4e82ac4a9957325c1ee6459a87667\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:32Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.23\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.23\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": 
\"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-b4mr6\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29434\",\n \"uid\": \"93650264-62b2-4aea-88be-8eaf45a738cf\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-22264\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-22264\",\n \"projected\": {\n \"defaultMode\": 
420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://dafe2c3c4c9ae4c169e9af65f4a1dff70c9876d54ac55a8d0bb7542f88dfeba1\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.49\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.49\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n 
\"name\": \"memory-reservation-bf7jr\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29340\",\n \"uid\": \"79cea25c-3df1-4f57-b6a4-e7f12f25b494\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-vl5cw\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-vl5cw\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": 
\"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://3f6f06093f3b8aa6c35daccf46ab91d36e32fd94c07be3def7dae7a0ac77b3a7\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:35Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.36\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.36\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-bkp5l\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n 
\"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28912\",\n \"uid\": \"8797cdbb-aa5c-4e45-8958-0d3be7d7fd04\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-h5tg4\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-h5tg4\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n 
\"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://f86e33a3e1596a3996c05b16ddd1d5505631c382ad2c040249794bc7aa06a581\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:08Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.52\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.52\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-bsh6h\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29366\",\n \"uid\": 
\"5dc6ef44-654e-49f3-a7b1-f24663325af4\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-k5kjg\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-k5kjg\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n 
\"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:30Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:30Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://b911745dd334ecc9a63eea10fa19e259a3e8aa745946e655d8c8f6fdde2d61ee\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:30Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.16\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.16\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-bwfjv\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29256\",\n \"uid\": \"d8e81f25-ef84-4acb-aabb-212ad4d0b3ba\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": 
\"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-kxtrh\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-kxtrh\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:17Z\",\n \"status\": \"True\",\n \"type\": 
\"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:17Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://bf19f239431c9e97cebb4fae3e4f4d433b04ffbb59a351e79f00540e9aeb8f84\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:16Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.4\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.4\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-bzjsm\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28877\",\n \"uid\": \"fd6f8784-aadf-4eb8-ba6a-b49106d53a63\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n 
\"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-zwj9x\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-zwj9x\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:07Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:07Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": 
null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://cba536ffd78572562f64d120d1367469f73f1d7077f4c278056ecc135a04b1c1\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:07Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.53\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.53\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-c76g4\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28908\",\n \"uid\": \"6e2650bf-1c8f-425d-95d9-baf6636d32b4\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": 
\"kube-api-access-cqncp\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-cqncp\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": 
\"containerd://4fb05ad447f8ee83288f2a9970eb5e7a52eef09bc7011d8e839b65b9ab7f9b6c\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:09Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.49\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.49\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-cstng\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29337\",\n \"uid\": \"6e623165-f04c-4d5c-9f34-9501cd395c0b\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-kddrm\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": 
\"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-kddrm\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://e420fe3e84d3e67479f5cee068125aa592419b67e1a23804b1ac15f7bb33c9f6\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": 
\"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.50\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.50\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-cwvl6\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29421\",\n \"uid\": \"5111f0a9-7971-4895-aa59-1f482c3834e4\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-5v8pg\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": 
\"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-5v8pg\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:29Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:29Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://a0c239239230842aec4fc858dd65fcffe3b90d56a8f34730c6cfdbf1eb9efc97\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n 
\"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:29Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.11\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.11\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-d278d\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29389\",\n \"uid\": \"a63bf2c2-5c84-4c46-86c0-30691c075616\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-57rrp\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n 
\"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-57rrp\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://7fbc0a0970bf69b33f2840435f9d7c1f9c7fb2faf31fccc6c788e219e0a68318\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n 
\"podIP\": \"10.64.25.40\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.40\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-dfqqt\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29372\",\n \"uid\": \"8d60edc0-b005-4a20-9928-f1f81b6a9364\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-pzq5w\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": 
\"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-pzq5w\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://0e689ddba6042d41531f362336546a1e230a6ae8f8cbf53b8d258121c81a15da\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:35Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.42\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.42\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": 
\"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-fhv8n\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29379\",\n \"uid\": \"9e5fdcfc-d829-42c5-8b82-ccac364bc55e\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-ccsc9\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-ccsc9\",\n \"projected\": {\n \"defaultMode\": 
420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:30Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:30Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://79fa6763662ff658b735448aec1b4076fc54a5037c22b448f0e8a31b18fbf746\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:30Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.18\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.18\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n 
\"name\": \"memory-reservation-fjs7d\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29345\",\n \"uid\": \"b848c655-1387-440a-ab61-f1c0b14bb524\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-h4jfn\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-h4jfn\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": 
\"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:34Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:34Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://1d5233c3f26b8c33588b95aba1374e3ff6e026a5ac95efa60dfac037213ade2c\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:33Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.29\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.29\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-fnnzt\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n 
\"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28839\",\n \"uid\": \"b15248a3-df59-4662-b914-b28b87ee9f25\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-bf5qk\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-bf5qk\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n 
\"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:07Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:07Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://b141beccf107129cc2b64770b1b4bb4e19d5fb68c783d6fc28404def0d1b47e6\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:07Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.33\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.33\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-fszr7\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29322\",\n \"uid\": 
\"e8cab9e5-4934-4e9d-82f3-95973271d5c8\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-nvw6j\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-nvw6j\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n 
\"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:28Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:28Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://5d2da70977b070623d0a80b29b5fb061cd6236e76e6fa35c498ea07ebc0840c0\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:28Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.12\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.12\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-gfcvl\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29375\",\n \"uid\": \"cfbc8c78-54bf-4b54-8697-dd72af6ad819\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": 
\"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-d9wcl\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-d9wcl\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": 
\"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://83a088b75b81019608a7e7eb09fc812ecd6a8d6d18555d63509233f7586cbfc9\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.51\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.51\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-gfqdk\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29313\",\n \"uid\": \"6ed87607-225c-4b34-a1a0-f794d7101d88\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n 
\"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-mz96d\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-mz96d\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": 
null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://522dfcd39c0953fb181f7c2f984ee47c67e850a4b0a4af3a59ad71b4052d452c\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:35Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.27\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.27\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-h97lf\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29428\",\n \"uid\": \"796c5094-7c61-45b2-996b-c19bc3d73540\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": 
\"kube-api-access-8fj42\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-8fj42\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:33Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:33Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": 
\"containerd://e15e3f4a23b6f43a2e3f42f3679cc881c21d04a00d004af317ba48bf316f6e99\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:32Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.32\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.32\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-hsgn9\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28939\",\n \"uid\": \"52ebf7da-8b20-4175-a9a4-f2e3d77bbf67\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-n7knc\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": 
\"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-n7knc\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:03Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:03Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://e9bd5423680abe4deb2367b507f2310b5da939b5e61e038c1741e967a4cb782f\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": 
\"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:02Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.22\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.22\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-j4bhd\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29160\",\n \"uid\": \"b2ad8c0c-03ad-427e-945d-27fb2a06bc73\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-zv7xp\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n 
\"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-zv7xp\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:47Z\",\n \"message\": \"0/4 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/master: }, 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }, 1 node(s) were unschedulable, 2 Insufficient memory. 
preemption: 0/4 nodes are available: 2 No preemption victims found for incoming pod, 2 Preemption is not helpful for scheduling..\",\n \"reason\": \"Unschedulable\",\n \"status\": \"False\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"phase\": \"Pending\",\n \"qosClass\": \"Burstable\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-jtwd9\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28874\",\n \"uid\": \"f0e7d1a1-7726-48b9-8add-81e2516cb1ae\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-mljgn\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": 
\"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-mljgn\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://703caf8d1b58118effec1563626fc942804a709559a99b615bf95a03149c75f3\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:08Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.48\",\n \"podIPs\": [\n 
{\n \"ip\": \"10.64.18.48\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-jx2bm\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28933\",\n \"uid\": \"8215c0d2-4eb6-4c25-a8aa-d43fb4855503\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-r7zv9\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n 
\"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-r7zv9\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://dc735994209a6425005fe26c96b42df5bdbe2cb7cca8d50bafdc0e19fb71ba2a\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:09Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.50\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.50\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n 
\"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-kjfkj\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28942\",\n \"uid\": \"527c5cde-0c4c-4172-955e-134a31494bfb\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-jnnmj\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-jnnmj\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": 
{\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:07Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:07Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://76ca18b153f56b8a28b230a76a18d11a7cd1181a1d383816a7398d5b1d8de8aa\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:06Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.35\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.35\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-kqlhz\",\n 
\"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28889\",\n \"uid\": \"9eae6c8b-f15d-4648-b64e-b41298d2bafc\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-st2jn\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-st2jn\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n 
}\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:45Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:45Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://74883ab58365776dc9829f91879147c104ada5ef7e83f4e0f1f4ae281557595a\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:09Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.57\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.57\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:45Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-kt7hv\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": 
\"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28914\",\n \"uid\": \"ea25ff8f-91c2-419a-8c76-b3e633156fdf\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-jllwc\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-jllwc\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n 
},\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://09750fd1f21328c89e252c035e44022a722224f603d24b133718fd6e50d2eea1\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:09Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.47\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.47\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-kxzvm\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28947\",\n \"uid\": \"cbc34cff-d4fe-4157-812f-387279a1f8db\"\n },\n \"spec\": {\n \"containers\": 
[\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-5nvfc\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-5nvfc\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": 
null,\n \"lastTransitionTime\": \"2022-12-06T18:42:03Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:03Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://62fea9c2ff00cb6b385d5c2f747a278c65133a61a205354734bff8e32a5f0cab\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:03Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.20\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.20\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-l6zth\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29250\",\n \"uid\": \"4bb5bca3-3326-4cc2-9c65-a47a1de96089\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n 
],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-rlrgq\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-rlrgq\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:18Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:18Z\",\n 
\"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://f3252b813eecfbda28b6a1d4db12459c2ee847fd67867935c3ae57b3b8a7b94d\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:17Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.5\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.5\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-l86rg\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29403\",\n \"uid\": \"0e139367-3cb4-4509-81f9-5f0b1408f8ce\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n 
{\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-md44x\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-md44x\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:29Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:29Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n 
\"containerStatuses\": [\n {\n \"containerID\": \"containerd://57d0899775e24112695e7ff50c1f52d20c3aa16cac07b5c2b515376a7bf0b134\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:29Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.13\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.13\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-lgjfs\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29209\",\n \"uid\": \"e9dbc998-49f2-4860-98d2-5c489d46b353\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-d98nr\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": 
true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-d98nr\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:24Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:24Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://55d1662fa2931c77ffc9f0a9c803a69943e442f81d754b016171c4062fbfbdbc\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": 
\"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:24Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.6\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.6\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-m98lv\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29335\",\n \"uid\": \"fe9a5a2b-ba63-40e6-b3a1-884bac2dc526\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-wvzhh\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": 
\"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-wvzhh\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://0b93ffb53d7c1649f4859b4b2201acbc056b15a5ebdb649af083d00b5c340488\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n 
\"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:35Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.44\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.44\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-mk2nr\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29392\",\n \"uid\": \"ac3f7095-b664-4c21-be62-36a69f1e72c7\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-w8bcm\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n 
\"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-w8bcm\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://808969af1e43a01c6a6b37aff1e8af5cdcec9ba88380114202204f500899b60e\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n 
\"podIP\": \"10.64.25.41\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.41\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:43Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-mtxsr\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28872\",\n \"uid\": \"09a4abb1-e452-4e88-b24d-e52733a0807d\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-m42x8\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": 
\"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-m42x8\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:43Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:01Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:01Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:43Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://97fab10f3cf776b3b95bfa95ebfeac04f557f16ef7a9cc506485785e6e612c35\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:00Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.16\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.16\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:43Z\"\n }\n },\n {\n \"apiVersion\": 
\"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-mzwcs\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28869\",\n \"uid\": \"a87eaddd-b14e-412f-aca3-393b11b24276\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-ght7g\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-ght7g\",\n \"projected\": {\n \"defaultMode\": 420,\n 
\"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:09Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://5f4728be947373a48a057e69e39dcac9df4879e0ad6a1f85a885810c57b75411\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:08Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.44\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.44\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n 
\"name\": \"memory-reservation-nff6c\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29386\",\n \"uid\": \"813a0f54-d6a0-448b-bc0d-4f0a0ab8156a\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-69hrc\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-69hrc\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": 
\"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:35Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:35Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://36ea8732967922dbcf84873dd6b27764fa99fb1d52642deb1bfa5d984ef3e22c\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:34Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.28\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.28\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-nmlg2\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n 
\"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29411\",\n \"uid\": \"c847d3f5-fe82-46d0-ba87-c3e979f27db2\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-x9bt6\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-x9bt6\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n 
\"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://f1ebb9a3772e7e504b6c0fa72bc62a867a1555e3465cb670545302531517634c\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.37\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.37\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-nnkgp\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28881\",\n \"uid\": 
\"e34a1ad4-9cb4-46e6-9e72-562c901cabeb\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-4cxkq\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-4cxkq\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n 
\"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:05Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:05Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://160dbe6a983ccd530e5fca16a1e0cf76279cd58dcf487cd6e9cc5b96a33e3be8\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:05Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.29\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.29\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-pj9g4\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29357\",\n \"uid\": \"00bdb75b-245f-4703-9374-8544d7f11448\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": 
\"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-7q7l8\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-7q7l8\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": 
\"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:36Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://fff4ff7593f4e621443e766c3a2ba9196c810b5892207ca0831a464f941d0c07\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:35Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.30\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.30\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-pvnps\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28849\",\n \"uid\": \"53247610-e4f3-4ad2-bd1d-826ef1064272\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n 
\"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-qt55t\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-qt55t\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:06Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:06Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": 
null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://c5f6aba5362b3e207f786de2d6d40bbed2d3b677f98846da3d302987fbbc39a1\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:05Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.31\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.31\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:45Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-pz4p7\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29300\",\n \"uid\": \"11b6cdaa-debd-4472-a7eb-3a7addb93a4f\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": 
\"kube-api-access-2l4nh\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-2l4nh\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:17Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:17Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": 
\"containerd://c11a5bca353cd87dc7d5d1b0b82aa012f64d4f45dbbeb08e7bdb2ecab4ca9bac\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:17Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.3\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.3\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-q94fq\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28887\",\n \"uid\": \"677b963f-dcaa-48bd-8c40-b09e8a62d9d4\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-xbf99\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n 
\"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-xbf99\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:10Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://9fe99d7c3a1ab990e76ad401a31de684fd9a0fe4a411fda5957932354ae2fd10\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": 
\"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:09Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.45\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.45\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-qg89c\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29351\",\n \"uid\": \"6f14bc9a-10ef-49cd-93fb-05efdc8b9484\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-748ql\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": 
\"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-748ql\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:34Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:34Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://973ef8018d9e780506527dff33796e77e2f6d0362881b2e740c27e99c481d6c1\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n 
\"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:33Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.26\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.26\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:43Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-qvlx5\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28902\",\n \"uid\": \"d0c7e705-d8af-4cdf-990a-128738a2640e\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-xd9dq\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n 
\"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-xd9dq\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:02Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:02Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://25527d75aadb2e788307fa4d1d47aab9fe721af24cd2a61bc399f08b2b6b4b1e\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:01Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n 
\"podIP\": \"10.64.18.17\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.17\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-r2bch\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29161\",\n \"uid\": \"1a79ce8c-0562-4ffb-824a-1bc28965c3ee\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-mptnz\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": 
\"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-mptnz\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:47Z\",\n \"message\": \"0/4 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/master: }, 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }, 1 node(s) were unschedulable, 2 Insufficient memory. preemption: 0/4 nodes are available: 2 No preemption victims found for incoming pod, 2 Preemption is not helpful for scheduling..\",\n \"reason\": \"Unschedulable\",\n \"status\": \"False\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"phase\": \"Pending\",\n \"qosClass\": \"Burstable\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-rfvhv\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29156\",\n \"uid\": \"bceaffb4-da51-4175-8c3b-755fa0a5922b\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": 
\"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-ztbd8\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-ztbd8\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:47Z\",\n \"message\": \"0/4 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/master: }, 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }, 1 node(s) were 
unschedulable, 2 Insufficient memory. preemption: 0/4 nodes are available: 2 No preemption victims found for incoming pod, 2 Preemption is not helpful for scheduling..\",\n \"reason\": \"Unschedulable\",\n \"status\": \"False\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"phase\": \"Pending\",\n \"qosClass\": \"Burstable\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-rgftw\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29413\",\n \"uid\": \"e080dd6d-d284-47e7-8751-56dfcf39b89f\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-7fdl4\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n 
\"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-7fdl4\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://18c80726ec446aa00ab2348239ad5aa0dd0ddfef3b37c0c5aa5b83a54566492c\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.48\",\n 
\"podIPs\": [\n {\n \"ip\": \"10.64.25.48\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:47Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-rmtxd\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29159\",\n \"uid\": \"4544ae86-aca8-4535-84fd-2c2217773050\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-q59bx\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 
300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-q59bx\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:47Z\",\n \"message\": \"0/4 nodes are available: 1 node(s) had untolerated taint {node-role.kubernetes.io/master: }, 1 node(s) had untolerated taint {node.kubernetes.io/unreachable: }, 1 node(s) were unschedulable, 2 Insufficient memory. preemption: 0/4 nodes are available: 2 No preemption victims found for incoming pod, 2 Preemption is not helpful for scheduling..\",\n \"reason\": \"Unschedulable\",\n \"status\": \"False\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"phase\": \"Pending\",\n \"qosClass\": \"Burstable\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-rr44q\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29330\",\n \"uid\": \"ef5b9e8a-6c85-49e2-a224-36685e42bd41\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": 
\"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-pnnkq\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-pnnkq\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": 
\"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:37Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://77aa8e950a216dd276762feb60f04de20aab7aa5071e3625a4aa50f4e600da25\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:43:36Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.27\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.25.45\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.25.45\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:43:13Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-s2xtc\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28925\",\n \"uid\": \"58441ea3-187a-4efc-a8c9-c2d61f354087\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n 
\"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-hb46k\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-hb46k\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:06Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:06Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": 
null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://223e9050313b0df2aab79304fde263385a48b271c99f5ca25828abb93389d92f\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:06Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.32\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.32\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-s75ff\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28935\",\n \"uid\": \"8d44e258-7cc2-421a-9afb-a8205eb8872a\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": 
\"kube-api-access-9x6mb\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-9x6mb\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:04Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:04Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": 
\"containerd://da6d67f959595b408e166231cf140ad54dc3bb6a821ccea7deb11ecfa62a0a00\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": \"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:03Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.24\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.24\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:44Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-scxrt\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"28867\",\n \"uid\": \"c8a8ff69-1c91-4d87-8578-367e818a43e8\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-v87wc\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": 
\"ca-minion-group-pg0w\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": \"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-v87wc\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:03Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:42:03Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:41:44Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n \"containerStatuses\": [\n {\n \"containerID\": \"containerd://7e5f1427e99e657703eceb52d7ef149c3f1bb578ed7845ff8556bf7c2c5096a8\",\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imageID\": 
\"registry.k8s.io/pause@sha256:7031c1b283388d2c2e09b57badb803c05ebed362dc88d84b480cc47f72a21097\",\n \"lastState\": {},\n \"name\": \"memory-reservation\",\n \"ready\": true,\n \"restartCount\": 0,\n \"started\": true,\n \"state\": {\n \"running\": {\n \"startedAt\": \"2022-12-06T18:42:02Z\"\n }\n }\n }\n ],\n \"hostIP\": \"10.138.0.19\",\n \"phase\": \"Running\",\n \"podIP\": \"10.64.18.19\",\n \"podIPs\": [\n {\n \"ip\": \"10.64.18.19\"\n }\n ],\n \"qosClass\": \"Burstable\",\n \"startTime\": \"2022-12-06T18:41:44Z\"\n }\n },\n {\n \"apiVersion\": \"v1\",\n \"kind\": \"Pod\",\n \"metadata\": {\n \"creationTimestamp\": \"2022-12-06T18:41:46Z\",\n \"generateName\": \"memory-reservation-\",\n \"labels\": {\n \"name\": \"memory-reservation\"\n },\n \"name\": \"memory-reservation-sgl84\",\n \"namespace\": \"autoscaling-8765\",\n \"ownerReferences\": [\n {\n \"apiVersion\": \"v1\",\n \"blockOwnerDeletion\": true,\n \"controller\": true,\n \"kind\": \"ReplicationController\",\n \"name\": \"memory-reservation\",\n \"uid\": \"aa408279-e8a9-42e9-a136-5b9c5090a3c4\"\n }\n ],\n \"resourceVersion\": \"29327\",\n \"uid\": \"d45aedec-de44-4d52-9bf2-0b7673a2c768\"\n },\n \"spec\": {\n \"containers\": [\n {\n \"image\": \"registry.k8s.io/pause:3.9\",\n \"imagePullPolicy\": \"IfNotPresent\",\n \"name\": \"memory-reservation\",\n \"ports\": [\n {\n \"containerPort\": 80,\n \"protocol\": \"TCP\"\n }\n ],\n \"resources\": {\n \"requests\": {\n \"memory\": \"151057858\"\n }\n },\n \"terminationMessagePath\": \"/dev/termination-log\",\n \"terminationMessagePolicy\": \"File\",\n \"volumeMounts\": [\n {\n \"mountPath\": \"/var/run/secrets/kubernetes.io/serviceaccount\",\n \"name\": \"kube-api-access-jv9n2\",\n \"readOnly\": true\n }\n ]\n }\n ],\n \"dnsPolicy\": \"Default\",\n \"enableServiceLinks\": true,\n \"nodeName\": \"ca-minion-group-1-5w6g\",\n \"preemptionPolicy\": \"PreemptLowerPriority\",\n \"priority\": 0,\n \"restartPolicy\": \"Always\",\n \"schedulerName\": 
\"default-scheduler\",\n \"securityContext\": {},\n \"serviceAccount\": \"default\",\n \"serviceAccountName\": \"default\",\n \"terminationGracePeriodSeconds\": 1,\n \"tolerations\": [\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/not-ready\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n },\n {\n \"effect\": \"NoExecute\",\n \"key\": \"node.kubernetes.io/unreachable\",\n \"operator\": \"Exists\",\n \"tolerationSeconds\": 300\n }\n ],\n \"volumes\": [\n {\n \"name\": \"kube-api-access-jv9n2\",\n \"projected\": {\n \"defaultMode\": 420,\n \"sources\": [\n {\n \"serviceAccountToken\": {\n \"expirationSeconds\": 3607,\n \"path\": \"token\"\n }\n },\n {\n \"configMap\": {\n \"items\": [\n {\n \"key\": \"ca.crt\",\n \"path\": \"ca.crt\"\n }\n ],\n \"name\": \"kube-root-ca.crt\"\n }\n },\n {\n \"downwardAPI\": {\n \"items\": [\n {\n \"fieldRef\": {\n \"apiVersion\": \"v1\",\n \"fieldPath\": \"metadata.namespace\"\n },\n \"path\": \"namespace\"\n }\n ]\n }\n }\n ]\n }\n }\n ]\n },\n \"status\": {\n \"conditions\": [\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"Initialized\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:34Z\",\n \"status\": \"True\",\n \"type\": \"Ready\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:34Z\",\n \"status\": \"True\",\n \"type\": \"ContainersReady\"\n },\n {\n \"lastProbeTime\": null,\n \"lastTransitionTime\": \"2022-12-06T18:43:13Z\",\n \"status\": \"True\",\n \"type\": \"PodScheduled\"\n }\n ],\n