Result: FAILURE
Tests: 0 failed / 0 succeeded
Started: 2019-12-12 06:34
Elapsed: 1h24m
Revision: release-0.4
resultstore: https://source.cloud.google.com/results/invocations/e1abd27d-af03-4b70-ae64-8f6d4083df82/targets/test

No Test Failures!


Error lines from build-log.txt

... skipping 1256 lines ...
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 0 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
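The trace above is one iteration of the job's machine-readiness poll: kubectl dumps the Machine objects from the kind-hosted management cluster, jq extracts each .status.phase, and awk prints both the number of phases matching the pattern and the total line count (NR), which the shell reads into two variables and compares against the expected count of 5. A minimal bash sketch of such a loop follows, assuming the trace reflects logic like this; the function name wait_for_machines and the 10s retry interval are hypothetical, while the pipelines, the expected count, and the kubeconfig lookup are copied from the trace.

# Hedged reconstruction of the poll suggested by the trace above; not the
# job's actual script. wait_for_machines and the sleep interval are assumptions.
wait_for_machines() {
  local expected=5
  local kubeconfig running failed total
  kubeconfig="$(kind get kubeconfig-path --name=clusterapi)"
  while true; do
    # Count Machines whose phase matches (r|R)unning; awk's NR gives the total.
    read -r running total < <(kubectl get machines --kubeconfig="${kubeconfig}" -o json |
      jq -r '.items[].status.phase' |
      awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}')
    # Same pipeline, counting (f|F)ailed Machines.
    read -r failed total < <(kubectl get machines --kubeconfig="${kubeconfig}" -o json |
      jq -r '.items[].status.phase' |
      awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}')
    # Done once all expected Machines exist and all of them are Running.
    [[ "${total}" == "${expected}" && "${running}" == "${expected}" ]] && return 0
    # Any Failed Machine aborts the wait (the trace's "[[ ! 0 -eq 0 ]]" check).
    if [[ ! "${failed}" -eq 0 ]]; then
      echo "$(date '+[%H:%M:%S]') ${failed} machine(s) in phase Failed" >&2
      return 1
    fi
    echo "$(date '+[%H:%M:%S]') ${running}/${expected} machines Running; retrying"
    sleep 10   # interval is an assumption; the log does not show it
  done
}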
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 0 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ jq -r '.items[].status.phase'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ jq -r '.items[].status.phase'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 1 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 4 == \5 ]]
+ read failed total
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 4 == \5 ]]
+ read failed total
+++ kind get kubeconfig-path --name=clusterapi
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 4 == \5 ]]
+ read failed total
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 16 lines ...
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ awk 'BEGIN{count=0} /(r|R)unning/{count++} END{print count " " NR}'
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ 5 == \5 ]]
+ [[ 4 == \5 ]]
+ read failed total
++ awk 'BEGIN{count=0} /(f|F)ailed/{count++} END{print count " " NR}'
++ jq -r '.items[].status.phase'
+++ kind get kubeconfig-path --name=clusterapi
++ kubectl get machines --kubeconfig=/root/.kube/kind-config-clusterapi -o json
+ [[ ! 0 -eq 0 ]]
++ date '+[%H:%M:%S]'
... skipping 42 lines ...
++ go env GOPATH
+ cd /home/prow/go/src/k8s.io/kubernetes
+ ./hack/ginkgo-e2e.sh --provider=skeleton --num-nodes=2 '--ginkgo.focus=\[Conformance\]' --ginkgo.skip= --report-dir=/logs/artifacts --disable-log-dump=true
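The lines above show the job changing into the kubernetes source tree on GOPATH and launching the conformance suite through hack/ginkgo-e2e.sh with the skeleton provider, which runs the tests against whatever cluster the supplied kubeconfig points at. As a hedged sketch, the same conformance focus could be rerun by hand along these lines; the KUBECONFIG path and report directory are placeholders (the job itself used the cluster-api-provider-aws kubeconfig and /logs/artifacts), and the flags are copied from the log.

# Sketch only; assumes a kubernetes source checkout on GOPATH and a reachable cluster.
cd "$(go env GOPATH)/src/k8s.io/kubernetes"
export KUBECONFIG="${HOME}/.kube/config"   # hypothetical path
./hack/ginkgo-e2e.sh --provider=skeleton --num-nodes=2 \
  '--ginkgo.focus=\[Conformance\]' --ginkgo.skip= \
  --report-dir=/tmp/artifacts --disable-log-dump=true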
Conformance test: not doing test setup.
I1212 06:54:36.001915   20158 test_context.go:419] Tolerating taints "node-role.kubernetes.io/master" when considering if nodes are ready
I1212 06:54:36.003266   20158 e2e.go:109] Starting e2e run "c795b603-5298-49c4-9ddc-db966c6b84dc" on Ginkgo node 1
{"msg":"Test Suite starting","total":280,"completed":0,"skipped":0,"failed":0}
Running Suite: Kubernetes e2e suite
===================================
Random Seed: 1576133673 - Will randomize all specs
Will run 280 of 4840 specs

Dec 12 06:54:36.044: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
Dec 12 06:54:36.071: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable
Dec 12 06:54:36.365: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Dec 12 06:54:36.640: INFO: The status of Pod calico-node-fgwgw is Running (Ready = false), waiting for it to be either Running (with Ready = true) or Failed
Dec 12 06:54:36.640: INFO: 22 / 23 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Dec 12 06:54:36.640: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready.
Dec 12 06:54:36.640: INFO: POD                NODE                                      PHASE    GRACE  CONDITIONS
Dec 12 06:54:36.640: INFO: calico-node-fgwgw  ip-10-0-0-157.us-west-2.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:29 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:11 +0000 UTC ContainersNotReady containers with unready status: [calico-node]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:11 +0000 UTC ContainersNotReady containers with unready status: [calico-node]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:11 +0000 UTC  }]
Dec 12 06:54:36.640: INFO: 
Dec 12 06:54:38.831: INFO: The status of Pod calico-node-fgwgw is Running (Ready = false), waiting for it to be either Running (with Ready = true) or Failed
Dec 12 06:54:38.832: INFO: 22 / 23 pods in namespace 'kube-system' are running and ready (2 seconds elapsed)
Dec 12 06:54:38.832: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready.
Dec 12 06:54:38.832: INFO: POD                NODE                                      PHASE    GRACE  CONDITIONS
Dec 12 06:54:38.832: INFO: calico-node-fgwgw  ip-10-0-0-157.us-west-2.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:29 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:11 +0000 UTC ContainersNotReady containers with unready status: [calico-node]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:11 +0000 UTC ContainersNotReady containers with unready status: [calico-node]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:11 +0000 UTC  }]
Dec 12 06:54:38.833: INFO: 
Dec 12 06:54:40.834: INFO: The status of Pod calico-node-fgwgw is Running (Ready = false), waiting for it to be either Running (with Ready = true) or Failed
Dec 12 06:54:40.835: INFO: 22 / 23 pods in namespace 'kube-system' are running and ready (4 seconds elapsed)
Dec 12 06:54:40.835: INFO: expected 3 pod replicas in namespace 'kube-system', 3 are Running and Ready.
Dec 12 06:54:40.835: INFO: POD                NODE                                      PHASE    GRACE  CONDITIONS
Dec 12 06:54:40.835: INFO: calico-node-fgwgw  ip-10-0-0-157.us-west-2.compute.internal  Running         [{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:29 +0000 UTC  } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:11 +0000 UTC ContainersNotReady containers with unready status: [calico-node]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:11 +0000 UTC ContainersNotReady containers with unready status: [calico-node]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-12-12 06:54:11 +0000 UTC  }]
Dec 12 06:54:40.835: INFO: 
Dec 12 06:54:42.832: INFO: 23 / 23 pods in namespace 'kube-system' are running and ready (6 seconds elapsed)
... skipping 42 lines ...
Dec 12 06:54:50.127: INFO: Pod "test-recreate-deployment-5f94c574ff-f7w2s" is not available:
&Pod{ObjectMeta:{test-recreate-deployment-5f94c574ff-f7w2s test-recreate-deployment-5f94c574ff- deployment-3519 /api/v1/namespaces/deployment-3519/pods/test-recreate-deployment-5f94c574ff-f7w2s fbae9153-0922-486c-8643-6281e7dbe163 1444 0 2019-12-12 06:54:49 +0000 UTC <nil> <nil> map[name:sample-pod-3 pod-template-hash:5f94c574ff] map[] [{apps/v1 ReplicaSet test-recreate-deployment-5f94c574ff 53fe827d-a954-46d6-8c49-51a31ffdd163 0xc002683057 0xc002683058}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-djmjb,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-djmjb,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-djmjb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-96.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 06:54:49 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 06:54:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 06:54:49 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 06:54:49 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.96,PodIP:,StartTime:2019-12-12 06:54:49 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
[AfterEach] [sig-apps] Deployment
  test/e2e/framework/framework.go:150
Dec 12 06:54:50.127: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "deployment-3519" for this suite.
•{"msg":"PASSED [sig-apps] Deployment RecreateDeployment should delete old pods and create new ones [Conformance]","total":280,"completed":1,"skipped":29,"failed":0}
SS
------------------------------
[sig-storage] EmptyDir wrapper volumes 
  should not cause race condition when used for configmaps [Serial] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir wrapper volumes
... skipping 25 lines ...
Dec 12 06:56:02.364: INFO: Terminating ReplicationController wrapped-volume-race-f74c7903-fa5a-4573-862e-2fe4a813404c pods took: 400.956524ms
STEP: Cleaning up the configMaps
[AfterEach] [sig-storage] EmptyDir wrapper volumes
  test/e2e/framework/framework.go:150
Dec 12 06:56:16.840: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-wrapper-8984" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir wrapper volumes should not cause race condition when used for configmaps [Serial] [Conformance]","total":280,"completed":2,"skipped":31,"failed":0}
SSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] 
  should include custom resource definition resources in discovery documents [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
... skipping 12 lines ...
STEP: fetching the /apis/apiextensions.k8s.io/v1 discovery document
STEP: finding customresourcedefinitions resources in the /apis/apiextensions.k8s.io/v1 discovery document
[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 06:56:17.395: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "custom-resource-definition-2719" for this suite.
•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] should include custom resource definition resources in discovery documents [Conformance]","total":280,"completed":3,"skipped":45,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Security Context When creating a container with runAsUser 
  should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Security Context
... skipping 12 lines ...
Dec 12 06:56:22.015: INFO: Pod "busybox-user-65534-a494e997-d509-4ef2-aef2-0bfd7f6ceb5b": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.182587756s
Dec 12 06:56:22.015: INFO: Pod "busybox-user-65534-a494e997-d509-4ef2-aef2-0bfd7f6ceb5b" satisfied condition "success or failure"
[AfterEach] [k8s.io] Security Context
  test/e2e/framework/framework.go:150
Dec 12 06:56:22.015: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "security-context-test-108" for this suite.
•{"msg":"PASSED [k8s.io] Security Context When creating a container with runAsUser should run the container with uid 65534 [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":4,"skipped":86,"failed":0}
SSSSSSS
------------------------------
[sig-storage] EmptyDir volumes 
  should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 06:56:24.792: INFO: Waiting for pod pod-2f6c90bd-f06e-4530-bfe8-2eabd77d3a65 to disappear
Dec 12 06:56:24.851: INFO: Pod pod-2f6c90bd-f06e-4530-bfe8-2eabd77d3a65 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 06:56:24.851: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-704" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":5,"skipped":93,"failed":0}
SSSSSSS
------------------------------
[k8s.io] InitContainer [NodeConformance] 
  should invoke init containers on a RestartAlways pod [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] InitContainer [NodeConformance]
... skipping 9 lines ...
STEP: creating the pod
Dec 12 06:56:25.218: INFO: PodSpec: initContainers in spec.initContainers
[AfterEach] [k8s.io] InitContainer [NodeConformance]
  test/e2e/framework/framework.go:150
Dec 12 06:56:28.859: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "init-container-185" for this suite.
•{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should invoke init containers on a RestartAlways pod [Conformance]","total":280,"completed":6,"skipped":100,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Subpath Atomic writer volumes 
  should support subpaths with secret pod [LinuxOnly] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Subpath
... skipping 31 lines ...
STEP: Deleting pod pod-subpath-test-secret-fgp7
Dec 12 06:56:52.405: INFO: Deleting pod "pod-subpath-test-secret-fgp7" in namespace "subpath-6494"
[AfterEach] [sig-storage] Subpath
  test/e2e/framework/framework.go:150
Dec 12 06:56:52.465: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "subpath-6494" for this suite.
•{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with secret pod [LinuxOnly] [Conformance]","total":280,"completed":7,"skipped":125,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-network] Networking Granular Checks: Pods 
  should function for intra-pod communication: http [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] Networking
... skipping 16 lines ...
Dec 12 06:57:18.218: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
Dec 12 06:57:18.666: INFO: Waiting for responses: map[]
[AfterEach] [sig-network] Networking
  test/e2e/framework/framework.go:150
Dec 12 06:57:18.666: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pod-network-test-7943" for this suite.
•{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: http [NodeConformance] [Conformance]","total":280,"completed":8,"skipped":164,"failed":0}
SSSSSSSSSSSSSSS
------------------------------
[sig-storage] ConfigMap 
  binary data should be reflected in volume [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] ConfigMap
... skipping 9 lines ...
STEP: Waiting for pod with text data
STEP: Waiting for pod with binary data
[AfterEach] [sig-storage] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 06:57:25.523: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-9939" for this suite.
•{"msg":"PASSED [sig-storage] ConfigMap binary data should be reflected in volume [NodeConformance] [Conformance]","total":280,"completed":9,"skipped":179,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] Garbage collector 
  should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Garbage collector
... skipping 33 lines ...

[AfterEach] [sig-api-machinery] Garbage collector
  test/e2e/framework/framework.go:150
Dec 12 06:57:32.323: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
W1212 06:57:32.322980   20158 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
STEP: Destroying namespace "gc-1927" for this suite.
•{"msg":"PASSED [sig-api-machinery] Garbage collector should keep the rc around until all its pods are deleted if the deleteOptions says so [Conformance]","total":280,"completed":10,"skipped":206,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] EmptyDir volumes 
  should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 06:57:35.078: INFO: Waiting for pod pod-b351d91c-bf47-48fb-8605-3ee108095f23 to disappear
Dec 12 06:57:35.137: INFO: Pod pod-b351d91c-bf47-48fb-8605-3ee108095f23 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 06:57:35.137: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-2352" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":11,"skipped":292,"failed":0}
SSSSSSSSSSSSSSSS
------------------------------
[sig-apps] Deployment 
  RollingUpdateDeployment should delete old pods and create new ones [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Deployment
... skipping 26 lines ...
Dec 12 06:57:40.408: INFO: Pod "test-rolling-update-deployment-67cf4f6444-ncqhs" is available:
&Pod{ObjectMeta:{test-rolling-update-deployment-67cf4f6444-ncqhs test-rolling-update-deployment-67cf4f6444- deployment-2365 /api/v1/namespaces/deployment-2365/pods/test-rolling-update-deployment-67cf4f6444-ncqhs 75fd4f12-01aa-432c-b731-58e041f1e4b9 3487 0 2019-12-12 06:57:37 +0000 UTC <nil> <nil> map[name:sample-pod pod-template-hash:67cf4f6444] map[cni.projectcalico.org/podIP:192.168.220.17/32] [{apps/v1 ReplicaSet test-rolling-update-deployment-67cf4f6444 06b41bda-8ab9-4a0a-874c-847af583c994 0xc000d61f77 0xc000d61f78}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-v66ml,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-v66ml,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:gcr.io/kubernetes-e2e-test-images/agnhost:2.8,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-v66ml,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-18.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 06:57:37 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 06:57:38 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 06:57:38 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 06:57:37 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.18,PodIP:192.168.220.17,StartTime:2019-12-12 06:57:37 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2019-12-12 06:57:38 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:gcr.io/kubernetes-e2e-test-images/agnhost:2.8,ImageID:gcr.io/kubernetes-e2e-test-images/agnhost@sha256:daf5332100521b1256d0e3c56d697a238eaec3af48897ed9167cbadd426773b5,ContainerID:containerd://00e60e624bc54559b8cd013fa2b5b141854889076d6015060a9a9f0f77239e68,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.220.17,},},EphemeralContainerStatuses:[]ContainerStatus{},},}
[AfterEach] [sig-apps] Deployment
  test/e2e/framework/framework.go:150
Dec 12 06:57:40.408: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "deployment-2365" for this suite.
•{"msg":"PASSED [sig-apps] Deployment RollingUpdateDeployment should delete old pods and create new ones [Conformance]","total":280,"completed":12,"skipped":308,"failed":0}
SSSSSSS
------------------------------
[sig-api-machinery] ResourceQuota 
  should verify ResourceQuota with best effort scope. [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] ResourceQuota
... skipping 19 lines ...
STEP: Deleting the pod
STEP: Ensuring resource quota status released the pod usage
[AfterEach] [sig-api-machinery] ResourceQuota
  test/e2e/framework/framework.go:150
Dec 12 06:57:57.703: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "resourcequota-4220" for this suite.
•{"msg":"PASSED [sig-api-machinery] ResourceQuota should verify ResourceQuota with best effort scope. [Conformance]","total":280,"completed":13,"skipped":315,"failed":0}
SSSSSSSS
------------------------------
[sig-storage] Downward API volume 
  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Downward API volume
... skipping 18 lines ...
Dec 12 06:58:02.529: INFO: Waiting for pod downwardapi-volume-9dcccad2-8559-47be-9792-75851268e1c5 to disappear
Dec 12 06:58:02.598: INFO: Pod downwardapi-volume-9dcccad2-8559-47be-9792-75851268e1c5 no longer exists
[AfterEach] [sig-storage] Downward API volume
  test/e2e/framework/framework.go:150
Dec 12 06:58:02.599: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-6454" for this suite.
•{"msg":"PASSED [sig-storage] Downward API volume should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]","total":280,"completed":14,"skipped":323,"failed":0}
SSSSSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  should deny crd creation [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 22 lines ...
  test/e2e/framework/framework.go:150
Dec 12 06:58:09.626: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-3550" for this suite.
STEP: Destroying namespace "webhook-3550-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should deny crd creation [Conformance]","total":280,"completed":15,"skipped":329,"failed":0}
SSSSSSS
------------------------------
[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] 
  removes definition from spec when one version gets changed to not be served [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
... skipping 10 lines ...
STEP: check the unserved version gets removed
STEP: check the other version is not changed
[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 06:58:37.030: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "crd-publish-openapi-5418" for this suite.
•{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] removes definition from spec when one version gets changed to not be served [Conformance]","total":280,"completed":16,"skipped":336,"failed":0}
SSSSSSSSSSSS
------------------------------
[sig-storage] ConfigMap 
  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] ConfigMap
... skipping 16 lines ...
Dec 12 06:58:39.859: INFO: Waiting for pod pod-configmaps-e6091458-03ee-45a2-9510-0cefba80d350 to disappear
Dec 12 06:58:39.914: INFO: Pod pod-configmaps-e6091458-03ee-45a2-9510-0cefba80d350 no longer exists
[AfterEach] [sig-storage] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 06:58:39.915: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-8526" for this suite.
•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":280,"completed":17,"skipped":348,"failed":0}
SSS
------------------------------
[sig-storage] ConfigMap 
  should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] ConfigMap
... skipping 16 lines ...
Dec 12 06:58:42.685: INFO: Waiting for pod pod-configmaps-06615fbc-28b3-4590-b2c5-7c543b49aeb3 to disappear
Dec 12 06:58:42.741: INFO: Pod pod-configmaps-06615fbc-28b3-4590-b2c5-7c543b49aeb3 no longer exists
[AfterEach] [sig-storage] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 06:58:42.741: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-6402" for this suite.
•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings as non-root [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":18,"skipped":351,"failed":0}
SSSSSSSSSSSSS
------------------------------
[k8s.io] Pods 
  should get a host IP [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Pods
... skipping 9 lines ...
STEP: creating pod
Dec 12 06:58:45.378: INFO: Pod pod-hostip-dabb2563-90f0-4327-b555-d67f09ab585b has hostIP: 10.0.0.96
[AfterEach] [k8s.io] Pods
  test/e2e/framework/framework.go:150
Dec 12 06:58:45.379: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pods-5946" for this suite.
•{"msg":"PASSED [k8s.io] Pods should get a host IP [NodeConformance] [Conformance]","total":280,"completed":19,"skipped":364,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected configMap 
  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected configMap
... skipping 16 lines ...
Dec 12 06:58:48.167: INFO: Waiting for pod pod-projected-configmaps-702732fd-e8ed-4e79-83a0-f5523ae3a5e2 to disappear
Dec 12 06:58:48.224: INFO: Pod pod-projected-configmaps-702732fd-e8ed-4e79-83a0-f5523ae3a5e2 no longer exists
[AfterEach] [sig-storage] Projected configMap
  test/e2e/framework/framework.go:150
Dec 12 06:58:48.224: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-5830" for this suite.
•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":280,"completed":20,"skipped":392,"failed":0}
SSSSSSS
------------------------------
[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] 
  works for CRD preserving unknown fields in an embedded object [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
... skipping 23 lines ...
Dec 12 06:58:56.873: INFO: stderr: ""
Dec 12 06:58:56.873: INFO: stdout: "KIND:     E2e-test-crd-publish-openapi-9881-crd\nVERSION:  crd-publish-openapi-test-unknown-in-nested.example.com/v1\n\nDESCRIPTION:\n     preserve-unknown-properties in nested field for Testing\n\nFIELDS:\n   apiVersion\t<string>\n     APIVersion defines the versioned schema of this representation of an\n     object. Servers should convert recognized schemas to the latest internal\n     value, and may reject unrecognized values. More info:\n     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources\n\n   kind\t<string>\n     Kind is a string value representing the REST resource this object\n     represents. Servers may infer this from the endpoint the client submits\n     requests to. Cannot be updated. In CamelCase. More info:\n     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\n\n   metadata\t<Object>\n     Standard object's metadata. More info:\n     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata\n\n   spec\t<map[string]>\n     Specification of Waldo\n\n   status\t<Object>\n     Status of Waldo\n\n"
[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 06:59:01.759: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "crd-publish-openapi-3668" for this suite.
•{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields in an embedded object [Conformance]","total":280,"completed":21,"skipped":399,"failed":0}
SSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Subpath Atomic writer volumes 
  should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Subpath
... skipping 31 lines ...
STEP: Deleting pod pod-subpath-test-configmap-pnw6
Dec 12 06:59:25.318: INFO: Deleting pod "pod-subpath-test-configmap-pnw6" in namespace "subpath-6348"
[AfterEach] [sig-storage] Subpath
  test/e2e/framework/framework.go:150
Dec 12 06:59:25.376: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "subpath-6348" for this suite.
•{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with configmap pod with mountPath of existing file [LinuxOnly] [Conformance]","total":280,"completed":22,"skipped":421,"failed":0}
SSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Downward API volume 
  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Downward API volume
... skipping 17 lines ...
Dec 12 06:59:28.101: INFO: Waiting for pod downwardapi-volume-a9f80e66-0096-49f2-b3c2-4c56a25af98a to disappear
Dec 12 06:59:28.157: INFO: Pod downwardapi-volume-a9f80e66-0096-49f2-b3c2-4c56a25af98a no longer exists
[AfterEach] [sig-storage] Downward API volume
  test/e2e/framework/framework.go:150
Dec 12 06:59:28.159: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-7488" for this suite.
•{"msg":"PASSED [sig-storage] Downward API volume should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":23,"skipped":440,"failed":0}
SSSS
------------------------------
[k8s.io] Probing container 
  should *not* be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Probing container
... skipping 12 lines ...
Dec 12 06:59:30.757: INFO: Initial restart count of pod busybox-b0b79854-9931-4640-8133-a00750f9a465 is 0
STEP: deleting the pod
[AfterEach] [k8s.io] Probing container
  test/e2e/framework/framework.go:150
Dec 12 07:03:31.724: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-probe-1621" for this suite.
•{"msg":"PASSED [k8s.io] Probing container should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]","total":280,"completed":24,"skipped":444,"failed":0}
SSSSSSSSSSSSSSS
------------------------------
[sig-scheduling] SchedulerPredicates [Serial] 
  validates that NodeSelector is respected if not matching  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
... skipping 26 lines ...
[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
  test/e2e/framework/framework.go:150
Dec 12 07:03:33.802: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "sched-pred-8097" for this suite.
[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
  test/e2e/scheduling/predicates.go:77
•{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if not matching  [Conformance]","total":280,"completed":25,"skipped":459,"failed":0}
SSSSSSS
------------------------------
[sig-storage] Projected downwardAPI 
  should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected downwardAPI
... skipping 17 lines ...
Dec 12 07:03:36.528: INFO: Waiting for pod downwardapi-volume-21abe6e4-5cac-4083-a9b0-9834a206b184 to disappear
Dec 12 07:03:36.585: INFO: Pod downwardapi-volume-21abe6e4-5cac-4083-a9b0-9834a206b184 no longer exists
[AfterEach] [sig-storage] Projected downwardAPI
  test/e2e/framework/framework.go:150
Dec 12 07:03:36.585: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-1037" for this suite.
•{"msg":"PASSED [sig-storage] Projected downwardAPI should set mode on item file [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":26,"skipped":466,"failed":0}

------------------------------
[sig-storage] Projected configMap 
  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected configMap
... skipping 16 lines ...
Dec 12 07:03:39.354: INFO: Waiting for pod pod-projected-configmaps-4b09ea96-7018-40be-99cc-cc389e8f2795 to disappear
Dec 12 07:03:39.410: INFO: Pod pod-projected-configmaps-4b09ea96-7018-40be-99cc-cc389e8f2795 no longer exists
[AfterEach] [sig-storage] Projected configMap
  test/e2e/framework/framework.go:150
Dec 12 07:03:39.410: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-7326" for this suite.
•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":27,"skipped":466,"failed":0}
SSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  should be able to deny pod and configmap creation [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 28 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:03:56.908: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-9480" for this suite.
STEP: Destroying namespace "webhook-9480-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny pod and configmap creation [Conformance]","total":280,"completed":28,"skipped":469,"failed":0}
SSSSSSSSS
------------------------------
[sig-cli] Kubectl client Kubectl run pod 
  should create a pod from an image when restart is Never  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 19 lines ...
Dec 12 07:04:12.200: INFO: stderr: ""
Dec 12 07:04:12.200: INFO: stdout: "pod \"e2e-test-httpd-pod\" deleted\n"
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:04:12.200: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-9461" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl run pod should create a pod from an image when restart is Never  [Conformance]","total":280,"completed":29,"skipped":478,"failed":0}
SSSSSSS
------------------------------
[sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition 
  listing custom resource definition objects works  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
... skipping 6 lines ...
  test/e2e/framework/framework.go:639
Dec 12 07:04:12.546: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
[AfterEach] [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 07:04:17.131: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "custom-resource-definition-5798" for this suite.
•{"msg":"PASSED [sig-api-machinery] CustomResourceDefinition resources [Privileged:ClusterAdmin] Simple CustomResourceDefinition listing custom resource definition objects works  [Conformance]","total":280,"completed":30,"skipped":485,"failed":0}
SSSSSSSSSSS
------------------------------
[sig-storage] EmptyDir volumes 
  volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 07:04:19.908: INFO: Waiting for pod pod-aaa3b605-4405-463a-aa88-036876d158a1 to disappear
Dec 12 07:04:19.965: INFO: Pod pod-aaa3b605-4405-463a-aa88-036876d158a1 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:04:19.965: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-8341" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes volume on default medium should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":31,"skipped":496,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] 
  should be able to convert a non homogeneous list of CRs [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin]
... skipping 22 lines ...
[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 07:04:25.787: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "crd-webhook-934" for this suite.
[AfterEach] [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/crd_conversion_webhook.go:136
•{"msg":"PASSED [sig-api-machinery] CustomResourceConversionWebhook [Privileged:ClusterAdmin] should be able to convert a non homogeneous list of CRs [Conformance]","total":280,"completed":32,"skipped":519,"failed":0}
SSS
------------------------------
[sig-api-machinery] ResourceQuota 
  should create a ResourceQuota and capture the life of a replication controller. [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] ResourceQuota
... skipping 12 lines ...
STEP: Deleting a ReplicationController
STEP: Ensuring resource quota status released usage
[AfterEach] [sig-api-machinery] ResourceQuota
  test/e2e/framework/framework.go:150
Dec 12 07:04:37.937: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "resourcequota-4248" for this suite.
•{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replication controller. [Conformance]","total":280,"completed":33,"skipped":522,"failed":0}
SSSSSSSSSSSSSSS
------------------------------
[k8s.io] Security Context When creating a pod with privileged 
  should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Security Context
... skipping 12 lines ...
Dec 12 07:04:40.466: INFO: Pod "busybox-privileged-false-1a447e2b-ec15-42ee-96e2-c38a6260dd84" satisfied condition "success or failure"
Dec 12 07:04:40.528: INFO: Got logs for pod "busybox-privileged-false-1a447e2b-ec15-42ee-96e2-c38a6260dd84": "ip: RTNETLINK answers: Operation not permitted\n"
[AfterEach] [k8s.io] Security Context
  test/e2e/framework/framework.go:150
Dec 12 07:04:40.528: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "security-context-test-5176" for this suite.
•{"msg":"PASSED [k8s.io] Security Context When creating a pod with privileged should run the container as unprivileged when false [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":34,"skipped":537,"failed":0}
SSSSS
------------------------------
[sig-api-machinery] Namespaces [Serial] 
  should ensure that all services are removed when a namespace is deleted [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Namespaces [Serial]
... skipping 15 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:04:47.517: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "namespaces-8701" for this suite.
STEP: Destroying namespace "nsdeletetest-374" for this suite.
Dec 12 07:04:47.694: INFO: Namespace nsdeletetest-374 was already deleted
STEP: Destroying namespace "nsdeletetest-7265" for this suite.
•{"msg":"PASSED [sig-api-machinery] Namespaces [Serial] should ensure that all services are removed when a namespace is deleted [Conformance]","total":280,"completed":35,"skipped":542,"failed":0}

------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  listing mutating webhooks should work [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 22 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:04:53.968: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-7776" for this suite.
STEP: Destroying namespace "webhook-7776-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing mutating webhooks should work [Conformance]","total":280,"completed":36,"skipped":542,"failed":0}
SSSSSSSSSSSS
------------------------------
[sig-network] Networking Granular Checks: Pods 
  should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] Networking
... skipping 16 lines ...
Dec 12 07:05:14.256: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
Dec 12 07:05:14.701: INFO: Found all expected endpoints: [netserver-1]
[AfterEach] [sig-network] Networking
  test/e2e/framework/framework.go:150
Dec 12 07:05:14.701: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pod-network-test-1456" for this suite.
•{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for node-pod communication: http [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":37,"skipped":554,"failed":0}

------------------------------
[sig-api-machinery] Secrets 
  should fail to create secret due to empty secret key [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Secrets
  test/e2e/framework/framework.go:149
STEP: Creating a kubernetes client
Dec 12 07:05:14.820: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
STEP: Building a namespace api object, basename secrets
STEP: Waiting for a default service account to be provisioned in namespace
[It] should fail to create secret due to empty secret key [Conformance]
  test/e2e/framework/framework.go:639
STEP: Creating projection with secret that has name secret-emptykey-test-d8ad6f4d-b1e7-4ec8-baf2-2790f27149e8
[AfterEach] [sig-api-machinery] Secrets
  test/e2e/framework/framework.go:150
Dec 12 07:05:15.106: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "secrets-7243" for this suite.
•{"msg":"PASSED [sig-api-machinery] Secrets should fail to create secret due to empty secret key [Conformance]","total":280,"completed":38,"skipped":554,"failed":0}
S
------------------------------
[sig-storage] EmptyDir volumes 
  should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 07:05:17.820: INFO: Waiting for pod pod-20c9088a-32dc-4d13-84df-d08868139eed to disappear
Dec 12 07:05:17.875: INFO: Pod pod-20c9088a-32dc-4d13-84df-d08868139eed no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:05:17.875: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-8893" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":39,"skipped":555,"failed":0}
SSSSSSSSSSSSSSSSSSS
------------------------------
[sig-network] DNS 
  should provide DNS for ExternalName services [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] DNS
... skipping 26 lines ...
STEP: retrieving the pod
STEP: looking for the results for each expected name from probers
Dec 12 07:05:37.134: INFO: File wheezy_udp@dns-test-service-3.dns-8532.svc.cluster.local from pod  dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 contains 'foo.example.com.
' instead of 'bar.example.com.'
Dec 12 07:05:37.193: INFO: File jessie_udp@dns-test-service-3.dns-8532.svc.cluster.local from pod  dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 contains 'foo.example.com.
' instead of 'bar.example.com.'
Dec 12 07:05:37.193: INFO: Lookups using dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 failed for: [wheezy_udp@dns-test-service-3.dns-8532.svc.cluster.local jessie_udp@dns-test-service-3.dns-8532.svc.cluster.local]

Dec 12 07:05:42.252: INFO: File wheezy_udp@dns-test-service-3.dns-8532.svc.cluster.local from pod  dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 contains 'foo.example.com.
' instead of 'bar.example.com.'
Dec 12 07:05:42.310: INFO: File jessie_udp@dns-test-service-3.dns-8532.svc.cluster.local from pod  dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 contains 'foo.example.com.
' instead of 'bar.example.com.'
Dec 12 07:05:42.310: INFO: Lookups using dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 failed for: [wheezy_udp@dns-test-service-3.dns-8532.svc.cluster.local jessie_udp@dns-test-service-3.dns-8532.svc.cluster.local]

Dec 12 07:05:47.254: INFO: File wheezy_udp@dns-test-service-3.dns-8532.svc.cluster.local from pod  dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 contains 'foo.example.com.
' instead of 'bar.example.com.'
Dec 12 07:05:47.312: INFO: File jessie_udp@dns-test-service-3.dns-8532.svc.cluster.local from pod  dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 contains 'foo.example.com.
' instead of 'bar.example.com.'
Dec 12 07:05:47.313: INFO: Lookups using dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 failed for: [wheezy_udp@dns-test-service-3.dns-8532.svc.cluster.local jessie_udp@dns-test-service-3.dns-8532.svc.cluster.local]

Dec 12 07:05:52.252: INFO: File wheezy_udp@dns-test-service-3.dns-8532.svc.cluster.local from pod  dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 contains 'foo.example.com.
' instead of 'bar.example.com.'
Dec 12 07:05:52.310: INFO: File jessie_udp@dns-test-service-3.dns-8532.svc.cluster.local from pod  dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 contains 'foo.example.com.
' instead of 'bar.example.com.'
Dec 12 07:05:52.310: INFO: Lookups using dns-8532/dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 failed for: [wheezy_udp@dns-test-service-3.dns-8532.svc.cluster.local jessie_udp@dns-test-service-3.dns-8532.svc.cluster.local]

Dec 12 07:05:57.310: INFO: DNS probes using dns-test-cd9666c0-3765-47a6-930b-084b79d86aa8 succeeded

STEP: deleting the pod
STEP: changing the service to type=ClusterIP
STEP: Running these commands on wheezy: for i in `seq 1 30`; do dig +short dns-test-service-3.dns-8532.svc.cluster.local A > /results/wheezy_udp@dns-test-service-3.dns-8532.svc.cluster.local; sleep 1; done
... skipping 9 lines ...
STEP: deleting the pod
STEP: deleting the test externalName service
[AfterEach] [sig-network] DNS
  test/e2e/framework/framework.go:150
Dec 12 07:06:00.070: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "dns-8532" for this suite.
•{"msg":"PASSED [sig-network] DNS should provide DNS for ExternalName services [Conformance]","total":280,"completed":40,"skipped":574,"failed":0}
SSSSSS
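
The ExternalName probes above come from a dig loop the test injects into wheezy/jessie utility pods (echoed verbatim in the STEP line); each answer is written under /results and compared with the expected CNAME target, which is why the log reports 'foo.example.com.' until the service is repointed at 'bar.example.com.'. A manual reproduction sketch, with an illustrative image tag and pod name (not taken from this job):

    # Run a throwaway DNS utility pod and query the ExternalName service's CNAME.
    kubectl run dnsutils --image=registry.k8s.io/e2e-test-images/jessie-dnsutils:1.7 \
      --restart=Never -- sleep 3600
    kubectl exec dnsutils -- dig +short dns-test-service-3.dns-8532.svc.cluster.local CNAME
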
------------------------------
[sig-api-machinery] Watchers 
  should observe an object deletion if it stops meeting the requirements of the selector [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Watchers
... skipping 22 lines ...
Dec 12 07:06:11.178: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed  watch-3064 /api/v1/namespaces/watch-3064/configmaps/e2e-watch-test-label-changed d33dfadc-15a7-486c-93dc-89bb3de59471 6211 0 2019-12-12 07:06:00 +0000 UTC <nil> <nil> map[watch-this-configmap:label-changed-and-restored] map[] [] []  []},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
Dec 12 07:06:11.178: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-label-changed  watch-3064 /api/v1/namespaces/watch-3064/configmaps/e2e-watch-test-label-changed d33dfadc-15a7-486c-93dc-89bb3de59471 6212 0 2019-12-12 07:06:00 +0000 UTC <nil> <nil> map[watch-this-configmap:label-changed-and-restored] map[] [] []  []},Data:map[string]string{mutation: 3,},BinaryData:map[string][]byte{},}
[AfterEach] [sig-api-machinery] Watchers
  test/e2e/framework/framework.go:150
Dec 12 07:06:11.178: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "watch-3064" for this suite.
•{"msg":"PASSED [sig-api-machinery] Watchers should observe an object deletion if it stops meeting the requirements of the selector [Conformance]","total":280,"completed":41,"skipped":580,"failed":0}
SSSS
------------------------------
[sig-cli] Kubectl client Kubectl run rc 
  should create an rc from an image  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 33 lines ...
Dec 12 07:06:21.336: INFO: stderr: ""
Dec 12 07:06:21.336: INFO: stdout: "replicationcontroller \"e2e-test-httpd-rc\" deleted\n"
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:06:21.336: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-532" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl run rc should create an rc from an image  [Conformance]","total":280,"completed":42,"skipped":584,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] KubeletManagedEtcHosts 
  should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] KubeletManagedEtcHosts
... skipping 42 lines ...
Dec 12 07:06:30.040: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
Dec 12 07:06:30.461: INFO: Exec stderr: ""
[AfterEach] [k8s.io] KubeletManagedEtcHosts
  test/e2e/framework/framework.go:150
Dec 12 07:06:30.461: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "e2e-kubelet-etc-hosts-5146" for this suite.
•{"msg":"PASSED [k8s.io] KubeletManagedEtcHosts should test kubelet managed /etc/hosts file [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":43,"skipped":636,"failed":0}
SSSSSS
------------------------------
[sig-storage] HostPath 
  should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] HostPath
... skipping 17 lines ...
Dec 12 07:06:33.177: INFO: Waiting for pod pod-host-path-test to disappear
Dec 12 07:06:33.252: INFO: Pod pod-host-path-test no longer exists
[AfterEach] [sig-storage] HostPath
  test/e2e/framework/framework.go:150
Dec 12 07:06:33.252: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "hostpath-8187" for this suite.
•{"msg":"PASSED [sig-storage] HostPath should give a volume the correct mode [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":44,"skipped":642,"failed":0}
SSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-node] Downward API 
  should provide host IP as an env var [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-node] Downward API
... skipping 15 lines ...
Dec 12 07:06:35.980: INFO: Waiting for pod downward-api-26ca8873-5df9-4be8-9f32-e45634036db5 to disappear
Dec 12 07:06:36.038: INFO: Pod downward-api-26ca8873-5df9-4be8-9f32-e45634036db5 no longer exists
[AfterEach] [sig-node] Downward API
  test/e2e/framework/framework.go:150
Dec 12 07:06:36.039: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-4579" for this suite.
•{"msg":"PASSED [sig-node] Downward API should provide host IP as an env var [NodeConformance] [Conformance]","total":280,"completed":45,"skipped":662,"failed":0}
SSSSSSS
------------------------------
[sig-network] DNS 
  should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] DNS
... skipping 26 lines ...
Dec 12 07:06:41.729: INFO: Unable to read jessie_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:41.786: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:41.843: INFO: Unable to read jessie_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:41.899: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:41.961: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:42.019: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:42.366: INFO: Lookups using dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-9939 wheezy_tcp@dns-test-service.dns-9939 wheezy_udp@dns-test-service.dns-9939.svc wheezy_tcp@dns-test-service.dns-9939.svc wheezy_udp@_http._tcp.dns-test-service.dns-9939.svc wheezy_tcp@_http._tcp.dns-test-service.dns-9939.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-9939 jessie_tcp@dns-test-service.dns-9939 jessie_udp@dns-test-service.dns-9939.svc jessie_tcp@dns-test-service.dns-9939.svc jessie_udp@_http._tcp.dns-test-service.dns-9939.svc jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc]

Dec 12 07:06:47.426: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:47.483: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:47.541: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:47.599: INFO: Unable to read wheezy_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:47.657: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
... skipping 5 lines ...
Dec 12 07:06:48.351: INFO: Unable to read jessie_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:48.409: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:48.466: INFO: Unable to read jessie_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:48.523: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:48.580: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:48.638: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:48.984: INFO: Lookups using dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-9939 wheezy_tcp@dns-test-service.dns-9939 wheezy_udp@dns-test-service.dns-9939.svc wheezy_tcp@dns-test-service.dns-9939.svc wheezy_udp@_http._tcp.dns-test-service.dns-9939.svc wheezy_tcp@_http._tcp.dns-test-service.dns-9939.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-9939 jessie_tcp@dns-test-service.dns-9939 jessie_udp@dns-test-service.dns-9939.svc jessie_tcp@dns-test-service.dns-9939.svc jessie_udp@_http._tcp.dns-test-service.dns-9939.svc jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc]

Dec 12 07:06:52.426: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:52.485: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:52.544: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:52.602: INFO: Unable to read wheezy_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:52.660: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
... skipping 5 lines ...
Dec 12 07:06:53.358: INFO: Unable to read jessie_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:53.417: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:53.475: INFO: Unable to read jessie_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:53.534: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:53.592: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:53.651: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:54.002: INFO: Lookups using dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-9939 wheezy_tcp@dns-test-service.dns-9939 wheezy_udp@dns-test-service.dns-9939.svc wheezy_tcp@dns-test-service.dns-9939.svc wheezy_udp@_http._tcp.dns-test-service.dns-9939.svc wheezy_tcp@_http._tcp.dns-test-service.dns-9939.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-9939 jessie_tcp@dns-test-service.dns-9939 jessie_udp@dns-test-service.dns-9939.svc jessie_tcp@dns-test-service.dns-9939.svc jessie_udp@_http._tcp.dns-test-service.dns-9939.svc jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc]

Dec 12 07:06:57.426: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:57.488: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:57.545: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:57.604: INFO: Unable to read wheezy_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:57.661: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
... skipping 5 lines ...
Dec 12 07:06:58.352: INFO: Unable to read jessie_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:58.410: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:58.467: INFO: Unable to read jessie_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:58.523: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:58.581: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:58.639: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:06:58.989: INFO: Lookups using dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-9939 wheezy_tcp@dns-test-service.dns-9939 wheezy_udp@dns-test-service.dns-9939.svc wheezy_tcp@dns-test-service.dns-9939.svc wheezy_udp@_http._tcp.dns-test-service.dns-9939.svc wheezy_tcp@_http._tcp.dns-test-service.dns-9939.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-9939 jessie_tcp@dns-test-service.dns-9939 jessie_udp@dns-test-service.dns-9939.svc jessie_tcp@dns-test-service.dns-9939.svc jessie_udp@_http._tcp.dns-test-service.dns-9939.svc jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc]

Dec 12 07:07:02.425: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:02.482: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:02.541: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:02.598: INFO: Unable to read wheezy_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:02.657: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
... skipping 5 lines ...
Dec 12 07:07:03.360: INFO: Unable to read jessie_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:03.417: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:03.476: INFO: Unable to read jessie_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:03.533: INFO: Unable to read jessie_tcp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:03.592: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:03.651: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:03.998: INFO: Lookups using dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-9939 wheezy_tcp@dns-test-service.dns-9939 wheezy_udp@dns-test-service.dns-9939.svc wheezy_tcp@dns-test-service.dns-9939.svc wheezy_udp@_http._tcp.dns-test-service.dns-9939.svc wheezy_tcp@_http._tcp.dns-test-service.dns-9939.svc jessie_udp@dns-test-service jessie_tcp@dns-test-service jessie_udp@dns-test-service.dns-9939 jessie_tcp@dns-test-service.dns-9939 jessie_udp@dns-test-service.dns-9939.svc jessie_tcp@dns-test-service.dns-9939.svc jessie_udp@_http._tcp.dns-test-service.dns-9939.svc jessie_tcp@_http._tcp.dns-test-service.dns-9939.svc]

Dec 12 07:07:07.423: INFO: Unable to read wheezy_udp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:07.481: INFO: Unable to read wheezy_tcp@dns-test-service from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:07.538: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:07.596: INFO: Unable to read wheezy_tcp@dns-test-service.dns-9939 from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:07.654: INFO: Unable to read wheezy_udp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:07.713: INFO: Unable to read wheezy_tcp@dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:07.771: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:07.829: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-9939.svc from pod dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a: the server could not find the requested resource (get pods dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a)
Dec 12 07:07:08.990: INFO: Lookups using dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a failed for: [wheezy_udp@dns-test-service wheezy_tcp@dns-test-service wheezy_udp@dns-test-service.dns-9939 wheezy_tcp@dns-test-service.dns-9939 wheezy_udp@dns-test-service.dns-9939.svc wheezy_tcp@dns-test-service.dns-9939.svc wheezy_udp@_http._tcp.dns-test-service.dns-9939.svc wheezy_tcp@_http._tcp.dns-test-service.dns-9939.svc]

Dec 12 07:07:13.979: INFO: DNS probes using dns-9939/dns-test-160aabb3-a3ba-4bea-a47c-b9c720edd06a succeeded

STEP: deleting the pod
STEP: deleting the test service
STEP: deleting the test headless service
[AfterEach] [sig-network] DNS
  test/e2e/framework/framework.go:150
Dec 12 07:07:14.217: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "dns-9939" for this suite.
•{"msg":"PASSED [sig-network] DNS should resolve DNS of partial qualified names for services [LinuxOnly] [Conformance]","total":280,"completed":46,"skipped":669,"failed":0}
SSSSSSSSSSSSSSSSSSS
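
Partial qualified names such as dns-test-service or dns-test-service.dns-9939 resolve because the kubelet writes a search path (namespace.svc.cluster.local, svc.cluster.local, cluster.local) into each pod's /etc/resolv.conf, so the probe names expand to their fully qualified forms. A quick check from any utility pod (pod name is an assumption):

    # Show the search list the kubelet injected, then resolve a partial name
    # the same way the test does; dig only consults the search list with +search.
    kubectl exec dnsutils -- cat /etc/resolv.conf
    kubectl exec dnsutils -- dig +search +short dns-test-service
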
------------------------------
[sig-api-machinery] Watchers 
  should be able to restart watching from the last resource version observed by the previous watch [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Watchers
... skipping 17 lines ...
Dec 12 07:07:15.028: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed  watch-2612 /api/v1/namespaces/watch-2612/configmaps/e2e-watch-test-watch-closed 4c599dec-371a-449f-b102-501e5bb2ba40 6626 0 2019-12-12 07:07:14 +0000 UTC <nil> <nil> map[watch-this-configmap:watch-closed-and-restarted] map[] [] []  []},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
Dec 12 07:07:15.029: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-watch-closed  watch-2612 /api/v1/namespaces/watch-2612/configmaps/e2e-watch-test-watch-closed 4c599dec-371a-449f-b102-501e5bb2ba40 6627 0 2019-12-12 07:07:14 +0000 UTC <nil> <nil> map[watch-this-configmap:watch-closed-and-restarted] map[] [] []  []},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
[AfterEach] [sig-api-machinery] Watchers
  test/e2e/framework/framework.go:150
Dec 12 07:07:15.029: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "watch-2612" for this suite.
•{"msg":"PASSED [sig-api-machinery] Watchers should be able to restart watching from the last resource version observed by the previous watch [Conformance]","total":280,"completed":47,"skipped":688,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Pods 
  should be submitted and removed [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Pods
... skipping 16 lines ...
STEP: verifying the kubelet observed the termination notice
STEP: verifying pod deletion was observed
[AfterEach] [k8s.io] Pods
  test/e2e/framework/framework.go:150
Dec 12 07:07:32.252: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pods-517" for this suite.
•{"msg":"PASSED [k8s.io] Pods should be submitted and removed [NodeConformance] [Conformance]","total":280,"completed":48,"skipped":724,"failed":0}
SSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Container Runtime blackbox test on terminated container 
  should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Container Runtime
... skipping 12 lines ...
Dec 12 07:07:34.886: INFO: Expected: &{DONE} to match Container's Termination Message: DONE --
STEP: delete the container
[AfterEach] [k8s.io] Container Runtime
  test/e2e/framework/framework.go:150
Dec 12 07:07:35.016: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-runtime-5716" for this suite.
•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] if TerminationMessagePath is set as non-root user and at a non-default path [NodeConformance] [Conformance]","total":280,"completed":49,"skipped":742,"failed":0}

------------------------------
[sig-storage] Downward API volume 
  should provide container's memory request [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Downward API volume
... skipping 17 lines ...
Dec 12 07:07:37.735: INFO: Waiting for pod downwardapi-volume-cd9108d7-0b70-4554-86bd-070d875ebfda to disappear
Dec 12 07:07:37.791: INFO: Pod downwardapi-volume-cd9108d7-0b70-4554-86bd-070d875ebfda no longer exists
[AfterEach] [sig-storage] Downward API volume
  test/e2e/framework/framework.go:150
Dec 12 07:07:37.792: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-8560" for this suite.
•{"msg":"PASSED [sig-storage] Downward API volume should provide container's memory request [NodeConformance] [Conformance]","total":280,"completed":50,"skipped":742,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-scheduling] SchedulerPredicates [Serial] 
  validates that NodeSelector is respected if matching  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
... skipping 30 lines ...
[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
  test/e2e/framework/framework.go:150
Dec 12 07:07:43.329: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "sched-pred-3692" for this suite.
[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
  test/e2e/scheduling/predicates.go:77
•{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that NodeSelector is respected if matching  [Conformance]","total":280,"completed":51,"skipped":797,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-network] Services 
  should serve multiport endpoints from pods  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] Services
... skipping 24 lines ...
[AfterEach] [sig-network] Services
  test/e2e/framework/framework.go:150
Dec 12 07:07:49.197: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "services-1859" for this suite.
[AfterEach] [sig-network] Services
  test/e2e/network/service.go:144
•{"msg":"PASSED [sig-network] Services should serve multiport endpoints from pods  [Conformance]","total":280,"completed":52,"skipped":851,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] [sig-node] Pods Extended [k8s.io] Delete Grace Period 
  should be submitted and removed [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] [sig-node] Pods Extended
... skipping 14 lines ...
STEP: verifying the kubelet observed the termination notice
Dec 12 07:08:07.025: INFO: no pod exists with the name we were looking for, assuming the termination request was observed and completed
[AfterEach] [k8s.io] [sig-node] Pods Extended
  test/e2e/framework/framework.go:150
Dec 12 07:08:07.083: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pods-9493" for this suite.
•{"msg":"PASSED [k8s.io] [sig-node] Pods Extended [k8s.io] Delete Grace Period should be submitted and removed [Conformance]","total":280,"completed":53,"skipped":880,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
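
The Delete Grace Period spec drives the normal two-phase pod deletion: the API server sets deletionTimestamp, the kubelet delivers SIGTERM, and only after the grace period elapses or the kubelet confirms termination (as logged above) does the object disappear. A hedged sketch of the same flow, with a hypothetical pod name:

    # Request deletion with an explicit grace period, then watch the pod stay
    # in Terminating until the kubelet confirms and the object is removed.
    kubectl delete pod test-pod --grace-period=30 --wait=false
    kubectl get pod test-pod -w
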
------------------------------
[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] 
  works for multiple CRDs of same group and version but different kinds [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
... skipping 8 lines ...
Dec 12 07:08:07.442: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
Dec 12 07:08:11.405: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 07:08:28.547: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "crd-publish-openapi-8899" for this suite.
•{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group and version but different kinds [Conformance]","total":280,"completed":54,"skipped":932,"failed":0}
SSS
------------------------------
[sig-scheduling] SchedulerPredicates [Serial] 
  validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
... skipping 38 lines ...
• [SLOW TEST:307.718 seconds]
[sig-scheduling] SchedulerPredicates [Serial]
test/e2e/scheduling/framework.go:40
  validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]
  test/e2e/framework/framework.go:639
------------------------------
{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates that there exists conflict between pods with same hostPort and protocol but one using 0.0.0.0 hostIP [Conformance]","total":280,"completed":55,"skipped":935,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
  Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] StatefulSet
... skipping 120 lines ...
Dec 12 07:14:25.809: INFO: Waiting for statefulset status.replicas updated to 0
Dec 12 07:14:25.866: INFO: Deleting statefulset ss
[AfterEach] [sig-apps] StatefulSet
  test/e2e/framework/framework.go:150
Dec 12 07:14:26.049: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "statefulset-1480" for this suite.
•{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Burst scaling should run to completion even with unhealthy pods [Slow] [Conformance]","total":280,"completed":56,"skipped":1004,"failed":0}
SSSSSSSS
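
Burst scaling only behaves this way when the StatefulSet is created with podManagementPolicy: Parallel, so scale-up and scale-down do not serialize on each pod becoming Ready. An illustrative check against the statefulset name "ss" used in the log (assuming its namespace still exists):

    # Confirm the policy, then scale in a burst; with Parallel the controller
    # creates all new pods at once instead of one ordinal at a time.
    kubectl get statefulset ss -o jsonpath='{.spec.podManagementPolicy}'
    kubectl scale statefulset ss --replicas=5
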
------------------------------
[sig-network] DNS 
  should provide DNS for services  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] DNS
... skipping 18 lines ...
Dec 12 07:14:30.953: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:31.011: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:31.425: INFO: Unable to read jessie_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:31.484: INFO: Unable to read jessie_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:31.542: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:31.600: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:31.952: INFO: Lookups using dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2 failed for: [wheezy_udp@dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_udp@dns-test-service.dns-6089.svc.cluster.local jessie_tcp@dns-test-service.dns-6089.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local]

Dec 12 07:14:37.011: INFO: Unable to read wheezy_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:37.070: INFO: Unable to read wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:37.131: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:37.189: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:37.609: INFO: Unable to read jessie_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:37.668: INFO: Unable to read jessie_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:37.728: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:37.790: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:38.146: INFO: Lookups using dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2 failed for: [wheezy_udp@dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_udp@dns-test-service.dns-6089.svc.cluster.local jessie_tcp@dns-test-service.dns-6089.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local]

Dec 12 07:14:42.012: INFO: Unable to read wheezy_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:42.070: INFO: Unable to read wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:42.129: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:42.188: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:42.600: INFO: Unable to read jessie_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:42.658: INFO: Unable to read jessie_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:42.717: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:42.778: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:43.132: INFO: Lookups using dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2 failed for: [wheezy_udp@dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_udp@dns-test-service.dns-6089.svc.cluster.local jessie_tcp@dns-test-service.dns-6089.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local]

Dec 12 07:14:47.011: INFO: Unable to read wheezy_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:47.071: INFO: Unable to read wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:47.131: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:47.190: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:47.608: INFO: Unable to read jessie_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:47.666: INFO: Unable to read jessie_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:47.725: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:47.783: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:48.140: INFO: Lookups using dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2 failed for: [wheezy_udp@dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_udp@dns-test-service.dns-6089.svc.cluster.local jessie_tcp@dns-test-service.dns-6089.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local]

Dec 12 07:14:52.012: INFO: Unable to read wheezy_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:52.071: INFO: Unable to read wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:52.129: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:52.187: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:52.602: INFO: Unable to read jessie_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:52.661: INFO: Unable to read jessie_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:52.721: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:52.779: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:53.134: INFO: Lookups using dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2 failed for: [wheezy_udp@dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_udp@dns-test-service.dns-6089.svc.cluster.local jessie_tcp@dns-test-service.dns-6089.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local]

Dec 12 07:14:57.011: INFO: Unable to read wheezy_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:57.070: INFO: Unable to read wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:57.130: INFO: Unable to read wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:57.189: INFO: Unable to read wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:57.606: INFO: Unable to read jessie_udp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:57.665: INFO: Unable to read jessie_tcp@dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:57.723: INFO: Unable to read jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:57.783: INFO: Unable to read jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local from pod dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2: the server could not find the requested resource (get pods dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2)
Dec 12 07:14:58.137: INFO: Lookups using dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2 failed for: [wheezy_udp@dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@dns-test-service.dns-6089.svc.cluster.local wheezy_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local wheezy_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_udp@dns-test-service.dns-6089.svc.cluster.local jessie_tcp@dns-test-service.dns-6089.svc.cluster.local jessie_udp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local jessie_tcp@_http._tcp.dns-test-service.dns-6089.svc.cluster.local]

Dec 12 07:15:03.224: INFO: DNS probes using dns-6089/dns-test-f81e1ce3-5ee1-4daf-b3a9-c7602859ecb2 succeeded

STEP: deleting the pod
STEP: deleting the test service
STEP: deleting the test headless service
[AfterEach] [sig-network] DNS
  test/e2e/framework/framework.go:150
Dec 12 07:15:03.468: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "dns-6089" for this suite.
•{"msg":"PASSED [sig-network] DNS should provide DNS for services  [Conformance]","total":280,"completed":57,"skipped":1012,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-network] DNS 
  should provide DNS for the cluster  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] DNS
... skipping 16 lines ...

STEP: deleting the pod
[AfterEach] [sig-network] DNS
  test/e2e/framework/framework.go:150
Dec 12 07:15:08.617: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "dns-9403" for this suite.
•{"msg":"PASSED [sig-network] DNS should provide DNS for the cluster  [Conformance]","total":280,"completed":58,"skipped":1044,"failed":0}
SS
------------------------------
[sig-storage] Projected configMap 
  should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected configMap
... skipping 16 lines ...
Dec 12 07:15:11.417: INFO: Waiting for pod pod-projected-configmaps-3f4089bd-8a24-4b7a-88f7-a213a1743c78 to disappear
Dec 12 07:15:11.475: INFO: Pod pod-projected-configmaps-3f4089bd-8a24-4b7a-88f7-a213a1743c78 no longer exists
[AfterEach] [sig-storage] Projected configMap
  test/e2e/framework/framework.go:150
Dec 12 07:15:11.476: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-8472" for this suite.
•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":59,"skipped":1046,"failed":0}
SSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Downward API volume 
  should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Downward API volume
... skipping 17 lines ...
Dec 12 07:15:14.217: INFO: Waiting for pod downwardapi-volume-3a1d3bd3-da1f-4779-9338-d650dcbed8b4 to disappear
Dec 12 07:15:14.275: INFO: Pod downwardapi-volume-3a1d3bd3-da1f-4779-9338-d650dcbed8b4 no longer exists
[AfterEach] [sig-storage] Downward API volume
  test/e2e/framework/framework.go:150
Dec 12 07:15:14.276: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-1287" for this suite.
•{"msg":"PASSED [sig-storage] Downward API volume should set DefaultMode on files [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":60,"skipped":1065,"failed":0}
SSSSSSSSSSSSS
------------------------------
[sig-cli] Kubectl client Kubectl version 
  should check if all data is printed  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 10 lines ...
Dec 12 07:15:15.032: INFO: stderr: ""
Dec 12 07:15:15.032: INFO: stdout: "Client Version: version.Info{Major:\"1\", Minor:\"18+\", GitVersion:\"v1.18.0-alpha.0.1678+9caece8bd9fab5\", GitCommit:\"9caece8bd9fab55c3300b4a4373c49afcc5f13f8\", GitTreeState:\"clean\", BuildDate:\"2019-12-06T17:00:51Z\", GoVersion:\"go1.13.4\", Compiler:\"gc\", Platform:\"linux/amd64\"}\nServer Version: version.Info{Major:\"1\", Minor:\"18+\", GitVersion:\"v1.18.0-alpha.0.1678+9caece8bd9fab5\", GitCommit:\"9caece8bd9fab55c3300b4a4373c49afcc5f13f8\", GitTreeState:\"clean\", BuildDate:\"2019-12-12T05:00:32Z\", GoVersion:\"go1.13.4\", Compiler:\"gc\", Platform:\"linux/amd64\"}\n"
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:15:15.032: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-1241" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl version should check is all data is printed  [Conformance]","total":280,"completed":61,"skipped":1078,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-apps] ReplicationController 
  should serve a basic image on each replica with a public image  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] ReplicationController
... skipping 11 lines ...
Dec 12 07:15:17.621: INFO: Trying to dial the pod
Dec 12 07:15:22.797: INFO: Controller my-hostname-basic-96ae6222-040b-47c5-8167-716abd4adfae: Got expected result from replica 1 [my-hostname-basic-96ae6222-040b-47c5-8167-716abd4adfae-d5h5w]: "my-hostname-basic-96ae6222-040b-47c5-8167-716abd4adfae-d5h5w", 1 of 1 required successes so far
[AfterEach] [sig-apps] ReplicationController
  test/e2e/framework/framework.go:150
Dec 12 07:15:22.797: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "replication-controller-327" for this suite.
•{"msg":"PASSED [sig-apps] ReplicationController should serve a basic image on each replica with a public image  [Conformance]","total":280,"completed":62,"skipped":1104,"failed":0}
SSSSSSSSSS
------------------------------
[sig-api-machinery] ResourceQuota 
  should be able to update and delete ResourceQuota. [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] ResourceQuota
... skipping 11 lines ...
STEP: Deleting a ResourceQuota
STEP: Verifying the deleted ResourceQuota
[AfterEach] [sig-api-machinery] ResourceQuota
  test/e2e/framework/framework.go:150
Dec 12 07:15:23.507: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "resourcequota-7912" for this suite.
•{"msg":"PASSED [sig-api-machinery] ResourceQuota should be able to update and delete ResourceQuota. [Conformance]","total":280,"completed":63,"skipped":1114,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] Garbage collector 
  should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Garbage collector
... skipping 34 lines ...

[AfterEach] [sig-api-machinery] Garbage collector
  test/e2e/framework/framework.go:150
Dec 12 07:15:54.338: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
W1212 07:15:54.337949   20158 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
STEP: Destroying namespace "gc-7205" for this suite.
•{"msg":"PASSED [sig-api-machinery] Garbage collector should orphan RS created by deployment when deleteOptions.PropagationPolicy is Orphan [Conformance]","total":280,"completed":64,"skipped":1179,"failed":0}
SSSSSSSSSSSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  should unconditionally reject operations on fail closed webhook [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:149
STEP: Creating a kubernetes client
Dec 12 07:15:54.460: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
STEP: Building a namespace api object, basename webhook
... skipping 5 lines ...
STEP: Deploying the webhook pod
STEP: Wait for the deployment to be ready
Dec 12 07:15:56.172: INFO: deployment status: v1.DeploymentStatus{ObservedGeneration:1, Replicas:1, UpdatedReplicas:1, ReadyReplicas:0, AvailableReplicas:0, UnavailableReplicas:1, Conditions:[]v1.DeploymentCondition{v1.DeploymentCondition{Type:"Available", Status:"False", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63711731756, loc:(*time.Location)(0x7d673a0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63711731756, loc:(*time.Location)(0x7d673a0)}}, Reason:"MinimumReplicasUnavailable", Message:"Deployment does not have minimum availability."}, v1.DeploymentCondition{Type:"Progressing", Status:"True", LastUpdateTime:v1.Time{Time:time.Time{wall:0x0, ext:63711731756, loc:(*time.Location)(0x7d673a0)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63711731756, loc:(*time.Location)(0x7d673a0)}}, Reason:"ReplicaSetUpdated", Message:"ReplicaSet \"sample-webhook-deployment-5f65f8c764\" is progressing."}}, CollisionCount:(*int32)(nil)}
STEP: Deploying the webhook service
STEP: Verifying the service has paired with the endpoint
Dec 12 07:15:59.306: INFO: Waiting for amount of service:e2e-test-webhook endpoints to be 1
[It] should unconditionally reject operations on fail closed webhook [Conformance]
  test/e2e/framework/framework.go:639
STEP: Registering a webhook that the server cannot talk to, with fail closed policy, via the AdmissionRegistration API
STEP: create a namespace for the webhook
STEP: create a configmap should be unconditionally rejected by the webhook
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 07:15:59.748: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-6215" for this suite.
STEP: Destroying namespace "webhook-6215-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should unconditionally reject operations on fail closed webhook [Conformance]","total":280,"completed":65,"skipped":1191,"failed":0}
SSSSSSSSSS
------------------------------
[sig-cli] Kubectl client Kubectl cluster-info 
  should check if Kubernetes master services are included in cluster-info  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 11 lines ...
Dec 12 07:16:00.862: INFO: stderr: ""
Dec 12 07:16:00.862: INFO: stdout: "\x1b[0;32mKubernetes master\x1b[0m is running at \x1b[0;33mhttps://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443\x1b[0m\n\x1b[0;32mKubeDNS\x1b[0m is running at \x1b[0;33mhttps://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\x1b[0m\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n"
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:16:00.862: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-638" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl cluster-info should check if Kubernetes master services is included in cluster-info  [Conformance]","total":280,"completed":66,"skipped":1201,"failed":0}
SSSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  should mutate custom resource with pruning [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 21 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:16:07.190: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-3089" for this suite.
STEP: Destroying namespace "webhook-3089-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with pruning [Conformance]","total":280,"completed":67,"skipped":1205,"failed":0}
SSSSSSSS
------------------------------
[sig-node] ConfigMap 
  should fail to create ConfigMap with empty key [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-node] ConfigMap
  test/e2e/framework/framework.go:149
STEP: Creating a kubernetes client
Dec 12 07:16:07.656: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
STEP: Building a namespace api object, basename configmap
STEP: Waiting for a default service account to be provisioned in namespace
[It] should fail to create ConfigMap with empty key [Conformance]
  test/e2e/framework/framework.go:639
STEP: Creating configMap that has name configmap-test-emptyKey-5f7c1a9e-1a6f-4468-b42c-2761e5147c6a
[AfterEach] [sig-node] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 07:16:07.943: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-7753" for this suite.
•{"msg":"PASSED [sig-node] ConfigMap should fail to create ConfigMap with empty key [Conformance]","total":280,"completed":68,"skipped":1213,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Secrets 
  should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Secrets
... skipping 16 lines ...
Dec 12 07:16:10.789: INFO: Waiting for pod pod-secrets-a8ea0d65-8e43-4737-bfba-cf50f93e4361 to disappear
Dec 12 07:16:10.853: INFO: Pod pod-secrets-a8ea0d65-8e43-4737-bfba-cf50f93e4361 no longer exists
[AfterEach] [sig-storage] Secrets
  test/e2e/framework/framework.go:150
Dec 12 07:16:10.854: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "secrets-7327" for this suite.
•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":69,"skipped":1237,"failed":0}

------------------------------
[k8s.io] Container Runtime blackbox test on terminated container 
  should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Container Runtime
... skipping 2 lines ...
Dec 12 07:16:10.975: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
STEP: Building a namespace api object, basename container-runtime
STEP: Waiting for a default service account to be provisioned in namespace
[It] should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
STEP: create the container
STEP: wait for the container to reach Failed
STEP: get the container status
STEP: the container should be terminated
STEP: the termination message should be set
Dec 12 07:16:13.506: INFO: Expected: &{DONE} to match Container's Termination Message: DONE --
STEP: delete the container
[AfterEach] [k8s.io] Container Runtime
  test/e2e/framework/framework.go:150
Dec 12 07:16:13.635: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-runtime-6539" for this suite.
•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":280,"completed":70,"skipped":1237,"failed":0}
SSSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  listing validating webhooks should work [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 22 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:16:19.298: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-6289" for this suite.
STEP: Destroying namespace "webhook-6289-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] listing validating webhooks should work [Conformance]","total":280,"completed":71,"skipped":1241,"failed":0}
SSSSSS
------------------------------
[sig-api-machinery] ResourceQuota 
  should create a ResourceQuota and capture the life of a service. [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] ResourceQuota
... skipping 12 lines ...
STEP: Deleting a Service
STEP: Ensuring resource quota status released usage
[AfterEach] [sig-api-machinery] ResourceQuota
  test/e2e/framework/framework.go:150
Dec 12 07:16:31.446: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "resourcequota-8286" for this suite.
•{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a service. [Conformance]","total":280,"completed":72,"skipped":1247,"failed":0}
SSSS
------------------------------
[sig-apps] Daemon set [Serial] 
  should run and stop complex daemon [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Daemon set [Serial]
... skipping 56 lines ...
Dec 12 07:16:52.415: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-5341/pods","resourceVersion":"9520"},"items":null}

[AfterEach] [sig-apps] Daemon set [Serial]
  test/e2e/framework/framework.go:150
Dec 12 07:16:52.655: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "daemonsets-5341" for this suite.
•{"msg":"PASSED [sig-apps] Daemon set [Serial] should run and stop complex daemon [Conformance]","total":280,"completed":73,"skipped":1251,"failed":0}
S
------------------------------
[sig-network] Proxy version v1 
  should proxy through a service and a pod  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] version v1
... skipping 337 lines ...
Dec 12 07:16:59.048: INFO: Deleting ReplicationController proxy-service-2579k took: 62.137387ms
Dec 12 07:16:59.450: INFO: Terminating ReplicationController proxy-service-2579k pods took: 402.034713ms
[AfterEach] version v1
  test/e2e/framework/framework.go:150
Dec 12 07:17:01.552: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "proxy-994" for this suite.
•{"msg":"PASSED [sig-network] Proxy version v1 should proxy through a service and a pod  [Conformance]","total":280,"completed":74,"skipped":1252,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-cli] Kubectl client Proxy server 
  should support --unix-socket=/path  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 10 lines ...
Dec 12 07:17:01.923: INFO: Asynchronously running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl kubectl --server=https://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443 --kubeconfig=/home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig proxy --unix-socket=/tmp/kubectl-proxy-unix226180474/test'
STEP: retrieving proxy /api/ output
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:17:02.072: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-1443" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Proxy server should support --unix-socket=/path  [Conformance]","total":280,"completed":75,"skipped":1280,"failed":0}
SSSSSSSS
------------------------------
[sig-storage] Secrets 
  should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Secrets
... skipping 17 lines ...
Dec 12 07:17:05.169: INFO: Pod pod-secrets-948f9014-e24f-419c-a52c-8054ede5c0d7 no longer exists
[AfterEach] [sig-storage] Secrets
  test/e2e/framework/framework.go:150
Dec 12 07:17:05.169: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "secrets-4482" for this suite.
STEP: Destroying namespace "secret-namespace-2760" for this suite.
•{"msg":"PASSED [sig-storage] Secrets should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance] [Conformance]","total":280,"completed":76,"skipped":1288,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected configMap 
  should be consumable from pods in volume as non-root [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected configMap
... skipping 16 lines ...
Dec 12 07:17:08.025: INFO: Waiting for pod pod-projected-configmaps-9391dba9-99b0-4fc0-963c-595cde67f9d5 to disappear
Dec 12 07:17:08.082: INFO: Pod pod-projected-configmaps-9391dba9-99b0-4fc0-963c-595cde67f9d5 no longer exists
[AfterEach] [sig-storage] Projected configMap
  test/e2e/framework/framework.go:150
Dec 12 07:17:08.082: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-4038" for this suite.
•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume as non-root [NodeConformance] [Conformance]","total":280,"completed":77,"skipped":1336,"failed":0}
SSSSSSSSSSS
------------------------------
[sig-network] Services 
  should be able to change the type from ClusterIP to ExternalName [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] Services
... skipping 24 lines ...
[AfterEach] [sig-network] Services
  test/e2e/framework/framework.go:150
Dec 12 07:17:23.655: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "services-940" for this suite.
[AfterEach] [sig-network] Services
  test/e2e/network/service.go:144
•{"msg":"PASSED [sig-network] Services should be able to change the type from ClusterIP to ExternalName [Conformance]","total":280,"completed":78,"skipped":1347,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-apps] Job 
  should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Job
  test/e2e/framework/framework.go:149
STEP: Creating a kubernetes client
Dec 12 07:17:23.780: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
STEP: Building a namespace api object, basename job
STEP: Waiting for a default service account to be provisioned in namespace
[It] should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]
  test/e2e/framework/framework.go:639
STEP: Creating a job
STEP: Ensuring job reaches completions
[AfterEach] [sig-apps] Job
  test/e2e/framework/framework.go:150
Dec 12 07:17:30.129: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "job-1190" for this suite.
•{"msg":"PASSED [sig-apps] Job should run a job to completion when tasks sometimes fail and are locally restarted [Conformance]","total":280,"completed":79,"skipped":1384,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] EmptyDir volumes 
  should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 07:17:32.854: INFO: Waiting for pod pod-4a6d8f57-458e-43a4-89a7-c962ddae72b9 to disappear
Dec 12 07:17:32.914: INFO: Pod pod-4a6d8f57-458e-43a4-89a7-c962ddae72b9 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:17:32.914: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-348" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":80,"skipped":1421,"failed":0}
SSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Subpath Atomic writer volumes 
  should support subpaths with projected pod [LinuxOnly] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Subpath
... skipping 31 lines ...
STEP: Deleting pod pod-subpath-test-projected-n9qm
Dec 12 07:17:56.404: INFO: Deleting pod "pod-subpath-test-projected-n9qm" in namespace "subpath-1652"
[AfterEach] [sig-storage] Subpath
  test/e2e/framework/framework.go:150
Dec 12 07:17:56.460: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "subpath-1652" for this suite.
•{"msg":"PASSED [sig-storage] Subpath Atomic writer volumes should support subpaths with projected pod [LinuxOnly] [Conformance]","total":280,"completed":81,"skipped":1437,"failed":0}
SS
------------------------------
[sig-api-machinery] ResourceQuota 
  should create a ResourceQuota and capture the life of a replica set. [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] ResourceQuota
... skipping 12 lines ...
STEP: Deleting a ReplicaSet
STEP: Ensuring resource quota status released usage
[AfterEach] [sig-api-machinery] ResourceQuota
  test/e2e/framework/framework.go:150
Dec 12 07:18:08.255: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "resourcequota-4288" for this suite.
•{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a replica set. [Conformance]","total":280,"completed":82,"skipped":1439,"failed":0}
SSSSSSSS
------------------------------
[sig-auth] ServiceAccounts 
  should allow opting out of API token automount  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-auth] ServiceAccounts
... skipping 24 lines ...
Dec 12 07:18:09.893: INFO: created pod pod-service-account-nomountsa-nomountspec
Dec 12 07:18:09.893: INFO: pod pod-service-account-nomountsa-nomountspec service account token volume mount: false
[AfterEach] [sig-auth] ServiceAccounts
  test/e2e/framework/framework.go:150
Dec 12 07:18:09.893: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "svcaccounts-1172" for this suite.
•{"msg":"PASSED [sig-auth] ServiceAccounts should allow opting out of API token automount  [Conformance]","total":280,"completed":83,"skipped":1447,"failed":0}
SSSSSSSSSSSSSS
------------------------------
[sig-storage] ConfigMap 
  should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] ConfigMap
... skipping 16 lines ...
Dec 12 07:18:12.680: INFO: Waiting for pod pod-configmaps-273ee470-e77f-4a61-98d2-0e996b8ad2bf to disappear
Dec 12 07:18:12.737: INFO: Pod pod-configmaps-273ee470-e77f-4a61-98d2-0e996b8ad2bf no longer exists
[AfterEach] [sig-storage] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 07:18:12.738: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-9733" for this suite.
•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":84,"skipped":1461,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Variable Expansion 
  should allow substituting values in a container's args [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Variable Expansion
... skipping 15 lines ...
Dec 12 07:18:15.463: INFO: Waiting for pod var-expansion-8990b232-cbe0-49e6-989b-f6fbdd518aa4 to disappear
Dec 12 07:18:15.520: INFO: Pod var-expansion-8990b232-cbe0-49e6-989b-f6fbdd518aa4 no longer exists
[AfterEach] [k8s.io] Variable Expansion
  test/e2e/framework/framework.go:150
Dec 12 07:18:15.521: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "var-expansion-4147" for this suite.
•{"msg":"PASSED [k8s.io] Variable Expansion should allow substituting values in a container's args [NodeConformance] [Conformance]","total":280,"completed":85,"skipped":1516,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected downwardAPI 
  should provide container's cpu request [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected downwardAPI
... skipping 17 lines ...
Dec 12 07:18:18.259: INFO: Waiting for pod downwardapi-volume-64c5f311-b141-4766-a43b-899810d1fec9 to disappear
Dec 12 07:18:18.317: INFO: Pod downwardapi-volume-64c5f311-b141-4766-a43b-899810d1fec9 no longer exists
[AfterEach] [sig-storage] Projected downwardAPI
  test/e2e/framework/framework.go:150
Dec 12 07:18:18.317: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-5068" for this suite.
•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's cpu request [NodeConformance] [Conformance]","total":280,"completed":86,"skipped":1555,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected secret 
  should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected secret
... skipping 16 lines ...
Dec 12 07:18:21.089: INFO: Waiting for pod pod-projected-secrets-b0bb8a4b-fa36-4936-806f-16c2a5aa83df to disappear
Dec 12 07:18:21.147: INFO: Pod pod-projected-secrets-b0bb8a4b-fa36-4936-806f-16c2a5aa83df no longer exists
[AfterEach] [sig-storage] Projected secret
  test/e2e/framework/framework.go:150
Dec 12 07:18:21.147: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-8585" for this suite.
•{"msg":"PASSED [sig-storage] Projected secret should be consumable in multiple volumes in a pod [NodeConformance] [Conformance]","total":280,"completed":87,"skipped":1592,"failed":0}
SSSSSSS
------------------------------
[sig-node] Downward API 
  should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-node] Downward API
... skipping 15 lines ...
Dec 12 07:18:23.860: INFO: Waiting for pod downward-api-c80d6e84-7a31-4055-885e-ebd5f86ec340 to disappear
Dec 12 07:18:23.918: INFO: Pod downward-api-c80d6e84-7a31-4055-885e-ebd5f86ec340 no longer exists
[AfterEach] [sig-node] Downward API
  test/e2e/framework/framework.go:150
Dec 12 07:18:23.918: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-7963" for this suite.
•{"msg":"PASSED [sig-node] Downward API should provide default limits.cpu/memory from node allocatable [NodeConformance] [Conformance]","total":280,"completed":88,"skipped":1599,"failed":0}
SSSSSSS
------------------------------
[sig-apps] Daemon set [Serial] 
  should retry creating failed daemon pods [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Daemon set [Serial]
  test/e2e/framework/framework.go:149
STEP: Creating a kubernetes client
Dec 12 07:18:24.038: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
STEP: Building a namespace api object, basename daemonsets
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [sig-apps] Daemon set [Serial]
  test/e2e/apps/daemon_set.go:133
[It] should retry creating failed daemon pods [Conformance]
  test/e2e/framework/framework.go:639
STEP: Creating a simple DaemonSet "daemon-set"
STEP: Check that daemon pods launch on every node of the cluster.
Dec 12 07:18:24.687: INFO: DaemonSet pods can't tolerate node ip-10-0-0-157.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:24.687: INFO: DaemonSet pods can't tolerate node ip-10-0-0-249.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:24.687: INFO: DaemonSet pods can't tolerate node ip-10-0-0-93.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
... skipping 6 lines ...
Dec 12 07:18:25.861: INFO: Node ip-10-0-0-18.us-west-2.compute.internal is running more than one daemon pod
Dec 12 07:18:26.805: INFO: DaemonSet pods can't tolerate node ip-10-0-0-157.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:26.805: INFO: DaemonSet pods can't tolerate node ip-10-0-0-249.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:26.805: INFO: DaemonSet pods can't tolerate node ip-10-0-0-93.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:26.863: INFO: Number of nodes with available pods: 2
Dec 12 07:18:26.863: INFO: Number of running nodes: 2, number of available pods: 2
STEP: Set a daemon pod's phase to 'Failed', check that the daemon pod is revived.
Dec 12 07:18:27.095: INFO: DaemonSet pods can't tolerate node ip-10-0-0-157.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:27.095: INFO: DaemonSet pods can't tolerate node ip-10-0-0-249.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:27.095: INFO: DaemonSet pods can't tolerate node ip-10-0-0-93.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:27.154: INFO: Number of nodes with available pods: 1
Dec 12 07:18:27.154: INFO: Node ip-10-0-0-18.us-west-2.compute.internal is running more than one daemon pod
Dec 12 07:18:28.213: INFO: DaemonSet pods can't tolerate node ip-10-0-0-157.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
... skipping 3 lines ...
Dec 12 07:18:28.270: INFO: Node ip-10-0-0-18.us-west-2.compute.internal is running more than one daemon pod
Dec 12 07:18:29.213: INFO: DaemonSet pods can't tolerate node ip-10-0-0-157.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:29.213: INFO: DaemonSet pods can't tolerate node ip-10-0-0-249.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:29.214: INFO: DaemonSet pods can't tolerate node ip-10-0-0-93.us-west-2.compute.internal with taints [{Key:node-role.kubernetes.io/master Value: Effect:NoSchedule TimeAdded:<nil>}], skip checking this node
Dec 12 07:18:29.270: INFO: Number of nodes with available pods: 2
Dec 12 07:18:29.270: INFO: Number of running nodes: 2, number of available pods: 2
STEP: Wait for the failed daemon pod to be completely deleted.
[AfterEach] [sig-apps] Daemon set [Serial]
  test/e2e/apps/daemon_set.go:99
STEP: Deleting DaemonSet "daemon-set"
STEP: deleting DaemonSet.extensions daemon-set in namespace daemonsets-1765, will wait for the garbage collector to delete the pods
Dec 12 07:18:29.602: INFO: Deleting DaemonSet.extensions daemon-set took: 61.110921ms
Dec 12 07:18:30.003: INFO: Terminating DaemonSet.extensions daemon-set pods took: 401.371742ms
... skipping 4 lines ...
Dec 12 07:18:42.373: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-1765/pods","resourceVersion":"10689"},"items":null}

[AfterEach] [sig-apps] Daemon set [Serial]
  test/e2e/framework/framework.go:150
Dec 12 07:18:42.548: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "daemonsets-1765" for this suite.
•{"msg":"PASSED [sig-apps] Daemon set [Serial] should retry creating failed daemon pods [Conformance]","total":280,"completed":89,"skipped":1606,"failed":0}

------------------------------
[k8s.io] Pods 
  should contain environment variables for services [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Pods
... skipping 16 lines ...
Dec 12 07:18:47.570: INFO: Waiting for pod client-envvars-919d187c-3fae-49fa-b1fe-1fe7fe992703 to disappear
Dec 12 07:18:47.626: INFO: Pod client-envvars-919d187c-3fae-49fa-b1fe-1fe7fe992703 no longer exists
[AfterEach] [k8s.io] Pods
  test/e2e/framework/framework.go:150
Dec 12 07:18:47.628: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pods-9935" for this suite.
•{"msg":"PASSED [k8s.io] Pods should contain environment variables for services [NodeConformance] [Conformance]","total":280,"completed":90,"skipped":1606,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Docker Containers 
  should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Docker Containers
... skipping 15 lines ...
Dec 12 07:18:50.350: INFO: Waiting for pod client-containers-1894d83e-773c-4c3f-9900-ebe1595b146d to disappear
Dec 12 07:18:50.408: INFO: Pod client-containers-1894d83e-773c-4c3f-9900-ebe1595b146d no longer exists
[AfterEach] [k8s.io] Docker Containers
  test/e2e/framework/framework.go:150
Dec 12 07:18:50.408: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "containers-9639" for this suite.
•{"msg":"PASSED [k8s.io] Docker Containers should be able to override the image's default arguments (docker cmd) [NodeConformance] [Conformance]","total":280,"completed":91,"skipped":1640,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] EmptyDir volumes 
  should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 16 lines ...
Dec 12 07:18:55.178: INFO: Waiting for pod pod-b4780549-d6c6-4b99-8b19-2de0a7ae3182 to disappear
Dec 12 07:18:55.233: INFO: Pod pod-b4780549-d6c6-4b99-8b19-2de0a7ae3182 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:18:55.233: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-1205" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":92,"skipped":1665,"failed":0}
SSSSS
------------------------------
[sig-storage] Projected configMap 
  should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected configMap
... skipping 16 lines ...
Dec 12 07:18:58.003: INFO: Waiting for pod pod-projected-configmaps-1a67e988-5e5e-4a2a-a27b-d33621d9eccd to disappear
Dec 12 07:18:58.060: INFO: Pod pod-projected-configmaps-1a67e988-5e5e-4a2a-a27b-d33621d9eccd no longer exists
[AfterEach] [sig-storage] Projected configMap
  test/e2e/framework/framework.go:150
Dec 12 07:18:58.060: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-5895" for this suite.
•{"msg":"PASSED [sig-storage] Projected configMap should be consumable in multiple volumes in the same pod [NodeConformance] [Conformance]","total":280,"completed":93,"skipped":1670,"failed":0}
SSSSSSSSSSSSS
------------------------------
[sig-auth] ServiceAccounts 
  should mount an API token into pods  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-auth] ServiceAccounts
... skipping 12 lines ...
STEP: reading a file in the container
Dec 12 07:19:03.057: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl exec --namespace=svcaccounts-8935 pod-service-account-acf369a4-161c-4280-a119-994c7a892f3f -c=test -- cat /var/run/secrets/kubernetes.io/serviceaccount/namespace'
[AfterEach] [sig-auth] ServiceAccounts
  test/e2e/framework/framework.go:150
Dec 12 07:19:04.000: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "svcaccounts-8935" for this suite.
•{"msg":"PASSED [sig-auth] ServiceAccounts should mount an API token into pods  [Conformance]","total":280,"completed":94,"skipped":1683,"failed":0}
SSSSSSSSSS
------------------------------
[sig-storage] Projected downwardAPI 
  should provide podname only [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected downwardAPI
... skipping 17 lines ...
Dec 12 07:19:06.728: INFO: Waiting for pod downwardapi-volume-f969d0e6-730a-4279-8ae3-56b33aa2f00d to disappear
Dec 12 07:19:06.784: INFO: Pod downwardapi-volume-f969d0e6-730a-4279-8ae3-56b33aa2f00d no longer exists
[AfterEach] [sig-storage] Projected downwardAPI
  test/e2e/framework/framework.go:150
Dec 12 07:19:06.784: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-1235" for this suite.
•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide podname only [NodeConformance] [Conformance]","total":280,"completed":95,"skipped":1693,"failed":0}
SSSSSSS
------------------------------
[sig-cli] Kubectl client Kubectl run --rm job 
  should create a job from an image, then delete the job  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 12 lines ...
Dec 12 07:19:10.005: INFO: stdout: "abcd1234stdin closed\njob.batch \"e2e-test-rm-busybox-job\" deleted\n"
STEP: verifying the job e2e-test-rm-busybox-job was deleted
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:19:12.119: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-8633" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl run --rm job should create a job from an image, then delete the job  [Conformance]","total":280,"completed":96,"skipped":1700,"failed":0}
SSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] ConfigMap 
  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] ConfigMap
... skipping 16 lines ...
Dec 12 07:19:14.888: INFO: Waiting for pod pod-configmaps-94f2dfeb-d73d-4ba8-b668-2cb4881ce2f5 to disappear
Dec 12 07:19:14.945: INFO: Pod pod-configmaps-94f2dfeb-d73d-4ba8-b668-2cb4881ce2f5 no longer exists
[AfterEach] [sig-storage] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 07:19:14.945: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-2058" for this suite.
•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":97,"skipped":1717,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Pods 
  should support retrieving logs from the container over websockets [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Pods
... skipping 10 lines ...
STEP: creating the pod
STEP: submitting the pod to kubernetes
[AfterEach] [k8s.io] Pods
  test/e2e/framework/framework.go:150
Dec 12 07:19:17.734: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pods-44" for this suite.
•{"msg":"PASSED [k8s.io] Pods should support retrieving logs from the container over websockets [NodeConformance] [Conformance]","total":280,"completed":98,"skipped":1747,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Pods 
  should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Pods
... skipping 11 lines ...
STEP: verifying the pod is in kubernetes
STEP: updating the pod
Dec 12 07:19:20.991: INFO: Successfully updated pod "pod-update-activedeadlineseconds-f61a61ed-a74e-4e43-8fa9-0d9ecfcf43db"
Dec 12 07:19:20.991: INFO: Waiting up to 5m0s for pod "pod-update-activedeadlineseconds-f61a61ed-a74e-4e43-8fa9-0d9ecfcf43db" in namespace "pods-9595" to be "terminated due to deadline exceeded"
Dec 12 07:19:21.049: INFO: Pod "pod-update-activedeadlineseconds-f61a61ed-a74e-4e43-8fa9-0d9ecfcf43db": Phase="Running", Reason="", readiness=true. Elapsed: 57.399702ms
Dec 12 07:19:23.105: INFO: Pod "pod-update-activedeadlineseconds-f61a61ed-a74e-4e43-8fa9-0d9ecfcf43db": Phase="Running", Reason="", readiness=true. Elapsed: 2.114027958s
Dec 12 07:19:25.163: INFO: Pod "pod-update-activedeadlineseconds-f61a61ed-a74e-4e43-8fa9-0d9ecfcf43db": Phase="Failed", Reason="DeadlineExceeded", readiness=false. Elapsed: 4.171320372s
Dec 12 07:19:25.163: INFO: Pod "pod-update-activedeadlineseconds-f61a61ed-a74e-4e43-8fa9-0d9ecfcf43db" satisfied condition "terminated due to deadline exceeded"
[AfterEach] [k8s.io] Pods
  test/e2e/framework/framework.go:150
Dec 12 07:19:25.163: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pods-9595" for this suite.
•{"msg":"PASSED [k8s.io] Pods should allow activeDeadlineSeconds to be updated [NodeConformance] [Conformance]","total":280,"completed":99,"skipped":1794,"failed":0}
SS
------------------------------
[k8s.io] Variable Expansion 
  should allow composing env vars into new env vars [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Variable Expansion
... skipping 15 lines ...
Dec 12 07:19:27.872: INFO: Waiting for pod var-expansion-a2f70885-b8a7-4f2b-805d-95a64c5835a0 to disappear
Dec 12 07:19:27.928: INFO: Pod var-expansion-a2f70885-b8a7-4f2b-805d-95a64c5835a0 no longer exists
[AfterEach] [k8s.io] Variable Expansion
  test/e2e/framework/framework.go:150
Dec 12 07:19:27.929: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "var-expansion-8525" for this suite.
•{"msg":"PASSED [k8s.io] Variable Expansion should allow composing env vars into new env vars [NodeConformance] [Conformance]","total":280,"completed":100,"skipped":1796,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] EmptyDir wrapper volumes 
  should not conflict [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir wrapper volumes
... skipping 8 lines ...
STEP: Cleaning up the configmap
STEP: Cleaning up the pod
[AfterEach] [sig-storage] EmptyDir wrapper volumes
  test/e2e/framework/framework.go:150
Dec 12 07:19:32.819: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-wrapper-4059" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir wrapper volumes should not conflict [Conformance]","total":280,"completed":101,"skipped":1851,"failed":0}
SS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  patching/updating a validating webhook should work [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 24 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:19:38.263: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-7272" for this suite.
STEP: Destroying namespace "webhook-7272-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] patching/updating a validating webhook should work [Conformance]","total":280,"completed":102,"skipped":1853,"failed":0}
SSSSSSSSSSSSSSSS
------------------------------
[sig-network] Services 
  should serve a basic endpoint from pods  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] Services
... skipping 24 lines ...
[AfterEach] [sig-network] Services
  test/e2e/framework/framework.go:150
Dec 12 07:19:44.423: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "services-1701" for this suite.
[AfterEach] [sig-network] Services
  test/e2e/network/service.go:144
•{"msg":"PASSED [sig-network] Services should serve a basic endpoint from pods  [Conformance]","total":280,"completed":103,"skipped":1869,"failed":0}
SSSS
------------------------------
[sig-storage] Downward API volume 
  should provide podname only [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Downward API volume
... skipping 17 lines ...
Dec 12 07:19:47.145: INFO: Waiting for pod downwardapi-volume-38b934d8-d4d3-44e4-8e9b-5b3425a8931e to disappear
Dec 12 07:19:47.202: INFO: Pod downwardapi-volume-38b934d8-d4d3-44e4-8e9b-5b3425a8931e no longer exists
[AfterEach] [sig-storage] Downward API volume
  test/e2e/framework/framework.go:150
Dec 12 07:19:47.203: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-8220" for this suite.
•{"msg":"PASSED [sig-storage] Downward API volume should provide podname only [NodeConformance] [Conformance]","total":280,"completed":104,"skipped":1873,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected downwardAPI 
  should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected downwardAPI
... skipping 17 lines ...
Dec 12 07:19:49.920: INFO: Waiting for pod downwardapi-volume-f7b46b80-9280-42f4-b40f-e1517b545ded to disappear
Dec 12 07:19:49.977: INFO: Pod downwardapi-volume-f7b46b80-9280-42f4-b40f-e1517b545ded no longer exists
[AfterEach] [sig-storage] Projected downwardAPI
  test/e2e/framework/framework.go:150
Dec 12 07:19:49.977: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-6688" for this suite.
•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance] [Conformance]","total":280,"completed":105,"skipped":1917,"failed":0}
SSSSSSSSSSS
------------------------------
[sig-api-machinery] Secrets 
  should be consumable via the environment [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Secrets
... skipping 16 lines ...
Dec 12 07:19:52.756: INFO: Waiting for pod pod-configmaps-d78dc221-eaf5-419f-9404-38f0d2f01a14 to disappear
Dec 12 07:19:52.813: INFO: Pod pod-configmaps-d78dc221-eaf5-419f-9404-38f0d2f01a14 no longer exists
[AfterEach] [sig-api-machinery] Secrets
  test/e2e/framework/framework.go:150
Dec 12 07:19:52.814: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "secrets-9154" for this suite.
•{"msg":"PASSED [sig-api-machinery] Secrets should be consumable via the environment [NodeConformance] [Conformance]","total":280,"completed":106,"skipped":1928,"failed":0}
SSSSSSSSSSS
------------------------------
[sig-storage] Projected configMap 
  should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected configMap
... skipping 16 lines ...
Dec 12 07:19:55.600: INFO: Waiting for pod pod-projected-configmaps-affbc2af-7a16-415f-89ae-d75a87d32160 to disappear
Dec 12 07:19:55.656: INFO: Pod pod-projected-configmaps-affbc2af-7a16-415f-89ae-d75a87d32160 no longer exists
[AfterEach] [sig-storage] Projected configMap
  test/e2e/framework/framework.go:150
Dec 12 07:19:55.657: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-7599" for this suite.
•{"msg":"PASSED [sig-storage] Projected configMap should be consumable from pods in volume with mappings as non-root [NodeConformance] [Conformance]","total":280,"completed":107,"skipped":1939,"failed":0}
SSSSSSSSSSSSSS
------------------------------
[sig-storage] Downward API volume 
  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Downward API volume
... skipping 17 lines ...
Dec 12 07:19:58.368: INFO: Waiting for pod downwardapi-volume-ab7126da-b6da-4e29-8f94-e4a71609feac to disappear
Dec 12 07:19:58.426: INFO: Pod downwardapi-volume-ab7126da-b6da-4e29-8f94-e4a71609feac no longer exists
[AfterEach] [sig-storage] Downward API volume
  test/e2e/framework/framework.go:150
Dec 12 07:19:58.426: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-5095" for this suite.
•{"msg":"PASSED [sig-storage] Downward API volume should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]","total":280,"completed":108,"skipped":1953,"failed":0}
SSSSSSSSS
------------------------------
[k8s.io] Probing container 
  with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Probing container
... skipping 7 lines ...
[It] with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[AfterEach] [k8s.io] Probing container
  test/e2e/framework/framework.go:150
Dec 12 07:20:58.895: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-probe-976" for this suite.
•{"msg":"PASSED [k8s.io] Probing container with readiness probe that fails should never be ready and never restart [NodeConformance] [Conformance]","total":280,"completed":109,"skipped":1962,"failed":0}
SSSSSSSSSSSS
------------------------------
[sig-api-machinery] ResourceQuota 
  should verify ResourceQuota with terminating scopes. [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] ResourceQuota
... skipping 19 lines ...
STEP: Deleting the pod
STEP: Ensuring resource quota status released the pod usage
[AfterEach] [sig-api-machinery] ResourceQuota
  test/e2e/framework/framework.go:150
Dec 12 07:21:16.121: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "resourcequota-8397" for this suite.
•{"msg":"PASSED [sig-api-machinery] ResourceQuota should verify ResourceQuota with terminating scopes. [Conformance]","total":280,"completed":110,"skipped":1974,"failed":0}
SSSSS
------------------------------
[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] 
  works for multiple CRDs of same group but different versions [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
... skipping 10 lines ...
Dec 12 07:21:33.277: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
Dec 12 07:21:37.204: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 07:21:54.359: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "crd-publish-openapi-3932" for this suite.
•{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for multiple CRDs of same group but different versions [Conformance]","total":280,"completed":111,"skipped":1979,"failed":0}
SSSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  should include webhook resources in discovery documents [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 25 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:21:59.024: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-7875" for this suite.
STEP: Destroying namespace "webhook-7875-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should include webhook resources in discovery documents [Conformance]","total":280,"completed":112,"skipped":1983,"failed":0}
SS
------------------------------
[sig-api-machinery] Watchers 
  should observe add, update, and delete watch notifications on configmaps [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Watchers
... skipping 26 lines ...
Dec 12 07:22:50.353: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b  watch-4648 /api/v1/namespaces/watch-4648/configmaps/e2e-watch-test-configmap-b 54d5ac2e-54a4-4d16-8127-e9a504acfb93 12352 0 2019-12-12 07:22:40 +0000 UTC <nil> <nil> map[watch-this-configmap:multiple-watchers-B] map[] [] []  []},Data:map[string]string{},BinaryData:map[string][]byte{},}
Dec 12 07:22:50.353: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-configmap-b  watch-4648 /api/v1/namespaces/watch-4648/configmaps/e2e-watch-test-configmap-b 54d5ac2e-54a4-4d16-8127-e9a504acfb93 12352 0 2019-12-12 07:22:40 +0000 UTC <nil> <nil> map[watch-this-configmap:multiple-watchers-B] map[] [] []  []},Data:map[string]string{},BinaryData:map[string][]byte{},}
[AfterEach] [sig-api-machinery] Watchers
  test/e2e/framework/framework.go:150
Dec 12 07:23:00.354: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "watch-4648" for this suite.
•{"msg":"PASSED [sig-api-machinery] Watchers should observe add, update, and delete watch notifications on configmaps [Conformance]","total":280,"completed":113,"skipped":1985,"failed":0}
SSSSSSSSS
------------------------------
[sig-storage] Projected downwardAPI 
  should provide container's memory limit [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected downwardAPI
... skipping 17 lines ...
Dec 12 07:23:03.092: INFO: Waiting for pod downwardapi-volume-289c7d5c-4c10-4719-8708-036fa8d1129c to disappear
Dec 12 07:23:03.149: INFO: Pod downwardapi-volume-289c7d5c-4c10-4719-8708-036fa8d1129c no longer exists
[AfterEach] [sig-storage] Projected downwardAPI
  test/e2e/framework/framework.go:150
Dec 12 07:23:03.149: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-9114" for this suite.
•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide container's memory limit [NodeConformance] [Conformance]","total":280,"completed":114,"skipped":1994,"failed":0}
SSSS
------------------------------
[k8s.io] Docker Containers 
  should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Docker Containers
... skipping 15 lines ...
Dec 12 07:23:05.881: INFO: Waiting for pod client-containers-eaec4517-6596-4184-9a2c-509f11892045 to disappear
Dec 12 07:23:05.937: INFO: Pod client-containers-eaec4517-6596-4184-9a2c-509f11892045 no longer exists
[AfterEach] [k8s.io] Docker Containers
  test/e2e/framework/framework.go:150
Dec 12 07:23:05.937: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "containers-7148" for this suite.
•{"msg":"PASSED [k8s.io] Docker Containers should be able to override the image's default command (docker entrypoint) [NodeConformance] [Conformance]","total":280,"completed":115,"skipped":1998,"failed":0}
SSSS
------------------------------
[sig-storage] EmptyDir volumes 
  should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 07:23:08.665: INFO: Waiting for pod pod-9f00c5a1-935c-4f6d-ae4e-b5927942e023 to disappear
Dec 12 07:23:08.722: INFO: Pod pod-9f00c5a1-935c-4f6d-ae4e-b5927942e023 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:23:08.722: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-3065" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0644,default) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":116,"skipped":2002,"failed":0}
S
------------------------------
[sig-node] Downward API 
  should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-node] Downward API
... skipping 15 lines ...
Dec 12 07:23:11.437: INFO: Waiting for pod downward-api-eaa0f231-14ad-4b93-a52d-8509fce11364 to disappear
Dec 12 07:23:11.493: INFO: Pod downward-api-eaa0f231-14ad-4b93-a52d-8509fce11364 no longer exists
[AfterEach] [sig-node] Downward API
  test/e2e/framework/framework.go:150
Dec 12 07:23:11.493: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-6388" for this suite.
•{"msg":"PASSED [sig-node] Downward API should provide pod name, namespace and IP address as env vars [NodeConformance] [Conformance]","total":280,"completed":117,"skipped":2003,"failed":0}
SSSSSSSSSSSSSSS
------------------------------
[sig-storage] Secrets 
  optional updates should be reflected in volume [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Secrets
... skipping 12 lines ...
STEP: Creating secret with name s-test-opt-create-a028d76e-f3e6-4dd3-8dab-3ceb17ea187b
STEP: waiting to observe update in volume
[AfterEach] [sig-storage] Secrets
  test/e2e/framework/framework.go:150
Dec 12 07:23:16.846: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "secrets-4993" for this suite.
•{"msg":"PASSED [sig-storage] Secrets optional updates should be reflected in volume [NodeConformance] [Conformance]","total":280,"completed":118,"skipped":2018,"failed":0}
SSSSSS
------------------------------
[sig-api-machinery] ResourceQuota 
  should create a ResourceQuota and capture the life of a pod. [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] ResourceQuota
... skipping 16 lines ...
STEP: Deleting the pod
STEP: Ensuring resource quota status released the pod usage
[AfterEach] [sig-api-machinery] ResourceQuota
  test/e2e/framework/framework.go:150
Dec 12 07:23:30.845: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "resourcequota-280" for this suite.
•{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a pod. [Conformance]","total":280,"completed":119,"skipped":2024,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-apps] Job 
  should adopt matching orphans and release non-matching pods [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Job
... skipping 19 lines ...
Dec 12 07:23:34.713: INFO: Pod "adopt-release-4jlgt": Phase="Running", Reason="", readiness=true. Elapsed: 56.091207ms
Dec 12 07:23:34.714: INFO: Pod "adopt-release-4jlgt" satisfied condition "released"
[AfterEach] [sig-apps] Job
  test/e2e/framework/framework.go:150
Dec 12 07:23:34.714: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "job-1719" for this suite.
•{"msg":"PASSED [sig-apps] Job should adopt matching orphans and release non-matching pods [Conformance]","total":280,"completed":120,"skipped":2070,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] Garbage collector 
  should not be blocked by dependency circle [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Garbage collector
... skipping 8 lines ...
Dec 12 07:23:35.369: INFO: pod2.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod1", UID:"d8103cf0-3a9b-4a96-bccd-454b79068728", Controller:(*bool)(0xc003167396), BlockOwnerDeletion:(*bool)(0xc003167397)}}
Dec 12 07:23:35.427: INFO: pod3.ObjectMeta.OwnerReferences=[]v1.OwnerReference{v1.OwnerReference{APIVersion:"v1", Kind:"Pod", Name:"pod2", UID:"2c61cfaf-2c83-4632-b057-6987d2317423", Controller:(*bool)(0xc0031b5b16), BlockOwnerDeletion:(*bool)(0xc0031b5b17)}}
[AfterEach] [sig-api-machinery] Garbage collector
  test/e2e/framework/framework.go:150
Dec 12 07:23:40.546: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "gc-7849" for this suite.
•{"msg":"PASSED [sig-api-machinery] Garbage collector should not be blocked by dependency circle [Conformance]","total":280,"completed":121,"skipped":2137,"failed":0}
SSS
------------------------------
[sig-storage] ConfigMap 
  optional updates should be reflected in volume [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] ConfigMap
... skipping 12 lines ...
STEP: Creating configMap with name cm-test-opt-create-c07b3dd4-2996-4e29-a320-55f1c2027b92
STEP: waiting to observe update in volume
[AfterEach] [sig-storage] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 07:23:45.909: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-4646" for this suite.
•{"msg":"PASSED [sig-storage] ConfigMap optional updates should be reflected in volume [NodeConformance] [Conformance]","total":280,"completed":122,"skipped":2140,"failed":0}
SSSSSSSSSSSSSS
------------------------------
[k8s.io] Probing container 
  should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Probing container
... skipping 12 lines ...
Dec 12 07:23:48.492: INFO: Initial restart count of pod test-webserver-a981cb25-d19d-47e3-8c78-83e90d7d97fb is 0
STEP: deleting the pod
[AfterEach] [k8s.io] Probing container
  test/e2e/framework/framework.go:150
Dec 12 07:27:49.320: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-probe-2692" for this suite.
•{"msg":"PASSED [k8s.io] Probing container should *not* be restarted with a /healthz http liveness probe [NodeConformance] [Conformance]","total":280,"completed":123,"skipped":2154,"failed":0}
SSSSSSSSSSSSSSSS
------------------------------
[k8s.io] [sig-node] Events 
  should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] [sig-node] Events
... skipping 16 lines ...
Dec 12 07:27:56.091: INFO: Saw kubelet event for our pod.
STEP: deleting the pod
[AfterEach] [k8s.io] [sig-node] Events
  test/e2e/framework/framework.go:150
Dec 12 07:27:56.153: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "events-3219" for this suite.
•{"msg":"PASSED [k8s.io] [sig-node] Events should be sent by kubelets and the scheduler about pods scheduling and running  [Conformance]","total":280,"completed":124,"skipped":2170,"failed":0}
SSSS
------------------------------
[sig-cli] Kubectl client Kubectl logs 
  should be able to retrieve and filter logs  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 51 lines ...
Dec 12 07:28:13.473: INFO: stderr: ""
Dec 12 07:28:13.473: INFO: stdout: "pod \"logs-generator\" deleted\n"
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:28:13.473: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-5355" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl logs should be able to retrieve and filter logs  [Conformance]","total":280,"completed":125,"skipped":2174,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-network] Networking Granular Checks: Pods 
  should function for intra-pod communication: udp [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] Networking
... skipping 16 lines ...
Dec 12 07:28:37.179: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
Dec 12 07:28:37.620: INFO: Waiting for responses: map[]
[AfterEach] [sig-network] Networking
  test/e2e/framework/framework.go:150
Dec 12 07:28:37.620: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pod-network-test-9929" for this suite.
•{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for intra-pod communication: udp [NodeConformance] [Conformance]","total":280,"completed":126,"skipped":2212,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected downwardAPI 
  should update labels on modification [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected downwardAPI
... skipping 9 lines ...
STEP: Creating the pod
Dec 12 07:28:40.897: INFO: Successfully updated pod "labelsupdatedb3c080f-66cc-473e-b334-aa915e371596"
[AfterEach] [sig-storage] Projected downwardAPI
  test/e2e/framework/framework.go:150
Dec 12 07:28:45.081: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-9196" for this suite.
•{"msg":"PASSED [sig-storage] Projected downwardAPI should update labels on modification [NodeConformance] [Conformance]","total":280,"completed":127,"skipped":2236,"failed":0}
SSSSSSSSSSSS
------------------------------
[sig-storage] EmptyDir volumes 
  should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 07:28:47.791: INFO: Waiting for pod pod-427e94d6-3032-4602-bb65-a952723b5250 to disappear
Dec 12 07:28:47.847: INFO: Pod pod-427e94d6-3032-4602-bb65-a952723b5250 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:28:47.847: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-6118" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0777,default) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":128,"skipped":2248,"failed":0}
SSSSSSSS
------------------------------
[sig-storage] ConfigMap 
  should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] ConfigMap
... skipping 16 lines ...
Dec 12 07:28:50.626: INFO: Waiting for pod pod-configmaps-c9dc2c9f-774d-4943-bbdf-91e47dc4a4e7 to disappear
Dec 12 07:28:50.683: INFO: Pod pod-configmaps-c9dc2c9f-774d-4943-bbdf-91e47dc4a4e7 no longer exists
[AfterEach] [sig-storage] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 07:28:50.683: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-6447" for this suite.
•{"msg":"PASSED [sig-storage] ConfigMap should be consumable from pods in volume as non-root [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":129,"skipped":2256,"failed":0}
SSSSSSS
------------------------------
[k8s.io] [sig-node] PreStop 
  should call prestop when killing a pod  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] [sig-node] PreStop
... skipping 25 lines ...
}
STEP: Deleting the server pod
[AfterEach] [k8s.io] [sig-node] PreStop
  test/e2e/framework/framework.go:150
Dec 12 07:29:00.637: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "prestop-6224" for this suite.
•{"msg":"PASSED [k8s.io] [sig-node] PreStop should call prestop when killing a pod  [Conformance]","total":280,"completed":130,"skipped":2263,"failed":0}
SSSSSSSS
------------------------------
[sig-api-machinery] Garbage collector 
  should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Garbage collector
... skipping 35 lines ...
For evicted_pods_total:

[AfterEach] [sig-api-machinery] Garbage collector
  test/e2e/framework/framework.go:150
Dec 12 07:29:11.845: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "gc-4746" for this suite.
•{"msg":"PASSED [sig-api-machinery] Garbage collector should not delete dependents that have both valid owner and owner that's waiting for dependents to be deleted [Conformance]","total":280,"completed":131,"skipped":2271,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] ResourceQuota 
  should create a ResourceQuota and capture the life of a secret. [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] ResourceQuota
... skipping 13 lines ...
STEP: Deleting a secret
STEP: Ensuring resource quota status released usage
[AfterEach] [sig-api-machinery] ResourceQuota
  test/e2e/framework/framework.go:150
Dec 12 07:29:29.674: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "resourcequota-9934" for this suite.
•{"msg":"PASSED [sig-api-machinery] ResourceQuota should create a ResourceQuota and capture the life of a secret. [Conformance]","total":280,"completed":132,"skipped":2294,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Kubelet when scheduling a busybox Pod with hostAliases 
  should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Kubelet
... skipping 7 lines ...
[It] should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[AfterEach] [k8s.io] Kubelet
  test/e2e/framework/framework.go:150
Dec 12 07:29:32.325: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubelet-test-8704" for this suite.
•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox Pod with hostAliases should write entries to /etc/hosts [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":133,"skipped":2324,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] ConfigMap 
  updates should be reflected in volume [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] ConfigMap
... skipping 9 lines ...
STEP: Updating configmap configmap-test-upd-30ec4a67-00a4-4a40-890c-9bbfc00acf7f
STEP: waiting to observe update in volume
[AfterEach] [sig-storage] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 07:29:37.333: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-1193" for this suite.
•{"msg":"PASSED [sig-storage] ConfigMap updates should be reflected in volume [NodeConformance] [Conformance]","total":280,"completed":134,"skipped":2377,"failed":0}
SSSSSSSSSSSSSSSSSS
------------------------------
[sig-apps] Deployment 
  deployment should support rollover [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Deployment
... skipping 45 lines ...
Dec 12 07:29:55.199: INFO: Pod "test-rollover-deployment-574d6dfbff-72x85" is available:
&Pod{ObjectMeta:{test-rollover-deployment-574d6dfbff-72x85 test-rollover-deployment-574d6dfbff- deployment-8614 /api/v1/namespaces/deployment-8614/pods/test-rollover-deployment-574d6dfbff-72x85 e18d3453-49e7-411a-a937-5ca4dd7275a9 14648 0 2019-12-12 07:29:42 +0000 UTC <nil> <nil> map[name:rollover-pod pod-template-hash:574d6dfbff] map[cni.projectcalico.org/podIP:192.168.220.23/32] [{apps/v1 ReplicaSet test-rollover-deployment-574d6dfbff 1f24ce19-9176-4ecc-88c4-903fd87d246d 0xc00324e117 0xc00324e118}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-x9mgp,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-x9mgp,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:agnhost,Image:gcr.io/kubernetes-e2e-test-images/agnhost:2.8,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-x9mgp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-18.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Running,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:29:42 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:29:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:ContainersReady,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:29:44 +0000 UTC,Reason:,Message:,},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:29:42 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.18,PodIP:192.168.220.23,StartTime:2019-12-12 07:29:42 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:agnhost,State:ContainerState{Waiting:nil,Running:&ContainerStateRunning{StartedAt:2019-12-12 07:29:43 +0000 UTC,},Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:true,RestartCount:0,Image:gcr.io/kubernetes-e2e-test-images/agnhost:2.8,ImageID:gcr.io/kubernetes-e2e-test-images/agnhost@sha256:daf5332100521b1256d0e3c56d697a238eaec3af48897ed9167cbadd426773b5,ContainerID:containerd://7cea50d8de3ce404eef0d4273ee64bfb1e70e3916ab7d836aec7e54040d88dbb,Started:*true,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.220.23,},},EphemeralContainerStatuses:[]ContainerStatus{},},}
[AfterEach] [sig-apps] Deployment
  test/e2e/framework/framework.go:150
Dec 12 07:29:55.200: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "deployment-8614" for this suite.
•{"msg":"PASSED [sig-apps] Deployment deployment should support rollover [Conformance]","total":280,"completed":135,"skipped":2395,"failed":0}

------------------------------
[k8s.io] Security Context when creating containers with AllowPrivilegeEscalation 
  should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Security Context
... skipping 12 lines ...
Dec 12 07:29:59.787: INFO: Pod "alpine-nnp-false-9b22f9d8-0f76-411a-aa86-a3468d6f32c9": Phase="Succeeded", Reason="", readiness=false. Elapsed: 4.173326081s
Dec 12 07:29:59.788: INFO: Pod "alpine-nnp-false-9b22f9d8-0f76-411a-aa86-a3468d6f32c9" satisfied condition "success or failure"
[AfterEach] [k8s.io] Security Context
  test/e2e/framework/framework.go:150
Dec 12 07:29:59.849: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "security-context-test-2559" for this suite.
•{"msg":"PASSED [k8s.io] Security Context when creating containers with AllowPrivilegeEscalation should not allow privilege escalation when false [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":136,"skipped":2395,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] EmptyDir volumes 
  should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 07:30:02.575: INFO: Waiting for pod pod-a4ae9375-a211-4326-9a8f-42dbf23893a9 to disappear
Dec 12 07:30:02.635: INFO: Pod pod-a4ae9375-a211-4326-9a8f-42dbf23893a9 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:30:02.636: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-1563" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (non-root,0666,default) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":137,"skipped":2431,"failed":0}

------------------------------
[sig-scheduling] NoExecuteTaintManager Single Pod [Serial] 
  removing taint cancels eviction [Disruptive] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-scheduling] NoExecuteTaintManager Single Pod [Serial]
... skipping 20 lines ...
STEP: Waiting some time to make sure that toleration time passed.
Dec 12 07:32:18.921: INFO: Pod wasn't evicted. Test successful
[AfterEach] [sig-scheduling] NoExecuteTaintManager Single Pod [Serial]
  test/e2e/framework/framework.go:150
Dec 12 07:32:18.921: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "taint-single-pod-8091" for this suite.
•{"msg":"PASSED [sig-scheduling] NoExecuteTaintManager Single Pod [Serial] removing taint cancels eviction [Disruptive] [Conformance]","total":280,"completed":138,"skipped":2431,"failed":0}
SSSSSSSSSSS
------------------------------
[k8s.io] Container Runtime blackbox test on terminated container 
  should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Container Runtime
... skipping 12 lines ...
Dec 12 07:32:21.566: INFO: Expected: &{OK} to match Container's Termination Message: OK --
STEP: delete the container
[AfterEach] [k8s.io] Container Runtime
  test/e2e/framework/framework.go:150
Dec 12 07:32:21.686: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-runtime-9818" for this suite.
•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":280,"completed":139,"skipped":2442,"failed":0}

------------------------------
[sig-network] Services 
  should be able to change the type from ExternalName to ClusterIP [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] Services
... skipping 22 lines ...
[AfterEach] [sig-network] Services
  test/e2e/framework/framework.go:150
Dec 12 07:32:30.390: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "services-2863" for this suite.
[AfterEach] [sig-network] Services
  test/e2e/network/service.go:144
•{"msg":"PASSED [sig-network] Services should be able to change the type from ExternalName to ClusterIP [Conformance]","total":280,"completed":140,"skipped":2442,"failed":0}
SS
------------------------------
[k8s.io] Security Context When creating a pod with readOnlyRootFilesystem 
  should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Security Context
... skipping 11 lines ...
Dec 12 07:32:32.911: INFO: Pod "busybox-readonly-false-0babcff7-ffb9-480b-bec1-dced999851ff": Phase="Succeeded", Reason="", readiness=false. Elapsed: 2.112324701s
Dec 12 07:32:32.911: INFO: Pod "busybox-readonly-false-0babcff7-ffb9-480b-bec1-dced999851ff" satisfied condition "success or failure"
[AfterEach] [k8s.io] Security Context
  test/e2e/framework/framework.go:150
Dec 12 07:32:32.911: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "security-context-test-1387" for this suite.
•{"msg":"PASSED [k8s.io] Security Context When creating a pod with readOnlyRootFilesystem should run the container with writable rootfs when readOnlyRootFilesystem=false [NodeConformance] [Conformance]","total":280,"completed":141,"skipped":2444,"failed":0}
SSSSSSSSSS
------------------------------
[sig-apps] ReplicationController 
  should surface a failure condition on a common issue like exceeded quota [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] ReplicationController
... skipping 11 lines ...
Dec 12 07:32:33.605: INFO: Updating replication controller "condition-test"
STEP: Checking rc "condition-test" has no failure condition set
[AfterEach] [sig-apps] ReplicationController
  test/e2e/framework/framework.go:150
Dec 12 07:32:33.662: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "replication-controller-4735" for this suite.
•{"msg":"PASSED [sig-apps] ReplicationController should surface a failure condition on a common issue like exceeded quota [Conformance]","total":280,"completed":142,"skipped":2454,"failed":0}
SSSSSSS
------------------------------
[sig-network] Proxy version v1 
  should proxy logs on node using proxy subresource  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] version v1
... skipping 85 lines ...
<a href="amazon/">amazon/</a>
<a href="apt/... (200; 58.693413ms)
[AfterEach] version v1
  test/e2e/framework/framework.go:150
Dec 12 07:32:35.241: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "proxy-2769" for this suite.
•{"msg":"PASSED [sig-network] Proxy version v1 should proxy logs on node using proxy subresource  [Conformance]","total":280,"completed":143,"skipped":2461,"failed":0}

------------------------------
[sig-storage] EmptyDir volumes 
  should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 07:32:37.971: INFO: Waiting for pod pod-b58ab070-766d-4826-9ad8-d65f8e5835d2 to disappear
Dec 12 07:32:38.028: INFO: Pod pod-b58ab070-766d-4826-9ad8-d65f8e5835d2 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:32:38.028: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-1328" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":144,"skipped":2461,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected secret 
  should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected secret
... skipping 16 lines ...
Dec 12 07:32:40.805: INFO: Waiting for pod pod-projected-secrets-d78a2c55-eaf3-4344-a1a1-858ec63a5e57 to disappear
Dec 12 07:32:40.860: INFO: Pod pod-projected-secrets-d78a2c55-eaf3-4344-a1a1-858ec63a5e57 no longer exists
[AfterEach] [sig-storage] Projected secret
  test/e2e/framework/framework.go:150
Dec 12 07:32:40.860: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-8184" for this suite.
•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":145,"skipped":2486,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-network] DNS 
  should support configurable pod DNS nameservers [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] DNS
... skipping 14 lines ...
Dec 12 07:32:43.819: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
Dec 12 07:32:44.257: INFO: Deleting pod dns-5944...
[AfterEach] [sig-network] DNS
  test/e2e/framework/framework.go:150
Dec 12 07:32:44.328: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "dns-5944" for this suite.
•{"msg":"PASSED [sig-network] DNS should support configurable pod DNS nameservers [Conformance]","total":280,"completed":146,"skipped":2558,"failed":0}
SSSSSS
------------------------------
[k8s.io] Pods 
  should support remote command execution over websockets [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Pods
... skipping 10 lines ...
STEP: creating the pod
STEP: submitting the pod to kubernetes
[AfterEach] [k8s.io] Pods
  test/e2e/framework/framework.go:150
Dec 12 07:32:47.153: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pods-5579" for this suite.
•{"msg":"PASSED [k8s.io] Pods should support remote command execution over websockets [NodeConformance] [Conformance]","total":280,"completed":147,"skipped":2564,"failed":0}
S
------------------------------
[sig-storage] Projected secret 
  should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected secret
... skipping 16 lines ...
Dec 12 07:32:49.937: INFO: Waiting for pod pod-projected-secrets-09f5e4b8-c9b0-4e19-a0d5-a88de11f8e26 to disappear
Dec 12 07:32:49.995: INFO: Pod pod-projected-secrets-09f5e4b8-c9b0-4e19-a0d5-a88de11f8e26 no longer exists
[AfterEach] [sig-storage] Projected secret
  test/e2e/framework/framework.go:150
Dec 12 07:32:49.996: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-2394" for this suite.
•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":148,"skipped":2565,"failed":0}
S
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  should mutate custom resource with different stored version [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 24 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:32:58.353: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-4476" for this suite.
STEP: Destroying namespace "webhook-4476-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource with different stored version [Conformance]","total":280,"completed":149,"skipped":2566,"failed":0}
SSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  should mutate custom resource [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 22 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:33:06.404: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-7673" for this suite.
STEP: Destroying namespace "webhook-7673-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should mutate custom resource [Conformance]","total":280,"completed":150,"skipped":2569,"failed":0}
SSSSSSSSSS
------------------------------
[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
  should perform rolling updates and roll backs of template modifications [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] StatefulSet
... skipping 59 lines ...
Dec 12 07:35:03.093: INFO: Waiting for statefulset status.replicas updated to 0
Dec 12 07:35:03.151: INFO: Deleting statefulset ss2
[AfterEach] [sig-apps] StatefulSet
  test/e2e/framework/framework.go:150
Dec 12 07:35:03.409: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "statefulset-8174" for this suite.
•{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] should perform rolling updates and roll backs of template modifications [Conformance]","total":280,"completed":151,"skipped":2579,"failed":0}
SS
------------------------------
[sig-node] Downward API 
  should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-node] Downward API
... skipping 15 lines ...
Dec 12 07:35:06.143: INFO: Waiting for pod downward-api-4cf2f532-e884-49c9-8cbd-85353169c91b to disappear
Dec 12 07:35:06.199: INFO: Pod downward-api-4cf2f532-e884-49c9-8cbd-85353169c91b no longer exists
[AfterEach] [sig-node] Downward API
  test/e2e/framework/framework.go:150
Dec 12 07:35:06.199: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-8996" for this suite.
•{"msg":"PASSED [sig-node] Downward API should provide container's limits.cpu/memory and requests.cpu/memory as env vars [NodeConformance] [Conformance]","total":280,"completed":152,"skipped":2581,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  should be able to deny custom resource creation, update and deletion [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 27 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:35:14.532: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-7820" for this suite.
STEP: Destroying namespace "webhook-7820-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should be able to deny custom resource creation, update and deletion [Conformance]","total":280,"completed":153,"skipped":2604,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-cli] Kubectl client Kubectl run job 
  should create a job from an image when restart is OnFailure  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 19 lines ...
Dec 12 07:35:16.291: INFO: stderr: ""
Dec 12 07:35:16.291: INFO: stdout: "job.batch \"e2e-test-httpd-job\" deleted\n"
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:35:16.291: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-2184" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl run job should create a job from an image when restart is OnFailure  [Conformance]","total":280,"completed":154,"skipped":2645,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
  Should recreate evicted statefulset [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] StatefulSet
... skipping 12 lines ...
STEP: Looking for a node to schedule stateful set and pod
STEP: Creating pod with conflicting port in namespace statefulset-9748
STEP: Creating statefulset with conflicting port in namespace statefulset-9748
STEP: Waiting until pod test-pod will start running in namespace statefulset-9748
STEP: Waiting until stateful pod ss-0 will be recreated and deleted at least once in namespace statefulset-9748
Dec 12 07:35:19.134: INFO: Observed stateful pod in namespace: statefulset-9748, name: ss-0, uid: 7bfb1da9-af73-4a3d-9c58-7b58ad8ca89b, status phase: Pending. Waiting for statefulset controller to delete.
Dec 12 07:35:23.431: INFO: Observed stateful pod in namespace: statefulset-9748, name: ss-0, uid: 7bfb1da9-af73-4a3d-9c58-7b58ad8ca89b, status phase: Failed. Waiting for statefulset controller to delete.
Dec 12 07:35:23.442: INFO: Observed stateful pod in namespace: statefulset-9748, name: ss-0, uid: 7bfb1da9-af73-4a3d-9c58-7b58ad8ca89b, status phase: Failed. Waiting for statefulset controller to delete.
Dec 12 07:35:23.456: INFO: Observed delete event for stateful pod ss-0 in namespace statefulset-9748
STEP: Removing pod with conflicting port in namespace statefulset-9748
STEP: Waiting when stateful pod ss-0 will be recreated in namespace statefulset-9748 and will be in running state
[AfterEach] [k8s.io] Basic StatefulSet functionality [StatefulSetBasic]
  test/e2e/apps/statefulset.go:90
Dec 12 07:35:27.723: INFO: Deleting all statefulset in ns statefulset-9748
Dec 12 07:35:27.779: INFO: Scaling statefulset ss to 0
Dec 12 07:35:38.011: INFO: Waiting for statefulset status.replicas updated to 0
Dec 12 07:35:38.069: INFO: Deleting statefulset ss
[AfterEach] [sig-apps] StatefulSet
  test/e2e/framework/framework.go:150
Dec 12 07:35:38.243: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "statefulset-9748" for this suite.
•{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Should recreate evicted statefulset [Conformance]","total":280,"completed":155,"skipped":2684,"failed":0}

------------------------------
[sig-cli] Kubectl client Kubectl api-versions 
  should check if v1 is in available api versions  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 11 lines ...
Dec 12 07:35:39.068: INFO: stderr: ""
Dec 12 07:35:39.069: INFO: stdout: "admissionregistration.k8s.io/v1\nadmissionregistration.k8s.io/v1beta1\napiextensions.k8s.io/v1\napiextensions.k8s.io/v1beta1\napiregistration.k8s.io/v1\napiregistration.k8s.io/v1beta1\napps/v1\nauthentication.k8s.io/v1\nauthentication.k8s.io/v1beta1\nauthorization.k8s.io/v1\nauthorization.k8s.io/v1beta1\nautoscaling/v1\nautoscaling/v2beta1\nautoscaling/v2beta2\nbatch/v1\nbatch/v1beta1\ncertificates.k8s.io/v1beta1\ncoordination.k8s.io/v1\ncoordination.k8s.io/v1beta1\ncrd.projectcalico.org/v1\ndiscovery.k8s.io/v1beta1\nevents.k8s.io/v1beta1\nextensions/v1beta1\nnetworking.k8s.io/v1\nnetworking.k8s.io/v1beta1\nnode.k8s.io/v1beta1\npolicy/v1beta1\nrbac.authorization.k8s.io/v1\nrbac.authorization.k8s.io/v1beta1\nscheduling.k8s.io/v1\nscheduling.k8s.io/v1beta1\nstorage.k8s.io/v1\nstorage.k8s.io/v1beta1\nv1\n"
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:35:39.069: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-9182" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl api-versions should check if v1 is in available api versions  [Conformance]","total":280,"completed":156,"skipped":2684,"failed":0}
SSSSSS
------------------------------
[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] 
  updates the published spec when one version gets renamed [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
... skipping 11 lines ...
STEP: check the old version name is removed
STEP: check the other version is not changed
[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 07:36:05.084: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "crd-publish-openapi-6566" for this suite.
•{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] updates the published spec when one version gets renamed [Conformance]","total":280,"completed":157,"skipped":2690,"failed":0}
SSSSSSS
------------------------------
[k8s.io] Kubelet when scheduling a read only busybox container 
  should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Kubelet
... skipping 7 lines ...
[It] should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[AfterEach] [k8s.io] Kubelet
  test/e2e/framework/framework.go:150
Dec 12 07:36:07.759: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubelet-test-1111" for this suite.
•{"msg":"PASSED [k8s.io] Kubelet when scheduling a read only busybox container should not write to root filesystem [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":158,"skipped":2697,"failed":0}
SSSSSSSSSSSSSSSS
------------------------------
[k8s.io] InitContainer [NodeConformance] 
  should invoke init containers on a RestartNever pod [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] InitContainer [NodeConformance]
... skipping 9 lines ...
STEP: creating the pod
Dec 12 07:36:08.103: INFO: PodSpec: initContainers in spec.initContainers
[AfterEach] [k8s.io] InitContainer [NodeConformance]
  test/e2e/framework/framework.go:150
Dec 12 07:36:12.051: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "init-container-1556" for this suite.
•{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should invoke init containers on a RestartNever pod [Conformance]","total":280,"completed":159,"skipped":2713,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected downwardAPI 
  should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected downwardAPI
... skipping 17 lines ...
Dec 12 07:36:14.763: INFO: Waiting for pod downwardapi-volume-cf61204e-3438-45a5-b463-1d62958fe99c to disappear
Dec 12 07:36:14.820: INFO: Pod downwardapi-volume-cf61204e-3438-45a5-b463-1d62958fe99c no longer exists
[AfterEach] [sig-storage] Projected downwardAPI
  test/e2e/framework/framework.go:150
Dec 12 07:36:14.820: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-3" for this suite.
•{"msg":"PASSED [sig-storage] Projected downwardAPI should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance] [Conformance]","total":280,"completed":160,"skipped":2745,"failed":0}
SSSSSSSSSSSSSS
------------------------------
[sig-scheduling] SchedulerPredicates [Serial] 
  validates resource limits of pods that are allowed to run  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-scheduling] SchedulerPredicates [Serial]
... skipping 60 lines ...
[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
  test/e2e/framework/framework.go:150
Dec 12 07:36:19.767: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "sched-pred-5402" for this suite.
[AfterEach] [sig-scheduling] SchedulerPredicates [Serial]
  test/e2e/scheduling/predicates.go:77
•{"msg":"PASSED [sig-scheduling] SchedulerPredicates [Serial] validates resource limits of pods that are allowed to run  [Conformance]","total":280,"completed":161,"skipped":2759,"failed":0}
SSSSSS
------------------------------
[sig-storage] Projected downwardAPI 
  should update annotations on modification [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected downwardAPI
... skipping 9 lines ...
STEP: Creating the pod
Dec 12 07:36:23.306: INFO: Successfully updated pod "annotationupdate164fc191-e84f-4ac6-ad18-03778180d338"
[AfterEach] [sig-storage] Projected downwardAPI
  test/e2e/framework/framework.go:150
Dec 12 07:36:25.428: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-5117" for this suite.
•{"msg":"PASSED [sig-storage] Projected downwardAPI should update annotations on modification [NodeConformance] [Conformance]","total":280,"completed":162,"skipped":2765,"failed":0}
SSSSS
------------------------------
[k8s.io] Kubelet when scheduling a busybox command in a pod 
  should print the output to logs [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Kubelet
... skipping 7 lines ...
[It] should print the output to logs [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[AfterEach] [k8s.io] Kubelet
  test/e2e/framework/framework.go:150
Dec 12 07:36:28.087: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubelet-test-3773" for this suite.
•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox command in a pod should print the output to logs [NodeConformance] [Conformance]","total":280,"completed":163,"skipped":2770,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Container Runtime blackbox test on terminated container 
  should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Container Runtime
... skipping 12 lines ...
Dec 12 07:36:30.728: INFO: Expected: &{} to match Container's Termination Message:  --
STEP: delete the container
[AfterEach] [k8s.io] Container Runtime
  test/e2e/framework/framework.go:150
Dec 12 07:36:30.857: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-runtime-3303" for this suite.
•{"msg":"PASSED [k8s.io] Container Runtime blackbox test on terminated container should report termination message [LinuxOnly] as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance] [Conformance]","total":280,"completed":164,"skipped":2808,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected secret 
  optional updates should be reflected in volume [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected secret
... skipping 12 lines ...
STEP: Creating secret with name s-test-opt-create-900a5b97-7fe0-4bba-8a87-eabdb761b9d9
STEP: waiting to observe update in volume
[AfterEach] [sig-storage] Projected secret
  test/e2e/framework/framework.go:150
Dec 12 07:36:36.214: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-7467" for this suite.
•{"msg":"PASSED [sig-storage] Projected secret optional updates should be reflected in volume [NodeConformance] [Conformance]","total":280,"completed":165,"skipped":2838,"failed":0}
SS
------------------------------
[sig-storage] Projected configMap 
  optional updates should be reflected in volume [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected configMap
... skipping 12 lines ...
STEP: Creating configMap with name cm-test-opt-create-a9a7657d-2c37-442c-956b-27347d88344b
STEP: waiting to observe update in volume
[AfterEach] [sig-storage] Projected configMap
  test/e2e/framework/framework.go:150
Dec 12 07:36:41.569: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-9563" for this suite.
•{"msg":"PASSED [sig-storage] Projected configMap optional updates should be reflected in volume [NodeConformance] [Conformance]","total":280,"completed":166,"skipped":2840,"failed":0}
SSSSSSSSSSS
------------------------------
[sig-cli] Kubectl client Kubectl describe 
  should check if kubectl describe prints relevant information for rc and pods  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 20 lines ...
Dec 12 07:36:44.725: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
Dec 12 07:36:44.725: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443 --kubeconfig=/home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig describe pod agnhost-master-l2rrs --namespace=kubectl-7233'
Dec 12 07:36:45.243: INFO: stderr: ""
Dec 12 07:36:45.244: INFO: stdout: "Name:         agnhost-master-l2rrs\nNamespace:    kubectl-7233\nPriority:     0\nNode:         ip-10-0-0-18.us-west-2.compute.internal/10.0.0.18\nStart Time:   Thu, 12 Dec 2019 07:36:42 +0000\nLabels:       app=agnhost\n              role=master\nAnnotations:  cni.projectcalico.org/podIP: 192.168.220.45/32\nStatus:       Running\nIP:           192.168.220.45\nIPs:\n  IP:           192.168.220.45\nControlled By:  ReplicationController/agnhost-master\nContainers:\n  agnhost-master:\n    Container ID:   containerd://971812a9002aaa925adfa43236c563e77d64d48e6cb6d2185d529ba4ab3ddc94\n    Image:          gcr.io/kubernetes-e2e-test-images/agnhost:2.8\n    Image ID:       gcr.io/kubernetes-e2e-test-images/agnhost@sha256:daf5332100521b1256d0e3c56d697a238eaec3af48897ed9167cbadd426773b5\n    Port:           6379/TCP\n    Host Port:      0/TCP\n    State:          Running\n      Started:      Thu, 12 Dec 2019 07:36:43 +0000\n    Ready:          True\n    Restart Count:  0\n    Environment:    <none>\n    Mounts:\n      /var/run/secrets/kubernetes.io/serviceaccount from default-token-cnthr (ro)\nConditions:\n  Type              Status\n  Initialized       True \n  Ready             True \n  ContainersReady   True \n  PodScheduled      True \nVolumes:\n  default-token-cnthr:\n    Type:        Secret (a volume populated by a Secret)\n    SecretName:  default-token-cnthr\n    Optional:    false\nQoS Class:       BestEffort\nNode-Selectors:  <none>\nTolerations:     node.kubernetes.io/not-ready:NoExecute for 300s\n                 node.kubernetes.io/unreachable:NoExecute for 300s\nEvents:\n  Type    Reason     Age        From                                              Message\n  ----    ------     ----       ----                                              -------\n  Normal  Scheduled  <unknown>  default-scheduler                                 Successfully assigned kubectl-7233/agnhost-master-l2rrs to ip-10-0-0-18.us-west-2.compute.internal\n  Normal  Pulled     2s         kubelet, ip-10-0-0-18.us-west-2.compute.internal  Container image \"gcr.io/kubernetes-e2e-test-images/agnhost:2.8\" already present on machine\n  Normal  Created    2s         kubelet, ip-10-0-0-18.us-west-2.compute.internal  Created container agnhost-master\n  Normal  Started    2s         kubelet, ip-10-0-0-18.us-west-2.compute.internal  Started container agnhost-master\n"
Dec 12 07:36:45.244: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443 --kubeconfig=/home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig describe rc agnhost-master --namespace=kubectl-7233'
Dec 12 07:36:45.854: INFO: stderr: ""
Dec 12 07:36:45.854: INFO: stdout: "Name:         agnhost-master\nNamespace:    kubectl-7233\nSelector:     app=agnhost,role=master\nLabels:       app=agnhost\n              role=master\nAnnotations:  <none>\nReplicas:     1 current / 1 desired\nPods Status:  1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n  Labels:  app=agnhost\n           role=master\n  Containers:\n   agnhost-master:\n    Image:        gcr.io/kubernetes-e2e-test-images/agnhost:2.8\n    Port:         6379/TCP\n    Host Port:    0/TCP\n    Environment:  <none>\n    Mounts:       <none>\n  Volumes:        <none>\nEvents:\n  Type    Reason            Age   From                    Message\n  ----    ------            ----  ----                    -------\n  Normal  SuccessfulCreate  3s    replication-controller  Created pod: agnhost-master-l2rrs\n"
Dec 12 07:36:45.854: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443 --kubeconfig=/home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig describe service agnhost-master --namespace=kubectl-7233'
Dec 12 07:36:46.430: INFO: stderr: ""
Dec 12 07:36:46.430: INFO: stdout: "Name:              agnhost-master\nNamespace:         kubectl-7233\nLabels:            app=agnhost\n                   role=master\nAnnotations:       <none>\nSelector:          app=agnhost,role=master\nType:              ClusterIP\nIP:                10.96.51.19\nPort:              <unset>  6379/TCP\nTargetPort:        agnhost-server/TCP\nEndpoints:         192.168.220.45:6379\nSession Affinity:  None\nEvents:            <none>\n"
Dec 12 07:36:46.489: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443 --kubeconfig=/home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig describe node ip-10-0-0-157.us-west-2.compute.internal'
Dec 12 07:36:47.188: INFO: stderr: ""
Dec 12 07:36:47.188: INFO: stdout: "Name:               ip-10-0-0-157.us-west-2.compute.internal\nRoles:              master\nLabels:             beta.kubernetes.io/arch=amd64\n                    beta.kubernetes.io/instance-type=t2.medium\n                    beta.kubernetes.io/os=linux\n                    failure-domain.beta.kubernetes.io/region=us-west-2\n                    failure-domain.beta.kubernetes.io/zone=us-west-2a\n                    kubernetes.io/arch=amd64\n                    kubernetes.io/hostname=ip-10-0-0-157.us-west-2.compute.internal\n                    kubernetes.io/os=linux\n                    node-role.kubernetes.io/master=\n                    node.kubernetes.io/instance-type=t2.medium\n                    topology.kubernetes.io/region=us-west-2\n                    topology.kubernetes.io/zone=us-west-2a\nAnnotations:        kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock\n                    node.alpha.kubernetes.io/ttl: 0\n                    projectcalico.org/IPv4Address: 10.0.0.157/24\n                    projectcalico.org/IPv4IPIPTunnelAddr: 192.168.191.64\n                    volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp:  Thu, 12 Dec 2019 06:54:11 +0000\nTaints:             node-role.kubernetes.io/master:NoSchedule\nUnschedulable:      false\nLease:\n  HolderIdentity:  ip-10-0-0-157.us-west-2.compute.internal\n  AcquireTime:     <unset>\n  RenewTime:       Thu, 12 Dec 2019 07:36:39 +0000\nConditions:\n  Type                 Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message\n  ----                 ------  -----------------                 ------------------                ------                       -------\n  NetworkUnavailable   False   Thu, 12 Dec 2019 06:54:36 +0000   Thu, 12 Dec 2019 06:54:36 +0000   CalicoIsUp                   Calico is running on this node\n  MemoryPressure       False   Thu, 12 Dec 2019 07:34:52 +0000   Thu, 12 Dec 2019 06:54:11 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available\n  DiskPressure         False   Thu, 12 Dec 2019 07:34:52 +0000   Thu, 12 Dec 2019 06:54:11 +0000   KubeletHasNoDiskPressure     kubelet has no disk pressure\n  PIDPressure          False   Thu, 12 Dec 2019 07:34:52 +0000   Thu, 12 Dec 2019 06:54:11 +0000   KubeletHasSufficientPID      kubelet has sufficient PID available\n  Ready                True    Thu, 12 Dec 2019 07:34:52 +0000   Thu, 12 Dec 2019 06:54:31 +0000   KubeletReady                 kubelet is posting ready status. 
AppArmor enabled\nAddresses:\n  InternalIP:   10.0.0.157\n  Hostname:     ip-10-0-0-157.us-west-2.compute.internal\n  InternalDNS:  ip-10-0-0-157.us-west-2.compute.internal\nCapacity:\n  attachable-volumes-aws-ebs:  39\n  cpu:                         2\n  ephemeral-storage:           8065444Ki\n  hugepages-2Mi:               0\n  memory:                      4038204Ki\n  pods:                        110\nAllocatable:\n  attachable-volumes-aws-ebs:  39\n  cpu:                         2\n  ephemeral-storage:           7433113179\n  hugepages-2Mi:               0\n  memory:                      3935804Ki\n  pods:                        110\nSystem Info:\n  Machine ID:                 0dc9350be54b4f15a6b62410b6cb42a5\n  System UUID:                EC2BC76F-3C85-B022-5EAA-C24052BB68C5\n  Boot ID:                    27cc4c50-9d4f-4df8-bd93-386c15ce7ef3\n  Kernel Version:             4.15.0-1052-aws\n  OS Image:                   Ubuntu 18.04.3 LTS\n  Operating System:           linux\n  Architecture:               amd64\n  Container Runtime Version:  containerd://1.3.0\n  Kubelet Version:            v1.18.0-alpha.0.1678+9caece8bd9fab5\n  Kube-Proxy Version:         v1.18.0-alpha.0.1678+9caece8bd9fab5\nPodCIDR:                      192.168.4.0/24\nPodCIDRs:                     192.168.4.0/24\nProviderID:                   aws:///us-west-2a/i-041aca506083e31fa\nNon-terminated Pods:          (6 in total)\n  Namespace                   Name                                                                CPU Requests  CPU Limits  Memory Requests  Memory Limits  AGE\n  ---------                   ----                                                                ------------  ----------  ---------------  -------------  ---\n  kube-system                 calico-node-fgwgw                                                   250m (12%)    0 (0%)      0 (0%)           0 (0%)         42m\n  kube-system                 etcd-ip-10-0-0-157.us-west-2.compute.internal                       0 (0%)        0 (0%)      0 (0%)           0 (0%)         42m\n  kube-system                 kube-apiserver-ip-10-0-0-157.us-west-2.compute.internal             250m (12%)    0 (0%)      0 (0%)           0 (0%)         42m\n  kube-system                 kube-controller-manager-ip-10-0-0-157.us-west-2.compute.internal    200m (10%)    0 (0%)      0 (0%)           0 (0%)         41m\n  kube-system                 kube-proxy-j7nfn                                                    0 (0%)        0 (0%)      0 (0%)           0 (0%)         42m\n  kube-system                 kube-scheduler-ip-10-0-0-157.us-west-2.compute.internal             100m (5%)     0 (0%)      0 (0%)           0 (0%)         41m\nAllocated resources:\n  (Total limits may be over 100 percent, i.e., overcommitted.)\n  Resource                    Requests    Limits\n  --------                    --------    ------\n  cpu                         800m (40%)  0 (0%)\n  memory                      0 (0%)      0 (0%)\n  ephemeral-storage           0 (0%)      0 (0%)\n  attachable-volumes-aws-ebs  0           0\nEvents:\n  Type    Reason    Age   From                                                  Message\n  ----    ------    ----  ----                                                  -------\n  Normal  Starting  42m   kube-proxy, ip-10-0-0-157.us-west-2.compute.internal  Starting kube-proxy.\n"
Dec 12 07:36:47.188: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443 --kubeconfig=/home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig describe namespace kubectl-7233'
Dec 12 07:36:47.785: INFO: stderr: ""
Dec 12 07:36:47.785: INFO: stdout: "Name:         kubectl-7233\nLabels:       e2e-framework=kubectl\n              e2e-run=c795b603-5298-49c4-9ddc-db966c6b84dc\nAnnotations:  <none>\nStatus:       Active\n\nNo resource quota.\n\nNo LimitRange resource.\n"
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:36:47.785: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-7233" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl describe should check if kubectl describe prints relevant information for rc and pods  [Conformance]","total":280,"completed":167,"skipped":2851,"failed":0}
SSSSSSSS
------------------------------
[k8s.io] Probing container 
  should have monotonically increasing restart count [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Probing container
... skipping 17 lines ...
Dec 12 07:39:16.528: INFO: Restart count of pod container-probe-1261/liveness-d3f85c27-0688-4127-833e-130201a4ef2f is now 5 (2m26.161016265s elapsed)
STEP: deleting the pod
[AfterEach] [k8s.io] Probing container
  test/e2e/framework/framework.go:150
Dec 12 07:39:16.593: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-probe-1261" for this suite.
•{"msg":"PASSED [k8s.io] Probing container should have monotonically increasing restart count [NodeConformance] [Conformance]","total":280,"completed":168,"skipped":2859,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected combined 
  should project all components that make up the projection API [Projection][NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected combined
... skipping 17 lines ...
Dec 12 07:39:19.426: INFO: Waiting for pod projected-volume-66c9dc11-b31e-4152-9b28-7ea23133e700 to disappear
Dec 12 07:39:19.482: INFO: Pod projected-volume-66c9dc11-b31e-4152-9b28-7ea23133e700 no longer exists
[AfterEach] [sig-storage] Projected combined
  test/e2e/framework/framework.go:150
Dec 12 07:39:19.482: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-205" for this suite.
•{"msg":"PASSED [sig-storage] Projected combined should project all components that make up the projection API [Projection][NodeConformance] [Conformance]","total":280,"completed":169,"skipped":2899,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-storage] Projected secret 
  should be consumable from pods in volume [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected secret
... skipping 16 lines ...
Dec 12 07:39:22.256: INFO: Waiting for pod pod-projected-secrets-ffcba714-2167-4616-9a8e-c27b55552f94 to disappear
Dec 12 07:39:22.314: INFO: Pod pod-projected-secrets-ffcba714-2167-4616-9a8e-c27b55552f94 no longer exists
[AfterEach] [sig-storage] Projected secret
  test/e2e/framework/framework.go:150
Dec 12 07:39:22.314: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-6461" for this suite.
•{"msg":"PASSED [sig-storage] Projected secret should be consumable from pods in volume [NodeConformance] [Conformance]","total":280,"completed":170,"skipped":2941,"failed":0}
SSSSSS
------------------------------
[sig-network] Networking Granular Checks: Pods 
  should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-network] Networking
... skipping 16 lines ...
Dec 12 07:39:47.143: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
Dec 12 07:39:48.591: INFO: Found all expected endpoints: [netserver-1]
[AfterEach] [sig-network] Networking
  test/e2e/framework/framework.go:150
Dec 12 07:39:48.591: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "pod-network-test-4600" for this suite.
•{"msg":"PASSED [sig-network] Networking Granular Checks: Pods should function for node-pod communication: udp [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":171,"skipped":2947,"failed":0}
SSSSSSSS
------------------------------
[sig-storage] Projected configMap 
  updates should be reflected in volume [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Projected configMap
... skipping 9 lines ...
STEP: Updating configmap projected-configmap-test-upd-0793204c-ee63-482d-99de-5dcdb604b0e6
STEP: waiting to observe update in volume
[AfterEach] [sig-storage] Projected configMap
  test/e2e/framework/framework.go:150
Dec 12 07:39:53.531: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "projected-1371" for this suite.
•{"msg":"PASSED [sig-storage] Projected configMap updates should be reflected in volume [NodeConformance] [Conformance]","total":280,"completed":172,"skipped":2955,"failed":0}
SSSSSSSSSSSSSSSS
------------------------------
[sig-network] Proxy version v1 
  should proxy logs on node with explicit kubelet port using proxy subresource  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] version v1
... skipping 85 lines ...
<a href="amazon/">amazon/</a>
<a href="apt/... (200; 57.130642ms)
[AfterEach] version v1
  test/e2e/framework/framework.go:150
Dec 12 07:39:55.112: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "proxy-8317" for this suite.
•{"msg":"PASSED [sig-network] Proxy version v1 should proxy logs on node with explicit kubelet port using proxy subresource  [Conformance]","total":280,"completed":173,"skipped":2971,"failed":0}
SSSSSSS
------------------------------
[k8s.io] Docker Containers 
  should be able to override the image's default command and arguments [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Docker Containers
... skipping 15 lines ...
Dec 12 07:39:57.819: INFO: Waiting for pod client-containers-09177774-efe6-4062-9188-d62a9d955094 to disappear
Dec 12 07:39:57.874: INFO: Pod client-containers-09177774-efe6-4062-9188-d62a9d955094 no longer exists
[AfterEach] [k8s.io] Docker Containers
  test/e2e/framework/framework.go:150
Dec 12 07:39:57.875: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "containers-401" for this suite.
•{"msg":"PASSED [k8s.io] Docker Containers should be able to override the image's default command and arguments [NodeConformance] [Conformance]","total":280,"completed":174,"skipped":2978,"failed":0}

------------------------------
[sig-apps] ReplicationController 
  should release no longer matching pods [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] ReplicationController
... skipping 9 lines ...
Dec 12 07:39:58.338: INFO: Pod name pod-release: Found 1 pods out of 1
STEP: Then the pod is released
[AfterEach] [sig-apps] ReplicationController
  test/e2e/framework/framework.go:150
Dec 12 07:39:58.511: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "replication-controller-2069" for this suite.
•{"msg":"PASSED [sig-apps] ReplicationController should release no longer matching pods [Conformance]","total":280,"completed":175,"skipped":2978,"failed":0}
SSSSSSSSSS
------------------------------
[sig-node] ConfigMap 
  should be consumable via the environment [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-node] ConfigMap
... skipping 16 lines ...
Dec 12 07:40:01.294: INFO: Waiting for pod pod-configmaps-cf66bef3-f9eb-471a-843b-013564b0e320 to disappear
Dec 12 07:40:01.352: INFO: Pod pod-configmaps-cf66bef3-f9eb-471a-843b-013564b0e320 no longer exists
[AfterEach] [sig-node] ConfigMap
  test/e2e/framework/framework.go:150
Dec 12 07:40:01.352: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "configmap-7313" for this suite.
•{"msg":"PASSED [sig-node] ConfigMap should be consumable via the environment [NodeConformance] [Conformance]","total":280,"completed":176,"skipped":2988,"failed":0}
S
------------------------------
[sig-apps] Job 
  should delete a job [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Job
... skipping 12 lines ...
Dec 12 07:40:06.204: INFO: Terminating Job.batch foo pods took: 100.219365ms
STEP: Ensuring job was deleted
[AfterEach] [sig-apps] Job
  test/e2e/framework/framework.go:150
Dec 12 07:40:42.361: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "job-1986" for this suite.
•{"msg":"PASSED [sig-apps] Job should delete a job [Conformance]","total":280,"completed":177,"skipped":2989,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] Probing container 
  should be restarted with a exec "cat /tmp/health" liveness probe [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Probing container
... skipping 13 lines ...
Dec 12 07:41:32.330: INFO: Restart count of pod container-probe-5432/busybox-65def661-724b-4ccc-8793-9ec769f12148 is now 1 (47.385478183s elapsed)
STEP: deleting the pod
[AfterEach] [k8s.io] Probing container
  test/e2e/framework/framework.go:150
Dec 12 07:41:32.400: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-probe-5432" for this suite.
•{"msg":"PASSED [k8s.io] Probing container should be restarted with a exec \"cat /tmp/health\" liveness probe [NodeConformance] [Conformance]","total":280,"completed":178,"skipped":3023,"failed":0}
SSSSSSS
------------------------------
[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] 
  works for CRD with validation schema [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
... skipping 39 lines ...
Dec 12 07:41:44.218: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443 --kubeconfig=/home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig explain e2e-test-crd-publish-openapi-3663-crds.spec'
Dec 12 07:41:44.997: INFO: stderr: ""
Dec 12 07:41:44.997: INFO: stdout: "KIND:     E2e-test-crd-publish-openapi-3663-crd\nVERSION:  crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: spec <Object>\n\nDESCRIPTION:\n     Specification of Foo\n\nFIELDS:\n   bars\t<[]Object>\n     List of Bars and their specs.\n\n"
Dec 12 07:41:44.997: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443 --kubeconfig=/home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig explain e2e-test-crd-publish-openapi-3663-crds.spec.bars'
Dec 12 07:41:45.760: INFO: stderr: ""
Dec 12 07:41:45.760: INFO: stdout: "KIND:     E2e-test-crd-publish-openapi-3663-crd\nVERSION:  crd-publish-openapi-test-foo.example.com/v1\n\nRESOURCE: bars <[]Object>\n\nDESCRIPTION:\n     List of Bars and their specs.\n\nFIELDS:\n   age\t<string>\n     Age of Bar.\n\n   bazs\t<[]string>\n     List of Bazs.\n\n   name\t<string> -required-\n     Name of Bar.\n\n"
STEP: kubectl explain works to return error when explain is called on property that doesn't exist
Dec 12 07:41:45.760: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://test-1576132495-apiserver-1623198031.us-west-2.elb.amazonaws.com:6443 --kubeconfig=/home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig explain e2e-test-crd-publish-openapi-3663-crds.spec.bars2'
Dec 12 07:41:46.487: INFO: rc: 1
[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 07:41:50.277: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "crd-publish-openapi-3191" for this suite.
•{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD with validation schema [Conformance]","total":280,"completed":179,"skipped":3030,"failed":0}
SS
------------------------------
[sig-storage] EmptyDir volumes 
  volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 07:41:52.996: INFO: Waiting for pod pod-b4dbe2db-5cf0-48be-9140-a14b33931585 to disappear
Dec 12 07:41:53.054: INFO: Pod pod-b4dbe2db-5cf0-48be-9140-a14b33931585 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:41:53.054: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-7092" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":180,"skipped":3032,"failed":0}
SSSSSSSSSSSSS
------------------------------
[k8s.io] Container Runtime blackbox test when starting a container that exits 
  should run with the expected status [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Container Runtime
... skipping 20 lines ...
STEP: Container 'terminate-cmd-rpn': should get the expected 'State'
STEP: Container 'terminate-cmd-rpn': should be possible to delete [NodeConformance]
[AfterEach] [k8s.io] Container Runtime
  test/e2e/framework/framework.go:150
Dec 12 07:42:18.151: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "container-runtime-4097" for this suite.
•{"msg":"PASSED [k8s.io] Container Runtime blackbox test when starting a container that exits should run with the expected status [NodeConformance] [Conformance]","total":280,"completed":181,"skipped":3045,"failed":0}
SSSSSSSSSSSSSSSSSSS
------------------------------
[k8s.io] InitContainer [NodeConformance] 
  should not start app containers if init containers fail on a RestartAlways pod [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] InitContainer [NodeConformance]
  test/e2e/framework/framework.go:149
STEP: Creating a kubernetes client
Dec 12 07:42:18.271: INFO: >>> kubeConfig: /home/prow/go/src/sigs.k8s.io/cluster-api-provider-aws/kubeconfig
STEP: Building a namespace api object, basename init-container
STEP: Waiting for a default service account to be provisioned in namespace
[BeforeEach] [k8s.io] InitContainer [NodeConformance]
  test/e2e/common/init_container.go:153
[It] should not start app containers if init containers fail on a RestartAlways pod [Conformance]
  test/e2e/framework/framework.go:639
STEP: creating the pod
Dec 12 07:42:18.502: INFO: PodSpec: initContainers in spec.initContainers
Dec 12 07:43:04.667: INFO: init container has failed twice: &v1.Pod{TypeMeta:v1.TypeMeta{Kind:"", APIVersion:""}, ObjectMeta:v1.ObjectMeta{Name:"pod-init-8fd94d93-f27b-4b12-a8d2-4c90d9e17915", GenerateName:"", Namespace:"init-container-3853", SelfLink:"/api/v1/namespaces/init-container-3853/pods/pod-init-8fd94d93-f27b-4b12-a8d2-4c90d9e17915", UID:"5d1c2a23-07ff-4809-8338-97e3d426e0ba", ResourceVersion:"19353", Generation:0, CreationTimestamp:v1.Time{Time:time.Time{wall:0x0, ext:63711733338, loc:(*time.Location)(0x7d673a0)}}, DeletionTimestamp:(*v1.Time)(nil), DeletionGracePeriodSeconds:(*int64)(nil), Labels:map[string]string{"name":"foo", "time":"502049806"}, Annotations:map[string]string{"cni.projectcalico.org/podIP":"192.168.220.56/32"}, OwnerReferences:[]v1.OwnerReference(nil), Finalizers:[]string(nil), ClusterName:"", ManagedFields:[]v1.ManagedFieldsEntry(nil)}, Spec:v1.PodSpec{Volumes:[]v1.Volume{v1.Volume{Name:"default-token-9qdt8", VolumeSource:v1.VolumeSource{HostPath:(*v1.HostPathVolumeSource)(nil), EmptyDir:(*v1.EmptyDirVolumeSource)(nil), GCEPersistentDisk:(*v1.GCEPersistentDiskVolumeSource)(nil), AWSElasticBlockStore:(*v1.AWSElasticBlockStoreVolumeSource)(nil), GitRepo:(*v1.GitRepoVolumeSource)(nil), Secret:(*v1.SecretVolumeSource)(0xc005dfab80), NFS:(*v1.NFSVolumeSource)(nil), ISCSI:(*v1.ISCSIVolumeSource)(nil), Glusterfs:(*v1.GlusterfsVolumeSource)(nil), PersistentVolumeClaim:(*v1.PersistentVolumeClaimVolumeSource)(nil), RBD:(*v1.RBDVolumeSource)(nil), FlexVolume:(*v1.FlexVolumeSource)(nil), Cinder:(*v1.CinderVolumeSource)(nil), CephFS:(*v1.CephFSVolumeSource)(nil), Flocker:(*v1.FlockerVolumeSource)(nil), DownwardAPI:(*v1.DownwardAPIVolumeSource)(nil), FC:(*v1.FCVolumeSource)(nil), AzureFile:(*v1.AzureFileVolumeSource)(nil), ConfigMap:(*v1.ConfigMapVolumeSource)(nil), VsphereVolume:(*v1.VsphereVirtualDiskVolumeSource)(nil), Quobyte:(*v1.QuobyteVolumeSource)(nil), AzureDisk:(*v1.AzureDiskVolumeSource)(nil), PhotonPersistentDisk:(*v1.PhotonPersistentDiskVolumeSource)(nil), Projected:(*v1.ProjectedVolumeSource)(nil), PortworxVolume:(*v1.PortworxVolumeSource)(nil), ScaleIO:(*v1.ScaleIOVolumeSource)(nil), StorageOS:(*v1.StorageOSVolumeSource)(nil), CSI:(*v1.CSIVolumeSource)(nil)}}}, InitContainers:[]v1.Container{v1.Container{Name:"init1", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/false"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-9qdt8", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}, v1.Container{Name:"init2", Image:"docker.io/library/busybox:1.29", Command:[]string{"/bin/true"}, Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList(nil), Requests:v1.ResourceList(nil)}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-9qdt8", 
ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, Containers:[]v1.Container{v1.Container{Name:"run1", Image:"k8s.gcr.io/pause:3.1", Command:[]string(nil), Args:[]string(nil), WorkingDir:"", Ports:[]v1.ContainerPort(nil), EnvFrom:[]v1.EnvFromSource(nil), Env:[]v1.EnvVar(nil), Resources:v1.ResourceRequirements{Limits:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}, Requests:v1.ResourceList{"cpu":resource.Quantity{i:resource.int64Amount{value:100, scale:-3}, d:resource.infDecAmount{Dec:(*inf.Dec)(nil)}, s:"100m", Format:"DecimalSI"}}}, VolumeMounts:[]v1.VolumeMount{v1.VolumeMount{Name:"default-token-9qdt8", ReadOnly:true, MountPath:"/var/run/secrets/kubernetes.io/serviceaccount", SubPath:"", MountPropagation:(*v1.MountPropagationMode)(nil), SubPathExpr:""}}, VolumeDevices:[]v1.VolumeDevice(nil), LivenessProbe:(*v1.Probe)(nil), ReadinessProbe:(*v1.Probe)(nil), StartupProbe:(*v1.Probe)(nil), Lifecycle:(*v1.Lifecycle)(nil), TerminationMessagePath:"/dev/termination-log", TerminationMessagePolicy:"File", ImagePullPolicy:"IfNotPresent", SecurityContext:(*v1.SecurityContext)(nil), Stdin:false, StdinOnce:false, TTY:false}}, EphemeralContainers:[]v1.EphemeralContainer(nil), RestartPolicy:"Always", TerminationGracePeriodSeconds:(*int64)(0xc00242c498), ActiveDeadlineSeconds:(*int64)(nil), DNSPolicy:"ClusterFirst", NodeSelector:map[string]string(nil), ServiceAccountName:"default", DeprecatedServiceAccount:"default", AutomountServiceAccountToken:(*bool)(nil), NodeName:"ip-10-0-0-18.us-west-2.compute.internal", HostNetwork:false, HostPID:false, HostIPC:false, ShareProcessNamespace:(*bool)(nil), SecurityContext:(*v1.PodSecurityContext)(0xc001dfd320), ImagePullSecrets:[]v1.LocalObjectReference(nil), Hostname:"", Subdomain:"", Affinity:(*v1.Affinity)(nil), SchedulerName:"default-scheduler", Tolerations:[]v1.Toleration{v1.Toleration{Key:"node.kubernetes.io/not-ready", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc00242c670)}, v1.Toleration{Key:"node.kubernetes.io/unreachable", Operator:"Exists", Value:"", Effect:"NoExecute", TolerationSeconds:(*int64)(0xc00242c6c0)}}, HostAliases:[]v1.HostAlias(nil), PriorityClassName:"", Priority:(*int32)(0xc00242c6c8), DNSConfig:(*v1.PodDNSConfig)(nil), ReadinessGates:[]v1.PodReadinessGate(nil), RuntimeClassName:(*string)(nil), EnableServiceLinks:(*bool)(0xc00242c6cc), PreemptionPolicy:(*v1.PreemptionPolicy)(nil), Overhead:v1.ResourceList(nil), TopologySpreadConstraints:[]v1.TopologySpreadConstraint(nil)}, Status:v1.PodStatus{Phase:"Pending", Conditions:[]v1.PodCondition{v1.PodCondition{Type:"Initialized", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63711733338, loc:(*time.Location)(0x7d673a0)}}, Reason:"ContainersNotInitialized", Message:"containers with incomplete status: [init1 init2]"}, v1.PodCondition{Type:"Ready", Status:"False", 
LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63711733338, loc:(*time.Location)(0x7d673a0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"ContainersReady", Status:"False", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63711733338, loc:(*time.Location)(0x7d673a0)}}, Reason:"ContainersNotReady", Message:"containers with unready status: [run1]"}, v1.PodCondition{Type:"PodScheduled", Status:"True", LastProbeTime:v1.Time{Time:time.Time{wall:0x0, ext:0, loc:(*time.Location)(nil)}}, LastTransitionTime:v1.Time{Time:time.Time{wall:0x0, ext:63711733338, loc:(*time.Location)(0x7d673a0)}}, Reason:"", Message:""}}, Message:"", Reason:"", NominatedNodeName:"", HostIP:"10.0.0.18", PodIP:"192.168.220.56", PodIPs:[]v1.PodIP{v1.PodIP{IP:"192.168.220.56"}}, StartTime:(*v1.Time)(0xc0018fb9a0), InitContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"init1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0021f0380)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(0xc0021f03f0)}, Ready:false, RestartCount:3, Image:"docker.io/library/busybox:1.29", ImageID:"docker.io/library/busybox@sha256:8ccbac733d19c0dd4d70b4f0c1e12245b5fa3ad24758a11035ee505c629c0796", ContainerID:"containerd://d2209e131d71e07765d00b09b3e20f78e400ef576e4bf2ddc94a8325fff2f7f4", Started:(*bool)(nil)}, v1.ContainerStatus{Name:"init2", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc0018fb9e0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"docker.io/library/busybox:1.29", ImageID:"", ContainerID:"", Started:(*bool)(nil)}}, ContainerStatuses:[]v1.ContainerStatus{v1.ContainerStatus{Name:"run1", State:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(0xc0018fb9c0), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, LastTerminationState:v1.ContainerState{Waiting:(*v1.ContainerStateWaiting)(nil), Running:(*v1.ContainerStateRunning)(nil), Terminated:(*v1.ContainerStateTerminated)(nil)}, Ready:false, RestartCount:0, Image:"k8s.gcr.io/pause:3.1", ImageID:"", ContainerID:"", Started:(*bool)(0xc00242c7cf)}}, QOSClass:"Burstable", EphemeralContainerStatuses:[]v1.ContainerStatus(nil)}}
[AfterEach] [k8s.io] InitContainer [NodeConformance]
  test/e2e/framework/framework.go:150
Dec 12 07:43:04.669: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "init-container-3853" for this suite.
•{"msg":"PASSED [k8s.io] InitContainer [NodeConformance] should not start app containers if init containers fail on a RestartAlways pod [Conformance]","total":280,"completed":182,"skipped":3064,"failed":0}
S
------------------------------
[sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] 
  Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] StatefulSet
... skipping 100 lines ...
Dec 12 07:44:24.589: INFO: Waiting for statefulset status.replicas updated to 0
Dec 12 07:44:24.647: INFO: Deleting statefulset ss
[AfterEach] [sig-apps] StatefulSet
  test/e2e/framework/framework.go:150
Dec 12 07:44:24.826: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "statefulset-4929" for this suite.
•{"msg":"PASSED [sig-apps] StatefulSet [k8s.io] Basic StatefulSet functionality [StatefulSetBasic] Scaling should happen in predictable order and halt if any stateful pod is unhealthy [Slow] [Conformance]","total":280,"completed":183,"skipped":3065,"failed":0}
SSSSSSSSSS
------------------------------
[sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] 
  should not be able to mutate or prevent deletion of webhook configuration objects [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
... skipping 25 lines ...
  test/e2e/framework/framework.go:150
Dec 12 07:44:32.170: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "webhook-504" for this suite.
STEP: Destroying namespace "webhook-504-markers" for this suite.
[AfterEach] [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin]
  test/e2e/apimachinery/webhook.go:101
•{"msg":"PASSED [sig-api-machinery] AdmissionWebhook [Privileged:ClusterAdmin] should not be able to mutate or prevent deletion of webhook configuration objects [Conformance]","total":280,"completed":184,"skipped":3075,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] Garbage collector 
  should orphan pods created by rc if delete options say so [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Garbage collector
... skipping 34 lines ...

[AfterEach] [sig-api-machinery] Garbage collector
  test/e2e/framework/framework.go:150
Dec 12 07:45:13.271: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
W1212 07:45:13.271558   20158 metrics_grabber.go:79] Master node is not registered. Grabbing metrics from Scheduler, ControllerManager and ClusterAutoscaler is disabled.
STEP: Destroying namespace "gc-8433" for this suite.
•{"msg":"PASSED [sig-api-machinery] Garbage collector should orphan pods created by rc if delete options say so [Conformance]","total":280,"completed":185,"skipped":3109,"failed":0}
SSSSSSSSSS
------------------------------
[sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] 
  works for CRD preserving unknown fields at the schema root [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
... skipping 23 lines ...
Dec 12 07:45:21.470: INFO: stderr: ""
Dec 12 07:45:21.470: INFO: stdout: "KIND:     E2e-test-crd-publish-openapi-6349-crd\nVERSION:  crd-publish-openapi-test-unknown-at-root.example.com/v1\n\nDESCRIPTION:\n     <empty>\n"
[AfterEach] [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin]
  test/e2e/framework/framework.go:150
Dec 12 07:45:25.316: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "crd-publish-openapi-8697" for this suite.
•{"msg":"PASSED [sig-api-machinery] CustomResourcePublishOpenAPI [Privileged:ClusterAdmin] works for CRD preserving unknown fields at the schema root [Conformance]","total":280,"completed":186,"skipped":3119,"failed":0}

------------------------------
[sig-storage] Downward API volume 
  should provide container's memory limit [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Downward API volume
... skipping 17 lines ...
Dec 12 07:45:28.051: INFO: Waiting for pod downwardapi-volume-85e00b85-e20b-4116-9456-74e60163be85 to disappear
Dec 12 07:45:28.108: INFO: Pod downwardapi-volume-85e00b85-e20b-4116-9456-74e60163be85 no longer exists
[AfterEach] [sig-storage] Downward API volume
  test/e2e/framework/framework.go:150
Dec 12 07:45:28.108: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "downward-api-5143" for this suite.
•{"msg":"PASSED [sig-storage] Downward API volume should provide container's memory limit [NodeConformance] [Conformance]","total":280,"completed":187,"skipped":3119,"failed":0}
SSSSSSSS
------------------------------
[sig-storage] Secrets 
  should be consumable from pods in volume with mappings [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] Secrets
... skipping 16 lines ...
Dec 12 07:45:30.905: INFO: Waiting for pod pod-secrets-e2181270-88ec-46f0-ad0c-032d917c7d4b to disappear
Dec 12 07:45:30.964: INFO: Pod pod-secrets-e2181270-88ec-46f0-ad0c-032d917c7d4b no longer exists
[AfterEach] [sig-storage] Secrets
  test/e2e/framework/framework.go:150
Dec 12 07:45:30.964: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "secrets-2863" for this suite.
•{"msg":"PASSED [sig-storage] Secrets should be consumable from pods in volume with mappings [NodeConformance] [Conformance]","total":280,"completed":188,"skipped":3127,"failed":0}
SSSSSS
------------------------------
[sig-storage] EmptyDir volumes 
  should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-storage] EmptyDir volumes
... skipping 15 lines ...
Dec 12 07:45:33.690: INFO: Waiting for pod pod-89af8e2e-67e5-42c1-b94e-5d8e8d4542f3 to disappear
Dec 12 07:45:33.748: INFO: Pod pod-89af8e2e-67e5-42c1-b94e-5d8e8d4542f3 no longer exists
[AfterEach] [sig-storage] EmptyDir volumes
  test/e2e/framework/framework.go:150
Dec 12 07:45:33.748: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "emptydir-5217" for this suite.
•{"msg":"PASSED [sig-storage] EmptyDir volumes should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance] [Conformance]","total":280,"completed":189,"skipped":3133,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-apps] Deployment 
  deployment should support proportional scaling [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Deployment
... skipping 76 lines ...
&Pod{ObjectMeta:{webserver-deployment-595b5b9587-wr4k4 webserver-deployment-595b5b9587- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-595b5b9587-wr4k4 05eadb19-8fa8-4d5d-b16b-b1db51d3633a 20790 0 2019-12-12 07:45:41 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:595b5b9587] map[] [{apps/v1 ReplicaSet webserver-deployment-595b5b9587 dc9342c9-0aa6-40ef-8d72-1ce38aab6ed0 0xc0043d8d30 0xc0043d8d31}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-96.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 
UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:<nil>,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
Dec 12 07:45:41.842: INFO: Pod "webserver-deployment-595b5b9587-xmdkf" is not available:
&Pod{ObjectMeta:{webserver-deployment-595b5b9587-xmdkf webserver-deployment-595b5b9587- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-595b5b9587-xmdkf 6619f289-ac5e-4038-be49-934ab5a6975e 20822 0 2019-12-12 07:45:41 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:595b5b9587] map[] [{apps/v1 ReplicaSet webserver-deployment-595b5b9587 dc9342c9-0aa6-40ef-8d72-1ce38aab6ed0 0xc0043d8ef0 0xc0043d8ef1}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:docker.io/library/httpd:2.4.38-alpine,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-18.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready 
status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.18,PodIP:,StartTime:2019-12-12 07:45:41 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:docker.io/library/httpd:2.4.38-alpine,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
Dec 12 07:45:41.842: INFO: Pod "webserver-deployment-c7997dcc8-48tnd" is not available:
&Pod{ObjectMeta:{webserver-deployment-c7997dcc8-48tnd webserver-deployment-c7997dcc8- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-c7997dcc8-48tnd 7fab631d-bbe2-4fc8-9b8a-5472e2eb2135 20820 0 2019-12-12 07:45:41 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:c7997dcc8] map[] [{apps/v1 ReplicaSet webserver-deployment-c7997dcc8 52293ca1-6cb1-4e90-8d00-3b3ccb43d72a 0xc0043d9140 0xc0043d9141}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-96.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: 
[httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.96,PodIP:,StartTime:2019-12-12 07:45:41 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
Dec 12 07:45:41.842: INFO: Pod "webserver-deployment-c7997dcc8-6qslb" is not available:
&Pod{ObjectMeta:{webserver-deployment-c7997dcc8-6qslb webserver-deployment-c7997dcc8- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-c7997dcc8-6qslb bcaf68cc-6380-4e70-bd18-746c3fd988a9 20761 0 2019-12-12 07:45:38 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:c7997dcc8] map[cni.projectcalico.org/podIP:192.168.76.145/32] [{apps/v1 ReplicaSet webserver-deployment-c7997dcc8 52293ca1-6cb1-4e90-8d00-3b3ccb43d72a 0xc0043d9360 0xc0043d9361}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-96.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:ContainersNotReady,Message:containers 
with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.96,PodIP:192.168.76.145,StartTime:2019-12-12 07:45:38 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ErrImagePull,Message:rpc error: code = Unknown desc = failed to pull and unpack image "docker.io/library/webserver:404": failed to resolve reference "docker.io/library/webserver:404": pull access denied, repository does not exist or may require authorization: server message: insufficient_scope: authorization failed,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.76.145,},},EphemeralContainerStatuses:[]ContainerStatus{},},}
Dec 12 07:45:41.843: INFO: Pod "webserver-deployment-c7997dcc8-89nmk" is not available:
&Pod{ObjectMeta:{webserver-deployment-c7997dcc8-89nmk webserver-deployment-c7997dcc8- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-c7997dcc8-89nmk e9bc4461-cc9f-4b76-b19b-c30561f5aee7 20814 0 2019-12-12 07:45:41 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:c7997dcc8] map[] [{apps/v1 ReplicaSet webserver-deployment-c7997dcc8 52293ca1-6cb1-4e90-8d00-3b3ccb43d72a 0xc0043d95d0 0xc0043d95d1}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-18.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: 
[httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.18,PodIP:,StartTime:2019-12-12 07:45:41 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
Dec 12 07:45:41.844: INFO: Pod "webserver-deployment-c7997dcc8-8m8d5" is not available:
&Pod{ObjectMeta:{webserver-deployment-c7997dcc8-8m8d5 webserver-deployment-c7997dcc8- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-c7997dcc8-8m8d5 b9219823-8a05-4a47-9e6a-41032377b7d4 20720 0 2019-12-12 07:45:38 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:c7997dcc8] map[cni.projectcalico.org/podIP:192.168.220.9/32] [{apps/v1 ReplicaSet webserver-deployment-c7997dcc8 52293ca1-6cb1-4e90-8d00-3b3ccb43d72a 0xc0043d97a0 0xc0043d97a1}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-18.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:ContainersNotReady,Message:containers 
with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.18,PodIP:192.168.220.9,StartTime:2019-12-12 07:45:38 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ErrImagePull,Message:rpc error: code = Unknown desc = failed to pull and unpack image "docker.io/library/webserver:404": failed to resolve reference "docker.io/library/webserver:404": pull access denied, repository does not exist or may require authorization: server message: insufficient_scope: authorization failed,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{PodIP{IP:192.168.220.9,},},EphemeralContainerStatuses:[]ContainerStatus{},},}
Dec 12 07:45:41.844: INFO: Pod "webserver-deployment-c7997dcc8-bwqhn" is not available:
&Pod{ObjectMeta:{webserver-deployment-c7997dcc8-bwqhn webserver-deployment-c7997dcc8- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-c7997dcc8-bwqhn 9de76a27-5b9d-4197-a2db-51d42e8215c2 20773 0 2019-12-12 07:45:41 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:c7997dcc8] map[] [{apps/v1 ReplicaSet webserver-deployment-c7997dcc8 52293ca1-6cb1-4e90-8d00-3b3ccb43d72a 0xc0043d9970 0xc0043d9971}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-18.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: 
[httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.18,PodIP:,StartTime:2019-12-12 07:45:41 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
Dec 12 07:45:41.844: INFO: Pod "webserver-deployment-c7997dcc8-cl7lf" is not available:
&Pod{ObjectMeta:{webserver-deployment-c7997dcc8-cl7lf webserver-deployment-c7997dcc8- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-c7997dcc8-cl7lf eafe682b-60a0-4c91-9670-f1e7f1a4268a 20797 0 2019-12-12 07:45:41 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:c7997dcc8] map[] [{apps/v1 ReplicaSet webserver-deployment-c7997dcc8 52293ca1-6cb1-4e90-8d00-3b3ccb43d72a 0xc0043d9b90 0xc0043d9b91}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-96.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 
UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:<nil>,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
Dec 12 07:45:41.844: INFO: Pod "webserver-deployment-c7997dcc8-lcmp4" is not available:
&Pod{ObjectMeta:{webserver-deployment-c7997dcc8-lcmp4 webserver-deployment-c7997dcc8- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-c7997dcc8-lcmp4 e0c8a7b7-90ec-4de9-b835-4c5ae3403a28 20706 0 2019-12-12 07:45:38 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:c7997dcc8] map[cni.projectcalico.org/podIP:192.168.76.144/32] [{apps/v1 ReplicaSet webserver-deployment-c7997dcc8 52293ca1-6cb1-4e90-8d00-3b3ccb43d72a 0xc0043d9cd0 0xc0043d9cd1}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-96.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:Initialized,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:,Message:,},PodCondition{Type:Ready,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:ContainersNotReady,Message:containers 
with unready status: [httpd],},PodCondition{Type:ContainersReady,Status:False,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:ContainersNotReady,Message:containers with unready status: [httpd],},PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:38 +0000 UTC,Reason:,Message:,},},Message:,Reason:,HostIP:10.0.0.96,PodIP:,StartTime:2019-12-12 07:45:38 +0000 UTC,ContainerStatuses:[]ContainerStatus{ContainerStatus{Name:httpd,State:ContainerState{Waiting:&ContainerStateWaiting{Reason:ContainerCreating,Message:,},Running:nil,Terminated:nil,},LastTerminationState:ContainerState{Waiting:nil,Running:nil,Terminated:nil,},Ready:false,RestartCount:0,Image:webserver:404,ImageID:,ContainerID:,Started:*false,},},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
... skipping 10 lines ...
Dec 12 07:45:41.846: INFO: Pod "webserver-deployment-c7997dcc8-xrq5j" is not available:
&Pod{ObjectMeta:{webserver-deployment-c7997dcc8-xrq5j webserver-deployment-c7997dcc8- deployment-1931 /api/v1/namespaces/deployment-1931/pods/webserver-deployment-c7997dcc8-xrq5j 9aa78ac6-6770-448d-a8af-aedef0d0dde4 20791 0 2019-12-12 07:45:41 +0000 UTC <nil> <nil> map[name:httpd pod-template-hash:c7997dcc8] map[] [{apps/v1 ReplicaSet webserver-deployment-c7997dcc8 52293ca1-6cb1-4e90-8d00-3b3ccb43d72a 0xc004398880 0xc004398881}] []  []},Spec:PodSpec{Volumes:[]Volume{Volume{Name:default-token-cgtxh,VolumeSource:VolumeSource{HostPath:nil,EmptyDir:nil,GCEPersistentDisk:nil,AWSElasticBlockStore:nil,GitRepo:nil,Secret:&SecretVolumeSource{SecretName:default-token-cgtxh,Items:[]KeyToPath{},DefaultMode:*420,Optional:nil,},NFS:nil,ISCSI:nil,Glusterfs:nil,PersistentVolumeClaim:nil,RBD:nil,FlexVolume:nil,Cinder:nil,CephFS:nil,Flocker:nil,DownwardAPI:nil,FC:nil,AzureFile:nil,ConfigMap:nil,VsphereVolume:nil,Quobyte:nil,AzureDisk:nil,PhotonPersistentDisk:nil,PortworxVolume:nil,ScaleIO:nil,Projected:nil,StorageOS:nil,CSI:nil,},},},Containers:[]Container{Container{Name:httpd,Image:webserver:404,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:default-token-cgtxh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,},},RestartPolicy:Always,TerminationGracePeriodSeconds:*0,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:default,DeprecatedServiceAccount:default,NodeName:ip-10-0-0-18.us-west-2.compute.internal,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[]Sysctl{},WindowsOptions:nil,},ImagePullSecrets:[]LocalObjectReference{},Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[]Container{},AutomountServiceAccountToken:nil,Tolerations:[]Toleration{Toleration{Key:node.kubernetes.io/not-ready,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},Toleration{Key:node.kubernetes.io/unreachable,Operator:Exists,Value:,Effect:NoExecute,TolerationSeconds:*300,},},HostAliases:[]HostAlias{},PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[]PodReadinessGate{},RuntimeClassName:nil,EnableServiceLinks:*true,PreemptionPolicy:nil,Overhead:ResourceList{},TopologySpreadConstraints:[]TopologySpreadConstraint{},EphemeralContainers:[]EphemeralContainer{},},Status:PodStatus{Phase:Pending,Conditions:[]PodCondition{PodCondition{Type:PodScheduled,Status:True,LastProbeTime:0001-01-01 00:00:00 +0000 UTC,LastTransitionTime:2019-12-12 07:45:41 +0000 
UTC,Reason:,Message:,},},Message:,Reason:,HostIP:,PodIP:,StartTime:<nil>,ContainerStatuses:[]ContainerStatus{},QOSClass:BestEffort,InitContainerStatuses:[]ContainerStatus{},NominatedNodeName:,PodIPs:[]PodIP{},EphemeralContainerStatuses:[]ContainerStatus{},},}
[AfterEach] [sig-apps] Deployment
  test/e2e/framework/framework.go:150
Dec 12 07:45:41.846: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "deployment-1931" for this suite.
•{"msg":"PASSED [sig-apps] Deployment deployment should support proportional scaling [Conformance]","total":280,"completed":190,"skipped":3180,"failed":0}
SSSSSSSSSSSSSSSSSSS
------------------------------
[sig-cli] Kubectl client Kubectl rolling-update 
  should support rolling-update to same image  [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-cli] Kubectl client
... skipping 44 lines ...
Dec 12 07:46:09.743: INFO: stderr: ""
Dec 12 07:46:09.743: INFO: stdout: "replicationcontroller \"e2e-test-httpd-rc\" deleted\n"
[AfterEach] [sig-cli] Kubectl client
  test/e2e/framework/framework.go:150
Dec 12 07:46:09.743: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubectl-5649" for this suite.
•{"msg":"PASSED [sig-cli] Kubectl client Kubectl rolling-update should support rolling-update to same image  [Conformance]","total":280,"completed":191,"skipped":3199,"failed":0}
SSSSSSSSSSSSSSSSSSSSSSSSSSS
------------------------------
[sig-api-machinery] Watchers 
  should be able to start watching from a specific resource version [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Watchers
... skipping 13 lines ...
Dec 12 07:46:10.507: INFO: Got : MODIFIED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version  watch-9221 /api/v1/namespaces/watch-9221/configmaps/e2e-watch-test-resource-version bdf0cdae-5a5f-4044-b673-a87bf537e195 21402 0 2019-12-12 07:46:10 +0000 UTC <nil> <nil> map[watch-this-configmap:from-resource-version] map[] [] []  []},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
Dec 12 07:46:10.507: INFO: Got : DELETED &ConfigMap{ObjectMeta:{e2e-watch-test-resource-version  watch-9221 /api/v1/namespaces/watch-9221/configmaps/e2e-watch-test-resource-version bdf0cdae-5a5f-4044-b673-a87bf537e195 21403 0 2019-12-12 07:46:10 +0000 UTC <nil> <nil> map[watch-this-configmap:from-resource-version] map[] [] []  []},Data:map[string]string{mutation: 2,},BinaryData:map[string][]byte{},}
[AfterEach] [sig-api-machinery] Watchers
  test/e2e/framework/framework.go:150
Dec 12 07:46:10.507: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "watch-9221" for this suite.
•{"msg":"PASSED [sig-api-machinery] Watchers should be able to start watching from a specific resource version [Conformance]","total":280,"completed":192,"skipped":3226,"failed":0}
S
------------------------------
[sig-apps] Daemon set [Serial] 
  should run and stop simple daemon [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Daemon set [Serial]
... skipping 67 lines ...
Dec 12 07:46:23.618: INFO: pods: {"kind":"PodList","apiVersion":"v1","metadata":{"selfLink":"/api/v1/namespaces/daemonsets-5040/pods","resourceVersion":"21536"},"items":null}

[AfterEach] [sig-apps] Daemon set [Serial]
  test/e2e/framework/framework.go:150
Dec 12 07:46:23.795: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "daemonsets-5040" for this suite.
•{"msg":"PASSED [sig-apps] Daemon set [Serial] should run and stop simple daemon [Conformance]","total":280,"completed":193,"skipped":3227,"failed":0}
SSS
------------------------------
[sig-api-machinery] Aggregator 
  Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-api-machinery] Aggregator
... skipping 18 lines ...
[AfterEach] [sig-api-machinery] Aggregator
  test/e2e/apimachinery/aggregator.go:64
[AfterEach] [sig-api-machinery] Aggregator
  test/e2e/framework/framework.go:150
Dec 12 07:46:40.567: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "aggregator-3149" for this suite.
•{"msg":"PASSED [sig-api-machinery] Aggregator Should be able to support the 1.10 Sample API Server using the current Aggregator [Conformance]","total":280,"completed":194,"skipped":3230,"failed":0}
SSSSSSSS
------------------------------
[k8s.io] Kubelet when scheduling a busybox command that always fails in a pod 
  should have an terminated reason [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [k8s.io] Kubelet
... skipping 9 lines ...
[It] should have an terminated reason [NodeConformance] [Conformance]
  test/e2e/framework/framework.go:639
[AfterEach] [k8s.io] Kubelet
  test/e2e/framework/framework.go:150
Dec 12 07:46:45.101: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
STEP: Destroying namespace "kubelet-test-6593" for this suite.
•{"msg":"PASSED [k8s.io] Kubelet when scheduling a busybox command that always fails in a pod should have an terminated reason [NodeConformance] [Conformance]","total":280,"completed":195,"skipped":3238,"failed":0}
SSSSSS
------------------------------
[sig-apps] Deployment 
  deployment should delete old replica sets [Conformance]
  test/e2e/framework/framework.go:639
[BeforeEach] [sig-apps] Deployment
... skipping 7 lines ...
[It] deployment should delete old replica sets [Conformance]
  test/e2e/framework/framework.go:639
Dec 12 07:46:45.574: INFO: Pod name cleanup-pod: Found 1 pods out of 1
STEP: ensuring each pod is running
Dec 12 07:46:47.689: INFO: Creating deployment test-cleanup-deployment
STEP: Waiting for deployment test-cleanup-deployment history to be cleaned up
Dec 12 07:46:47.944: FAIL: Expect only one pod creation, the second creation event: watch.Event{Type:"ADDED", Object:(*v1.Pod)(0xc006d5e400)}


Full Stack Trace
k8s.io/kubernetes/test/e2e/apps.testDeploymentCleanUpPolicy.func1(0x5321440, 0xc006d446c0, 0xc006c3f680)
	test/e2e/apps/deployment.go:389 +0x33f
created by k8s.io/kubernetes/test/e2e/apps.testDeploymentCleanUpPolicy
	test/e2e/apps/deployment.go:377 +0x96d
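The failing check above comes from testDeploymentCleanUpPolicy, which watches for Pod creations during the rollout and fails as soon as a second ADDED event arrives. A simplified, self-contained sketch of that kind of check is below; the kubeconfig path and label selector are illustrative assumptions, and this is not the verbatim code from test/e2e/apps/deployment.go.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // assumed path
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// Watch Pods matching the deployment's selector (assumed here).
	w, err := client.CoreV1().Pods("deployment-1931").Watch(context.TODO(), metav1.ListOptions{
		LabelSelector: "name=cleanup-pod",
	})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	// Count ADDED events; more than one means an unexpected extra Pod was
	// created, which is what fails the spec in this run.
	created := 0
	for event := range w.ResultChan() {
		if event.Type == watch.Added {
			created++
			if created > 1 {
				panic(fmt.Sprintf("expect only one pod creation, got a second creation event: %#v", event))
			}
		}
	}
}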
panic: 
Your test failed.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.

But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call

	defer GinkgoRecover()

at the top of the goroutine that caused this panic.
 [recovered]
	panic: 
Your test failed.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.

But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call

	defer GinkgoRecover()

at the top of the goroutine that caused this panic.


goroutine 8956 [running]:
k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.Fail.func1(0xc005b01200, 0x88, 0x7b6829f, 0x1b, 0x185, 0xc005122300, 0xff)
	test/e2e/framework/ginkgowrapper/wrapper.go:63 +0xa1
panic(0x3ee64a0, 0x524cdc0)
	GOROOT/src/runtime/panic.go:679 +0x1b2
k8s.io/kubernetes/vendor/github.com/onsi/ginkgo.Fail(0xc005b01200, 0x88, 0xc006d4dd30, 0x1, 0x1)
	vendor/github.com/onsi/ginkgo/ginkgo_dsl.go:262 +0xc8
k8s.io/kubernetes/test/e2e/framework/ginkgowrapper.Fail(0xc005b01200, 0x88, 0xc006d4de48, 0x1, 0x1)
	test/e2e/framework/ginkgowrapper/wrapper.go:67 +0x1cc
k8s.io/kubernetes/test/e2e/framework.FailfWithOffset(0x1, 0x4bcaa25, 0x3d, 0xc00278aef8, 0x1, 0x1)
	test/e2e/framework/log.go:56 +0x215
k8s.io/kubernetes/test/e2e/framework.Failf(...)
	test/e2e/framework/log.go:47
k8s.io/kubernetes/test/e2e/apps.testDeploymentCleanUpPolicy.func1(0x5321440, 0xc006d446c0, 0xc006c3f680)
	test/e2e/apps/deployment.go:389 +0x33f
created by k8s.io/kubernetes/test/e2e/apps.testDeploymentCleanUpPolicy
	test/e2e/apps/deployment.go:377 +0x96d
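The remedy the panic text prescribes is concrete: any goroutine that can fail an assertion must defer GinkgoRecover so Ginkgo captures the failure instead of letting the panic escape, as it did in this run. A minimal self-contained sketch of the pattern, using the ginkgo v1 API this suite vendors:

package e2e_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func TestSketch(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "sketch suite")
}

var _ = ginkgo.It("asserts safely from a goroutine", func() {
	done := make(chan struct{})
	go func() {
		// Without this deferred call, a failing assertion in this
		// goroutine would panic the whole test binary, exactly as in
		// the stack trace above.
		defer ginkgo.GinkgoRecover()
		defer close(done)
		gomega.Expect(1).To(gomega.Equal(1))
	}()
	<-done
})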

Ginkgo ran 1 suite in 52m14.416037777s
Test Suite Failed
!!! Error in ./hack/ginkgo-e2e.sh:150
  Error in ./hack/ginkgo-e2e.sh:150. '"${ginkgo}" "${ginkgo_args[@]:+${ginkgo_args[@]}}" "${e2e_test}" -- "${auth_config[@]:+${auth_config[@]}}" --ginkgo.flakeAttempts="${FLAKE_ATTEMPTS}" --host="${KUBE_MASTER_URL}" --provider="${KUBERNETES_PROVIDER}" --gce-project="${PROJECT:-}" --gce-zone="${ZONE:-}" --gce-region="${REGION:-}" --gce-multizone="${MULTIZONE:-false}" --gke-cluster="${CLUSTER_NAME:-}" --kube-master="${KUBE_MASTER:-}" --cluster-tag="${CLUSTER_ID:-}" --cloud-config-file="${CLOUD_CONFIG:-}" --repo-root="${KUBE_ROOT}" --node-instance-group="${NODE_INSTANCE_GROUP:-}" --prefix="${KUBE_GCE_INSTANCE_PREFIX:-e2e}" --network="${KUBE_GCE_NETWORK:-${KUBE_GKE_NETWORK:-e2e}}" --node-tag="${NODE_TAG:-}" --master-tag="${MASTER_TAG:-}" --cluster-monitoring-mode="${KUBE_ENABLE_CLUSTER_MONITORING:-standalone}" --dns-domain="${KUBE_DNS_DOMAIN:-cluster.local}" --ginkgo.slowSpecThreshold="${GINKGO_SLOW_SPEC_THRESHOLD:-300}" ${KUBE_CONTAINER_RUNTIME:+"--container-runtime=${KUBE_CONTAINER_RUNTIME}"} ${MASTER_OS_DISTRIBUTION:+"--master-os-distro=${MASTER_OS_DISTRIBUTION}"} ${NODE_OS_DISTRIBUTION:+"--node-os-distro=${NODE_OS_DISTRIBUTION}"} ${NUM_NODES:+"--num-nodes=${NUM_NODES}"} ${E2E_REPORT_DIR:+"--report-dir=${E2E_REPORT_DIR}"} ${E2E_REPORT_PREFIX:+"--report-prefix=${E2E_REPORT_PREFIX}"} "${@:-}"' exited with status 1
Call stack:
  1: ./hack/ginkgo-e2e.sh:150 main(...)
Exiting with status 1
+ exit-handler
+ dump-logs
+ echo '=== versions ==='
... skipping 364 lines ...
++ kind get kubeconfig-path --name=clusterapi
+ timeout 600 kubectl --kubeconfig=/root/.kube/kind-config-clusterapi delete cluster test-1576132495
cluster.cluster.x-k8s.io "test-1576132495" deleted
+ true
++ kind get kubeconfig-path --name=clusterapi
+ timeout 600 kubectl --kubeconfig=/root/.kube/kind-config-clusterapi wait --for=delete cluster/test-1576132495
error: timed out waiting for the condition on clusters/test-1576132495
+ true
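The "kubectl wait --for=delete" above timed out because the Cluster object still existed when the 600s budget expired. The equivalent check in Go is a poll that treats NotFound as success; a hedged sketch follows, in which the API group version (v1alpha2), namespace, and timeout are assumptions rather than values confirmed by the log.

package main

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/kind-config-clusterapi")
	if err != nil {
		panic(err)
	}
	client, err := dynamic.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	// GVR for Cluster API clusters; the version here is an assumption.
	clusterGVR := schema.GroupVersionResource{
		Group:    "cluster.x-k8s.io",
		Version:  "v1alpha2",
		Resource: "clusters",
	}

	// Poll until the object is gone, mirroring kubectl wait --for=delete.
	err = wait.PollImmediate(5*time.Second, 10*time.Minute, func() (bool, error) {
		_, getErr := client.Resource(clusterGVR).Namespace("default").Get(
			context.TODO(), "test-1576132495", metav1.GetOptions{})
		if apierrors.IsNotFound(getErr) {
			return true, nil // deleted
		}
		return false, nil // still present (or transient error); keep polling
	})
	if err != nil {
		fmt.Println("timed out waiting for cluster deletion:", err)
	}
}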
+ make kind-reset
kind delete cluster --name=clusterapi || true
Deleting cluster "clusterapi" ...
++ go env GOPATH
+ cd /home/prow/go/src/k8s.io/kubernetes
... skipping 26 lines ...