PR: PatrickLang: docs: few clarifications for Windows
Result: FAILURE
Tests: 0 failed / 527 succeeded
Started: 2019-10-17 20:34
Elapsed: 28m34s
Revision: 6491b39f0e567ff158d6bfb9339b2e7fc1135ca1
Refs: 959

No Test Failures!


Passed Tests: 527

Skipped Tests: 4244

Error lines from build-log.txt

... skipping 634 lines ...
localAPIEndpoint:
  advertiseAddress: "172.17.0.3"
  bindPort: 6443
nodeRegistration:
  criSocket: "/run/containerd/containerd.sock"
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: "172.17.0.3"
---
# no-op entry that exists solely so it can be patched
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
metadata:
  name: config

nodeRegistration:
  criSocket: "/run/containerd/containerd.sock"
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: "172.17.0.3"
discovery:
  bootstrapToken:
    apiServerEndpoint: "172.17.0.2:6443"
    token: "abcdef.0123456789abcdef"
    unsafeSkipCAVerification: true
... skipping 57 lines ...
localAPIEndpoint:
  advertiseAddress: "172.17.0.4"
  bindPort: 6443
nodeRegistration:
  criSocket: "/run/containerd/containerd.sock"
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: "172.17.0.4"
---
# no-op entry that exists solely so it can be patched
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
metadata:
  name: config

nodeRegistration:
  criSocket: "/run/containerd/containerd.sock"
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: "172.17.0.4"
discovery:
  bootstrapToken:
    apiServerEndpoint: "172.17.0.2:6443"
    token: "abcdef.0123456789abcdef"
    unsafeSkipCAVerification: true
... skipping 58 lines ...
localAPIEndpoint:
  advertiseAddress: "172.17.0.2"
  bindPort: 6443
nodeRegistration:
  criSocket: "/run/containerd/containerd.sock"
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: "172.17.0.2"
---
# no-op entry that exists solely so it can be patched
apiVersion: kubeadm.k8s.io/v1beta2
kind: JoinConfiguration
metadata:
... skipping 2 lines ...
  localAPIEndpoint:
    advertiseAddress: "172.17.0.2"
    bindPort: 6443
nodeRegistration:
  criSocket: "/run/containerd/containerd.sock"
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: "172.17.0.2"
discovery:
  bootstrapToken:
    apiServerEndpoint: "172.17.0.2:6443"
    token: "abcdef.0123456789abcdef"
    unsafeSkipCAVerification: true
... skipping 44 lines ...
localAPIEndpoint:
  advertiseAddress: 172.17.0.4
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: 172.17.0.4
---
apiVersion: kubeadm.k8s.io/v1beta2
discovery:
  bootstrapToken:
    apiServerEndpoint: 172.17.0.2:6443
    token: abcdef.0123456789abcdef
    unsafeSkipCAVerification: true
kind: JoinConfiguration
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: 172.17.0.4
---
apiVersion: kubelet.config.k8s.io/v1beta1
evictionHard:
  imagefs.available: 0%
  nodefs.available: 0%
... skipping 30 lines ...
localAPIEndpoint:
  advertiseAddress: 172.17.0.3
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: 172.17.0.3
---
apiVersion: kubeadm.k8s.io/v1beta2
discovery:
  bootstrapToken:
    apiServerEndpoint: 172.17.0.2:6443
    token: abcdef.0123456789abcdef
    unsafeSkipCAVerification: true
kind: JoinConfiguration
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: 172.17.0.3
---
apiVersion: kubelet.config.k8s.io/v1beta1
evictionHard:
  imagefs.available: 0%
  nodefs.available: 0%
... skipping 30 lines ...
localAPIEndpoint:
  advertiseAddress: 172.17.0.2
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: 172.17.0.2
---
apiVersion: kubeadm.k8s.io/v1beta2
controlPlane:
  localAPIEndpoint:
    advertiseAddress: 172.17.0.2
... skipping 4 lines ...
    token: abcdef.0123456789abcdef
    unsafeSkipCAVerification: true
kind: JoinConfiguration
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  kubeletExtraArgs:
    fail-swap-on: "false"
    node-ip: 172.17.0.2
---
apiVersion: kubelet.config.k8s.io/v1beta1
evictionHard:
  imagefs.available: 0%
  nodefs.available: 0%
... skipping 28 lines ...
I1017 20:47:21.749710      25 checks.go:287] validating the existence of file /etc/kubernetes/manifests/kube-scheduler.yaml
I1017 20:47:21.749715      25 checks.go:287] validating the existence of file /etc/kubernetes/manifests/etcd.yaml
I1017 20:47:21.749722      25 checks.go:433] validating if the connectivity type is via proxy or direct
I1017 20:47:21.750866      25 checks.go:472] validating http connectivity to first IP address in the CIDR
I1017 20:47:21.750903      25 checks.go:472] validating http connectivity to first IP address in the CIDR
I1017 20:47:21.750913      25 checks.go:103] validating the container runtime
	[WARNING CRI]: container runtime is not running: output: time="2019-10-17T20:47:23Z" level=fatal msg="failed to connect: failed to connect, make sure you are running as root and the runtime has been started: context deadline exceeded"
, error: exit status 1
I1017 20:47:23.920936      25 checks.go:377] validating the presence of executable crictl
I1017 20:47:23.921005      25 checks.go:336] validating the contents of file /proc/sys/net/bridge/bridge-nf-call-iptables
	[WARNING FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
I1017 20:47:23.921084      25 checks.go:336] validating the contents of file /proc/sys/net/ipv4/ip_forward
I1017 20:47:23.921151      25 checks.go:650] validating whether swap is enabled or not
I1017 20:47:23.921249      25 checks.go:377] validating the presence of executable ip
... skipping 20 lines ...
I1017 20:47:27.164897      25 checks.go:839] image exists: k8s.gcr.io/kube-controller-manager:v1.17.0-alpha.2.155_006378fcc1c8db
I1017 20:47:27.182369      25 checks.go:839] image exists: k8s.gcr.io/kube-scheduler:v1.17.0-alpha.2.155_006378fcc1c8db
I1017 20:47:27.199821      25 checks.go:839] image exists: k8s.gcr.io/kube-proxy:v1.17.0-alpha.2.155_006378fcc1c8db
I1017 20:47:27.213776      25 checks.go:839] image exists: k8s.gcr.io/pause:3.1
I1017 20:47:27.227555      25 checks.go:839] image exists: k8s.gcr.io/etcd:3.3.17-0
I1017 20:47:27.249605      25 checks.go:839] image exists: k8s.gcr.io/coredns:1.6.2
	[WARNING ImagePull]: failed to pull image k8s.gcr.io/kube-apiserver:v1.17.0-alpha.2.155_006378fcc1c8db: output: time="2019-10-17T20:47:27Z" level=fatal msg="pulling image failed: rpc error: code = NotFound desc = failed to pull and unpack image \"k8s.gcr.io/kube-apiserver:v1.17.0-alpha.2.155_006378fcc1c8db\": failed to resolve reference \"k8s.gcr.io/kube-apiserver:v1.17.0-alpha.2.155_006378fcc1c8db\": k8s.gcr.io/kube-apiserver:v1.17.0-alpha.2.155_006378fcc1c8db: not found"
, error: exit status 1
I1017 20:47:27.249675      25 kubelet.go:61] Stopping the kubelet
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
I1017 20:47:27.297499      25 kubelet.go:79] Starting the kubelet
[kubelet-start] Activating the kubelet service
[certs] Using certificateDir folder "/etc/kubernetes/pki"
... skipping 120 lines ...
I1017 20:48:09.765169      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s  in 0 milliseconds
I1017 20:48:10.265113      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s  in 0 milliseconds
I1017 20:48:10.765156      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s  in 0 milliseconds
I1017 20:48:11.265179      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s  in 0 milliseconds
[kubelet-check] Initial timeout of 40s passed.
I1017 20:48:11.764970      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s  in 0 milliseconds
I1017 20:48:17.762405      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s 500 Internal Server Error in 5497 milliseconds
I1017 20:48:17.770715      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s 500 Internal Server Error in 5 milliseconds
I1017 20:48:18.267575      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s 500 Internal Server Error in 2 milliseconds
I1017 20:48:18.766479      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s 500 Internal Server Error in 1 milliseconds
I1017 20:48:19.266854      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s 500 Internal Server Error in 1 milliseconds
[apiclient] All control plane components are healthy after 48.008179 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
I1017 20:48:19.767928      25 round_trippers.go:443] GET https://172.17.0.2:6443/healthz?timeout=32s 200 OK in 2 milliseconds
I1017 20:48:19.768041      25 uploadconfig.go:108] [upload-config] Uploading the kubeadm ClusterConfiguration to a ConfigMap
I1017 20:48:19.777118      25 round_trippers.go:443] POST https://172.17.0.2:6443/api/v1/namespaces/kube-system/configmaps 201 Created in 4 milliseconds
I1017 20:48:19.781863      25 round_trippers.go:443] POST https://172.17.0.2:6443/apis/rbac.authorization.k8s.io/v1/namespaces/kube-system/roles 201 Created in 4 milliseconds
... skipping 140 lines ...
I1017 20:48:25.689368     608 checks.go:287] validating the existence of file /etc/kubernetes/pki/ca.crt
I1017 20:48:25.689472     608 checks.go:433] validating if the connectivity type is via proxy or direct
I1017 20:48:25.689591     608 join.go:441] [preflight] Discovering cluster-info
I1017 20:48:25.689739     608 token.go:199] [discovery] Trying to connect to API Server "172.17.0.2:6443"
I1017 20:48:25.690647     608 token.go:74] [discovery] Created cluster-info discovery client, requesting info from "https://172.17.0.2:6443"
I1017 20:48:25.702351     608 round_trippers.go:443] GET https://172.17.0.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 11 milliseconds
I1017 20:48:25.703915     608 token.go:202] [discovery] Failed to connect to API Server "172.17.0.2:6443": token id "abcdef" is invalid for this cluster or it has expired. Use "kubeadm token create" on the control-plane node to create a new valid token
I1017 20:48:30.704124     608 token.go:199] [discovery] Trying to connect to API Server "172.17.0.2:6443"
I1017 20:48:30.705413     608 token.go:74] [discovery] Created cluster-info discovery client, requesting info from "https://172.17.0.2:6443"
I1017 20:48:30.708326     608 round_trippers.go:443] GET https://172.17.0.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 2 milliseconds
I1017 20:48:30.708640     608 token.go:202] [discovery] Failed to connect to API Server "172.17.0.2:6443": token id "abcdef" is invalid for this cluster or it has expired. Use "kubeadm token create" on the control-plane node to create a new valid token
I1017 20:48:35.708918     608 token.go:199] [discovery] Trying to connect to API Server "172.17.0.2:6443"
I1017 20:48:35.709898     608 token.go:74] [discovery] Created cluster-info discovery client, requesting info from "https://172.17.0.2:6443"
I1017 20:48:35.714744     608 round_trippers.go:443] GET https://172.17.0.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 4 milliseconds
I1017 20:48:35.715136     608 token.go:202] [discovery] Failed to connect to API Server "172.17.0.2:6443": token id "abcdef" is invalid for this cluster or it has expired. Use "kubeadm token create" on the control-plane node to create a new valid token
I1017 20:48:40.715328     608 token.go:199] [discovery] Trying to connect to API Server "172.17.0.2:6443"
I1017 20:48:40.716065     608 token.go:74] [discovery] Created cluster-info discovery client, requesting info from "https://172.17.0.2:6443"
I1017 20:48:40.718596     608 round_trippers.go:443] GET https://172.17.0.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 2 milliseconds
I1017 20:48:40.720742     608 token.go:109] [discovery] Cluster info signature and contents are valid and no TLS pinning was specified, will use API Server "172.17.0.2:6443"
I1017 20:48:40.720761     608 token.go:205] [discovery] Successfully established connection with API Server "172.17.0.2:6443"
I1017 20:48:40.720793     608 discovery.go:51] [discovery] Using provided TLSBootstrapToken as authentication credentials for the join process
... skipping 90 lines ...
I1017 20:48:25.683623     600 checks.go:287] validating the existence of file /etc/kubernetes/pki/ca.crt
I1017 20:48:25.683715     600 checks.go:433] validating if the connectivity type is via proxy or direct
I1017 20:48:25.683816     600 join.go:441] [preflight] Discovering cluster-info
I1017 20:48:25.683935     600 token.go:199] [discovery] Trying to connect to API Server "172.17.0.2:6443"
I1017 20:48:25.684672     600 token.go:74] [discovery] Created cluster-info discovery client, requesting info from "https://172.17.0.2:6443"
I1017 20:48:25.698242     600 round_trippers.go:443] GET https://172.17.0.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 13 milliseconds
I1017 20:48:25.699754     600 token.go:202] [discovery] Failed to connect to API Server "172.17.0.2:6443": token id "abcdef" is invalid for this cluster or it has expired. Use "kubeadm token create" on the control-plane node to create a new valid token
I1017 20:48:30.700055     600 token.go:199] [discovery] Trying to connect to API Server "172.17.0.2:6443"
I1017 20:48:30.700895     600 token.go:74] [discovery] Created cluster-info discovery client, requesting info from "https://172.17.0.2:6443"
I1017 20:48:30.703866     600 round_trippers.go:443] GET https://172.17.0.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 2 milliseconds
I1017 20:48:30.704538     600 token.go:202] [discovery] Failed to connect to API Server "172.17.0.2:6443": token id "abcdef" is invalid for this cluster or it has expired. Use "kubeadm token create" on the control-plane node to create a new valid token
I1017 20:48:35.704871     600 token.go:199] [discovery] Trying to connect to API Server "172.17.0.2:6443"
I1017 20:48:35.705646     600 token.go:74] [discovery] Created cluster-info discovery client, requesting info from "https://172.17.0.2:6443"
I1017 20:48:35.711094     600 round_trippers.go:443] GET https://172.17.0.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 5 milliseconds
I1017 20:48:35.711728     600 token.go:202] [discovery] Failed to connect to API Server "172.17.0.2:6443": token id "abcdef" is invalid for this cluster or it has expired. Use "kubeadm token create" on the control-plane node to create a new valid token
I1017 20:48:40.712049     600 token.go:199] [discovery] Trying to connect to API Server "172.17.0.2:6443"
I1017 20:48:40.713026     600 token.go:74] [discovery] Created cluster-info discovery client, requesting info from "https://172.17.0.2:6443"
I1017 20:48:40.716729     600 round_trippers.go:443] GET https://172.17.0.2:6443/api/v1/namespaces/kube-public/configmaps/cluster-info 200 OK in 3 milliseconds
I1017 20:48:40.718099     600 token.go:109] [discovery] Cluster info signature and contents are valid and no TLS pinning was specified, will use API Server "172.17.0.2:6443"
I1017 20:48:40.718137     600 token.go:205] [discovery] Successfully established connection with API Server "172.17.0.2:6443"
I1017 20:48:40.718178     600 discovery.go:51] [discovery] Using provided TLSBootstrapToken as authentication credentials for the join process
... skipping 103 lines ...
Oct 17 20:49:06.882: INFO: Waiting up to 30m0s for all (but 0) nodes to be schedulable
Oct 17 20:49:06.984: INFO: Condition Ready of node kind-worker is true, but Node is tainted by NodeController with [{node.kubernetes.io/not-ready  NoExecute 2019-10-17 20:49:02 +0000 UTC}]. Failure
Oct 17 20:49:06.986: INFO: Unschedulable nodes:
Oct 17 20:49:06.986: INFO: -> kind-worker Ready=false Network=false Taints=[{node.kubernetes.io/not-ready  NoExecute 2019-10-17 20:49:02 +0000 UTC}] NonblockingTaints:node-role.kubernetes.io/master
Oct 17 20:49:06.986: INFO: ================================
Oct 17 20:49:36.989: INFO: Waiting up to 10m0s for all pods (need at least 0) in namespace 'kube-system' to be running and ready
Oct 17 20:49:37.030: INFO: The status of Pod kube-apiserver-kind-control-plane is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed
Oct 17 20:49:37.030: INFO: 11 / 12 pods in namespace 'kube-system' are running and ready (0 seconds elapsed)
Oct 17 20:49:37.030: INFO: expected 2 pod replicas in namespace 'kube-system', 2 are Running and Ready.
Oct 17 20:49:37.030: INFO: POD                                NODE                PHASE    GRACE  CONDITIONS
Oct 17 20:49:37.030: INFO: kube-apiserver-kind-control-plane  kind-control-plane  Pending         []
Oct 17 20:49:37.030: INFO: 
Oct 17 20:49:39.043: INFO: The status of Pod kube-apiserver-kind-control-plane is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed
Oct 17 20:49:39.043: INFO: 11 / 12 pods in namespace 'kube-system' are running and ready (2 seconds elapsed)
Oct 17 20:49:39.043: INFO: expected 2 pod replicas in namespace 'kube-system', 2 are Running and Ready.
Oct 17 20:49:39.043: INFO: POD                                NODE                PHASE    GRACE  CONDITIONS
Oct 17 20:49:39.043: INFO: kube-apiserver-kind-control-plane  kind-control-plane  Pending         []
Oct 17 20:49:39.043: INFO: 
Oct 17 20:49:41.043: INFO: The status of Pod kube-apiserver-kind-control-plane is Pending (Ready = false), waiting for it to be either Running (with Ready = true) or Failed
Oct 17 20:49:41.043: INFO: 11 / 12 pods in namespace 'kube-system' are running and ready (4 seconds elapsed)
Oct 17 20:49:41.043: INFO: expected 2 pod replicas in namespace 'kube-system', 2 are Running and Ready.
Oct 17 20:49:41.043: INFO: POD                                NODE                PHASE    GRACE  CONDITIONS
Oct 17 20:49:41.043: INFO: kube-apiserver-kind-control-plane  kind-control-plane  Pending         []
Oct 17 20:49:41.043: INFO: 
Oct 17 20:49:43.043: INFO: 12 / 12 pods in namespace 'kube-system' are running and ready (6 seconds elapsed)
... skipping 629 lines ...
Oct 17 20:49:43.247: INFO: >>> kubeConfig: /root/.kube/kind-config-kind
STEP: Building a namespace api object, basename job
Oct 17 20:49:43.415: INFO: Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled.
Oct 17 20:49:43.430: INFO: Found ClusterRoles; assuming RBAC is enabled.
STEP: Binding the e2e-test-privileged-psp PodSecurityPolicy to the default service account in job-7711
STEP: Waiting for a default service account to be provisioned in namespace
[It] should fail when exceeds active deadline
  test/e2e/apps/job.go:133
STEP: Creating a job
STEP: Ensuring job past active deadline
[AfterEach] [sig-apps] Job
  test/e2e/framework/framework.go:151
Oct 17 20:49:45.919: INFO: Waiting up to 3m0s for all (but 0) nodes to be ready
... skipping 2408 lines ...
Oct 17 20:50:32.123: INFO: PersistentVolumeClaim pvc-kcgxl found but phase is Pending instead of Bound.
Oct 17 20:50:34.139: INFO: PersistentVolumeClaim pvc-kcgxl found but phase is Pending instead of Bound.
Oct 17 20:50:36.225: INFO: PersistentVolumeClaim pvc-kcgxl found but phase is Pending instead of Bound.
Oct 17 20:50:38.290: INFO: PersistentVolumeClaim pvc-kcgxl found and phase=Bound (14.23269436s)
Oct 17 20:50:38.292: INFO: Waiting up to 3m0s for PersistentVolume local-pvn5mgc to have phase Bound
Oct 17 20:50:38.320: INFO: PersistentVolume local-pvn5mgc found and phase=Bound (28.422864ms)
[It] should fail scheduling due to different NodeSelector
  test/e2e/storage/persistent_volumes-local.go:363
STEP: local-volume-type: dir
STEP: Initializing test volumes
Oct 17 20:50:38.381: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://127.0.0.1:45841 --kubeconfig=/root/.kube/kind-config-kind exec --namespace=persistent-local-volumes-test-7461 hostexec-kind-worker-5b5xd -- nsenter --mount=/rootfs/proc/1/ns/mnt -- sh -c mkdir -p /tmp/local-volume-test-d7a93392-2a6e-4dda-b2cc-7b89d472c8c0'
Oct 17 20:50:39.109: INFO: stderr: ""
Oct 17 20:50:39.109: INFO: stdout: ""
... skipping 25 lines ...

• [SLOW TEST:27.725 seconds]
[sig-storage] PersistentVolumes-local 
test/e2e/storage/utils/framework.go:23
  Pod with node different from PV's NodeAffinity
  test/e2e/storage/persistent_volumes-local.go:337
    should fail scheduling due to different NodeSelector
    test/e2e/storage/persistent_volumes-local.go:363
------------------------------
[BeforeEach] [sig-storage] Zone Support
  test/e2e/framework/framework.go:150
STEP: Creating a kubernetes client
Oct 17 20:50:41.682: INFO: >>> kubeConfig: /root/.kube/kind-config-kind
... skipping 490 lines ...
Oct 17 20:50:50.821: INFO: ForEach: Found 1 pods from the filter.  Now looping through them.
Oct 17 20:50:50.822: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://127.0.0.1:45841 --kubeconfig=/root/.kube/kind-config-kind describe pod agnhost-master-z4gcq --namespace=kubectl-6436'
Oct 17 20:50:50.999: INFO: stderr: ""
Oct 17 20:50:51.000: INFO: stdout: "Name:         agnhost-master-z4gcq\nNamespace:    kubectl-6436\nPriority:     0\nNode:         kind-worker/172.17.0.3\nStart Time:   Thu, 17 Oct 2019 20:50:36 +0000\nLabels:       app=agnhost\n              role=master\nAnnotations:  <none>\nStatus:       Running\nIP:           10.244.1.33\nIPs:\n  IP:           10.244.1.33\nControlled By:  ReplicationController/agnhost-master\nContainers:\n  agnhost-master:\n    Container ID:   containerd://4adea4d0e91ca1c422073a1cf4f01a35c5692eba6755e35e0ab19d4b67b0ed73\n    Image:          gcr.io/kubernetes-e2e-test-images/agnhost:2.6\n    Image ID:       gcr.io/kubernetes-e2e-test-images/agnhost@sha256:4057a5580c7b59c4fe10d8ab2732c9dec35eea80fd41f7bafc7bd5acc7edf727\n    Port:           6379/TCP\n    Host Port:      0/TCP\n    State:          Running\n      Started:      Thu, 17 Oct 2019 20:50:38 +0000\n    Ready:          True\n    Restart Count:  0\n    Environment:    <none>\n    Mounts:\n      /var/run/secrets/kubernetes.io/serviceaccount from default-token-2lckt (ro)\nConditions:\n  Type              Status\n  Initialized       True \n  Ready             True \n  ContainersReady   True \n  PodScheduled      True \nVolumes:\n  default-token-2lckt:\n    Type:        Secret (a volume populated by a Secret)\n    SecretName:  default-token-2lckt\n    Optional:    false\nQoS Class:       BestEffort\nNode-Selectors:  <none>\nTolerations:     node.kubernetes.io/not-ready:NoExecute for 300s\n                 node.kubernetes.io/unreachable:NoExecute for 300s\nEvents:\n  Type    Reason     Age        From                  Message\n  ----    ------     ----       ----                  -------\n  Normal  Scheduled  <unknown>  default-scheduler     Successfully assigned kubectl-6436/agnhost-master-z4gcq to kind-worker\n  Normal  Pulled     13s        kubelet, kind-worker  Container image \"gcr.io/kubernetes-e2e-test-images/agnhost:2.6\" already present on machine\n  Normal  Created    13s        kubelet, kind-worker  Created container agnhost-master\n  Normal  Started    12s        kubelet, kind-worker  Started container agnhost-master\n"
Oct 17 20:50:51.000: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://127.0.0.1:45841 --kubeconfig=/root/.kube/kind-config-kind describe rc agnhost-master --namespace=kubectl-6436'
Oct 17 20:50:51.167: INFO: stderr: ""
Oct 17 20:50:51.167: INFO: stdout: "Name:         agnhost-master\nNamespace:    kubectl-6436\nSelector:     app=agnhost,role=master\nLabels:       app=agnhost\n              role=master\nAnnotations:  <none>\nReplicas:     1 current / 1 desired\nPods Status:  1 Running / 0 Waiting / 0 Succeeded / 0 Failed\nPod Template:\n  Labels:  app=agnhost\n           role=master\n  Containers:\n   agnhost-master:\n    Image:        gcr.io/kubernetes-e2e-test-images/agnhost:2.6\n    Port:         6379/TCP\n    Host Port:    0/TCP\n    Environment:  <none>\n    Mounts:       <none>\n  Volumes:        <none>\nEvents:\n  Type    Reason            Age   From                    Message\n  ----    ------            ----  ----                    -------\n  Normal  SuccessfulCreate  15s   replication-controller  Created pod: agnhost-master-z4gcq\n"
Oct 17 20:50:51.167: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://127.0.0.1:45841 --kubeconfig=/root/.kube/kind-config-kind describe service agnhost-master --namespace=kubectl-6436'
Oct 17 20:50:51.314: INFO: stderr: ""
Oct 17 20:50:51.314: INFO: stdout: "Name:              agnhost-master\nNamespace:         kubectl-6436\nLabels:            app=agnhost\n                   role=master\nAnnotations:       <none>\nSelector:          app=agnhost,role=master\nType:              ClusterIP\nIP:                10.103.207.104\nPort:              <unset>  6379/TCP\nTargetPort:        agnhost-server/TCP\nEndpoints:         10.244.1.33:6379\nSession Affinity:  None\nEvents:            <none>\n"
Oct 17 20:50:51.321: INFO: Running '/home/prow/go/src/k8s.io/kubernetes/bazel-bin/cmd/kubectl/linux_amd64_pure_stripped/kubectl --server=https://127.0.0.1:45841 --kubeconfig=/root/.kube/kind-config-kind describe node kind-control-plane'
Oct 17 20:50:51.520: INFO: stderr: ""
Oct 17 20:50:51.520: INFO: stdout: "Name:               kind-control-plane\nRoles:              master\nLabels:             beta.kubernetes.io/arch=amd64\n                    beta.kubernetes.io/os=linux\n                    kubernetes.io/arch=amd64\n                    kubernetes.io/hostname=kind-control-plane\n                    kubernetes.io/os=linux\n                    node-role.kubernetes.io/master=\nAnnotations:        kubeadm.alpha.kubernetes.io/cri-socket: /run/containerd/containerd.sock\n                    node.alpha.kubernetes.io/ttl: 0\n                    volumes.kubernetes.io/controller-managed-attach-detach: true\nCreationTimestamp:  Thu, 17 Oct 2019 20:48:17 +0000\nTaints:             node-role.kubernetes.io/master:NoSchedule\nUnschedulable:      false\nLease:\n  HolderIdentity:  kind-control-plane\n  AcquireTime:     <unset>\n  RenewTime:       Thu, 17 Oct 2019 20:50:47 +0000\nConditions:\n  Type             Status  LastHeartbeatTime                 LastTransitionTime                Reason                       Message\n  ----             ------  -----------------                 ------------------                ------                       -------\n  MemoryPressure   False   Thu, 17 Oct 2019 20:50:48 +0000   Thu, 17 Oct 2019 20:48:12 +0000   KubeletHasSufficientMemory   kubelet has sufficient memory available\n  DiskPressure     False   Thu, 17 Oct 2019 20:50:48 +0000   Thu, 17 Oct 2019 20:48:12 +0000   Kubel