Result    FAILURE
Tests     1 failed / 0 succeeded
Started   2019-06-15 16:01
Elapsed   14m52s
Revision

Test Failures


istio.io/istio/tests/e2e/tests/pilot Failure 0.00s


				
tests/istio.mk:184: recipe for target 'test/local/noauth/e2e_pilotv2' failed
make[1]: *** [test/local/noauth/e2e_pilotv2] Error 1
make[1]: Leaving directory '/home/prow/go/src/istio.io/istio'
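
The failing recipe is the test/local/noauth/e2e_pilotv2 target from tests/istio.mk; the full build output follows below. A hedged sketch of rerunning it by hand against your own cluster (HUB and TAG are the standard Istio build variables; the values here are illustrative assumptions, not taken from this log):

# Sketch: rerun the failing target locally.
# HUB/TAG must point at images that actually exist in the registry (assumption).
export HUB=gcr.io/istio-testing
export TAG=<an-existing-image-tag>
make test/local/noauth/e2e_pilotv2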
make generate_e2e_yaml
make[2]: Entering directory '/home/prow/go/src/istio.io/istio'
/home/prow/go/out/linux_amd64/release/helm init --client-only
Creating /root/.helm 
Creating /root/.helm/repository 
Creating /root/.helm/repository/cache 
Creating /root/.helm/repository/local 
Creating /root/.helm/plugins 
Creating /root/.helm/starters 
Creating /root/.helm/cache/archive 
Creating /root/.helm/repository/repositories.yaml 
Adding stable repo with URL: https://kubernetes-charts.storage.googleapis.com 
Adding local repo with URL: http://127.0.0.1:8879/charts 
$HELM_HOME has been configured at /root/.helm.
Not installing Tiller due to 'client-only' flag having been set
Happy Helming!
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-init.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-init.yaml
/home/prow/go/out/linux_amd64/release/helm template --name=istio --namespace=istio-system \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-auth-non-mcp.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-auth-non-mcp.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-auth-sds.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-auth-sds.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-non-mcp.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-non-mcp.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-auth.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-auth.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-auth-mcp.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-auth-mcp.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-auth-multicluster.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-auth-multicluster.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-mcp.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-mcp.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-one-namespace.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-one-namespace.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-one-namespace-auth.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-one-namespace-auth.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-one-namespace-trust-domain.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-one-namespace-trust-domain.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-multicluster.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-multicluster.yaml
/home/prow/go/out/linux_amd64/release/helm template \
cat install/kubernetes/namespace.yaml > install/kubernetes/istio-multicluster-split-horizon.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio-multicluster-split-horizon.yaml
/home/prow/go/out/linux_amd64/release/helm template \
make[2]: Leaving directory '/home/prow/go/src/istio.io/istio'
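
Each install/kubernetes/*.yaml above is generated the same way: namespace.yaml and the istio-init CRD files are concatenated, then rendered chart output is appended. The helm template arguments are truncated at the line continuations in this log; a hedged sketch of the pattern for istio.yaml (chart path and values file are assumptions, while --name and --namespace match the invocation shown earlier):

# Sketch of one generate_e2e_yaml step; flag values are illustrative, not from this log.
cat install/kubernetes/namespace.yaml > install/kubernetes/istio.yaml
cat install/kubernetes/helm/istio-init/files/crd-* >> install/kubernetes/istio.yaml
helm template install/kubernetes/helm/istio \
  --name=istio --namespace=istio-system \
  --values install/kubernetes/helm/istio/values.yaml \
  >> install/kubernetes/istio.yaml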
set -o pipefail; go test -v -timeout 50m ./tests/e2e/tests/pilot \
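
The go test invocation above is truncated at the line continuation. A hedged sketch of rerunning the suite by hand (the framework flag and the log redirection are assumptions, not taken from this log):

# Sketch: rerun the pilot e2e suite locally; --skip_cleanup is an assumed framework flag.
cd $GOPATH/src/istio.io/istio
set -o pipefail
go test -v -timeout 50m ./tests/e2e/tests/pilot --skip_cleanup 2>&1 | tee pilot-e2e.log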


Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
namespace "istio-system" configured
customresourcedefinition.apiextensions.k8s.io "virtualservices.networking.istio.io" created
customresourcedefinition.apiextensions.k8s.io "destinationrules.networking.istio.io" created
customresourcedefinition.apiextensions.k8s.io "serviceentries.networking.istio.io" created
customresourcedefinition.apiextensions.k8s.io "gateways.networking.istio.io" created
customresourcedefinition.apiextensions.k8s.io "sidecars.networking.istio.io" created
customresourcedefinition.apiextensions.k8s.io "envoyfilters.networking.istio.io" created
customresourcedefinition.apiextensions.k8s.io "clusterrbacconfigs.rbac.istio.io" created
customresourcedefinition.apiextensions.k8s.io "policies.authentication.istio.io" created
customresourcedefinition.apiextensions.k8s.io "meshpolicies.authentication.istio.io" created
customresourcedefinition.apiextensions.k8s.io "httpapispecbindings.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "httpapispecs.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "quotaspecbindings.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "quotaspecs.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "rules.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "attributemanifests.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "rbacconfigs.rbac.istio.io" created
customresourcedefinition.apiextensions.k8s.io "serviceroles.rbac.istio.io" created
customresourcedefinition.apiextensions.k8s.io "servicerolebindings.rbac.istio.io" created
customresourcedefinition.apiextensions.k8s.io "adapters.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "instances.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "templates.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "handlers.config.istio.io" created
customresourcedefinition.apiextensions.k8s.io "sidecars.networking.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "authorizationpolicies.rbac.istio.io" created
customresourcedefinition.apiextensions.k8s.io "clusterissuers.certmanager.k8s.io" created
customresourcedefinition.apiextensions.k8s.io "issuers.certmanager.k8s.io" created
customresourcedefinition.apiextensions.k8s.io "orders.certmanager.k8s.io" created
customresourcedefinition.apiextensions.k8s.io "challenges.certmanager.k8s.io" created
configmap "istio-crd-10" created
configmap "istio-crd-11" created
configmap "istio-crd-12" created
serviceaccount "istio-init-service-account" created
clusterrole.rbac.authorization.k8s.io "istio-init-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-init-admin-role-binding-istio-system" created
job.batch "istio-init-crd-10" created
job.batch "istio-init-crd-11" created
job.batch "istio-init-crd-12" created
secret "cacerts" created
namespace "istio-system" configured
customresourcedefinition.apiextensions.k8s.io "virtualservices.networking.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "destinationrules.networking.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "serviceentries.networking.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "gateways.networking.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "sidecars.networking.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "envoyfilters.networking.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "clusterrbacconfigs.rbac.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "policies.authentication.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "meshpolicies.authentication.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "httpapispecbindings.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "httpapispecs.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "quotaspecbindings.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "quotaspecs.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "rules.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "attributemanifests.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "rbacconfigs.rbac.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "serviceroles.rbac.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "servicerolebindings.rbac.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "adapters.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "instances.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "templates.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "handlers.config.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "sidecars.networking.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "authorizationpolicies.rbac.istio.io" configured
customresourcedefinition.apiextensions.k8s.io "clusterissuers.certmanager.k8s.io" configured
customresourcedefinition.apiextensions.k8s.io "issuers.certmanager.k8s.io" configured
customresourcedefinition.apiextensions.k8s.io "orders.certmanager.k8s.io" configured
customresourcedefinition.apiextensions.k8s.io "challenges.certmanager.k8s.io" configured
configmap "istio-galley-configuration" created
configmap "prometheus" created
configmap "istio-security-custom-resources" created
configmap "istio" created
configmap "istio-sidecar-injector" created
serviceaccount "istio-galley-service-account" created
serviceaccount "istio-egressgateway-service-account" created
serviceaccount "istio-ingressgateway-service-account" created
serviceaccount "istio-mixer-service-account" created
serviceaccount "istio-pilot-service-account" created
serviceaccount "prometheus" created
serviceaccount "istio-cleanup-secrets-service-account" created
clusterrole.rbac.authorization.k8s.io "istio-cleanup-secrets-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-cleanup-secrets-istio-system" created
job.batch "istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42" created
serviceaccount "istio-security-post-install-account" created
clusterrole.rbac.authorization.k8s.io "istio-security-post-install-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-security-post-install-role-binding-istio-system" created
job.batch "istio-security-post-install-283d57410a29fa84b1a7971211380e42" created
serviceaccount "istio-citadel-service-account" created
serviceaccount "istio-sidecar-injector-service-account" created
serviceaccount "istio-multi" created
clusterrole.rbac.authorization.k8s.io "istio-galley-istio-system" created
clusterrole.rbac.authorization.k8s.io "istio-mixer-istio-system" created
clusterrole.rbac.authorization.k8s.io "istio-pilot-istio-system" created
clusterrole.rbac.authorization.k8s.io "prometheus-istio-system" created
clusterrole.rbac.authorization.k8s.io "istio-citadel-istio-system" created
clusterrole.rbac.authorization.k8s.io "istio-sidecar-injector-istio-system" created
clusterrole.rbac.authorization.k8s.io "istio-reader" created
clusterrolebinding.rbac.authorization.k8s.io "istio-galley-admin-role-binding-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-mixer-admin-role-binding-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-pilot-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "prometheus-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-citadel-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-sidecar-injector-admin-role-binding-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-multi" created
role.rbac.authorization.k8s.io "istio-ingressgateway-sds" created
rolebinding.rbac.authorization.k8s.io "istio-ingressgateway-sds" created
service "istio-galley" created
service "istio-egressgateway" created
service "istio-ingressgateway" created
service "istio-policy" created
service "istio-telemetry" created
service "istio-pilot" created
service "prometheus" created
service "istio-citadel" created
service "istio-sidecar-injector" created
deployment.apps "istio-galley" created
deployment.apps "istio-egressgateway" created
deployment.apps "istio-ingressgateway" created
deployment.apps "istio-policy" created
deployment.apps "istio-telemetry" created
deployment.apps "istio-pilot" created
deployment.apps "prometheus" created
deployment.apps "istio-citadel" created
deployment.apps "istio-sidecar-injector" created
horizontalpodautoscaler.autoscaling "istio-egressgateway" created
horizontalpodautoscaler.autoscaling "istio-ingressgateway" created
horizontalpodautoscaler.autoscaling "istio-telemetry" created
horizontalpodautoscaler.autoscaling "istio-pilot" created
mutatingwebhookconfiguration.admissionregistration.k8s.io "istio-sidecar-injector" created
poddisruptionbudget.policy "istio-galley" created
poddisruptionbudget.policy "istio-egressgateway" created
poddisruptionbudget.policy "istio-ingressgateway" created
poddisruptionbudget.policy "istio-policy" created
poddisruptionbudget.policy "istio-telemetry" created
poddisruptionbudget.policy "istio-pilot" created
poddisruptionbudget.policy "istio-sidecar-injector" created
attributemanifest.config.istio.io "istioproxy" created
attributemanifest.config.istio.io "kubernetes" created
handler.config.istio.io "stdio" created
instance.config.istio.io "accesslog" created
instance.config.istio.io "tcpaccesslog" created
rule.config.istio.io "stdio" created
rule.config.istio.io "stdiotcp" created
instance.config.istio.io "requestcount" created
instance.config.istio.io "requestduration" created
instance.config.istio.io "requestsize" created
instance.config.istio.io "responsesize" created
instance.config.istio.io "tcpbytesent" created
instance.config.istio.io "tcpbytereceived" created
instance.config.istio.io "tcpconnectionsopened" created
instance.config.istio.io "tcpconnectionsclosed" created
handler.config.istio.io "prometheus" created
rule.config.istio.io "promhttp" created
rule.config.istio.io "promtcp" created
rule.config.istio.io "promtcpconnectionopen" created
rule.config.istio.io "promtcpconnectionclosed" created
handler.config.istio.io "kubernetesenv" created
rule.config.istio.io "kubeattrgenrulerule" created
rule.config.istio.io "tcpkubeattrgenrulerule" created
instance.config.istio.io "attributes" created
destinationrule.networking.istio.io "istio-policy" created
destinationrule.networking.istio.io "istio-telemetry" created

secret "cacerts" created
$HELM_HOME has been configured at /root/.helm.
Not installing Tiller due to 'client-only' flag having been set
Happy Helming!
configmap "istio-security-custom-resources" created
configmap "istio" created
configmap "istio-sidecar-injector" created
serviceaccount "istio-cleanup-secrets-service-account" created
clusterrole.rbac.authorization.k8s.io "istio-cleanup-secrets-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-cleanup-secrets-istio-system" created
job.batch "istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42" created
serviceaccount "istio-citadel-service-account" created
serviceaccount "istio-multi" created
clusterrole.rbac.authorization.k8s.io "istio-citadel-istio-system" created
clusterrole.rbac.authorization.k8s.io "istio-reader" created
clusterrolebinding.rbac.authorization.k8s.io "istio-citadel-istio-system" created
clusterrolebinding.rbac.authorization.k8s.io "istio-multi" created
service "istio-citadel" created
deployment.apps "istio-citadel" created
service "istio-ingressgateway" created
endpoints "istio-ingressgateway" created
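
All of the apply output above succeeds, so the rendered manifests themselves are not the problem. Standard checks to confirm the objects landed (illustrative commands, not taken from this log):

# Verify the CRDs and control-plane objects were created.
kubectl get crds | grep 'istio.io\|certmanager.k8s.io'
kubectl -n istio-system get deployments,jobs,services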


deployment.extensions/istio-citadel
deployment.extensions/istio-egressgateway
deployment.extensions/istio-galley
deployment.extensions/istio-ingressgateway
deployment.extensions/istio-pilot
deployment.extensions/istio-policy
deployment.extensions/istio-sidecar-injector
deployment.extensions/istio-telemetry
deployment.extensions/prometheus
No resources found.
NAME                                                              READY     STATUS                  RESTARTS   AGE
istio-citadel-64f666748f-hn5lx                                    0/1       ImagePullBackOff        0          10m
istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx      0/1       ImagePullBackOff        0          10m
istio-egressgateway-75fc469ddf-qzz76                              0/1       Init:ImagePullBackOff   0          10m
istio-galley-6978b79858-np854                                     0/1       ContainerCreating       0          10m
istio-ingressgateway-864db98588-9pcs4                             0/1       Init:ImagePullBackOff   0          10m
istio-init-crd-10-l6gjh                                           0/1       ImagePullBackOff        0          10m
istio-init-crd-11-wsqpp                                           0/1       ImagePullBackOff        0          10m
istio-init-crd-12-kbfdx                                           0/1       ImagePullBackOff        0          10m
istio-pilot-696b75d9f8-cz6lj                                      1/2       Running                 1          10m
istio-policy-6bb8d44f78-bwswb                                     1/2       ImagePullBackOff        0          10m
istio-policy-6bb8d44f78-tbwhc                                     1/2       ImagePullBackOff        0          10m
istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v   0/1       ImagePullBackOff        0          10m
istio-sidecar-injector-7896cddb4f-lzwkv                           0/1       ContainerCreating       0          10m
istio-telemetry-65874b4d75-p29cv                                  1/2       ImagePullBackOff        0          10m
prometheus-5b48f5d49-qvflw                                        0/1       ContainerCreating       0          10m
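
Every pod that needs an image from gcr.io/istio-testing is stuck in ImagePullBackOff, or is blocked behind a component that is, so the control plane never becomes ready. The describe output below corresponds to what commands like these would produce (illustrative; the pod name is taken from the listing above):

# Illustrative diagnostics for the stuck pods.
kubectl -n istio-system get pods
kubectl -n istio-system describe pod istio-citadel-64f666748f-hn5lx
kubectl -n istio-system get events --sort-by=.lastTimestamp | tail -n 50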
Name:               istio-citadel-64f666748f-hn5lx
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
Start Time:         Sat, 15 Jun 2019 16:05:23 +0000
Labels:             app=security
                    chart=security
                    heritage=Tiller
                    istio=citadel
                    pod-template-hash=64f666748f
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 10.0.4.7
Controlled By:      ReplicaSet/istio-citadel-64f666748f
Containers:
  citadel:
    Container ID:  
    Image:         gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Args:
      --append-dns-names=true
      --grpc-port=8060
      --citadel-storage-namespace=istio-system
      --custom-dns-names=istio-pilot-service-account.istio-system:istio-pilot.istio-system
      --monitoring-port=15014
      --self-signed-ca=false
      --signing-cert=/etc/cacerts/ca-cert.pem
      --signing-key=/etc/cacerts/ca-key.pem
      --root-cert=/etc/cacerts/root-cert.pem
      --cert-chain=/etc/cacerts/cert-chain.pem
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Requests:
      cpu:        10m
    Liveness:     http-get http://:15014/version delay=5s timeout=1s period=5s #success=1 #failure=3
    Environment:  <none>
    Mounts:
      /etc/cacerts from cacerts (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-citadel-service-account-token-2t2t6 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  cacerts:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  cacerts
    Optional:    true
  istio-citadel-service-account-token-2t2t6:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-citadel-service-account-token-2t2t6
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age               From                                                          Message
  ----     ------     ----              ----                                                          -------
  Normal   Scheduled  10m               default-scheduler                                             Successfully assigned istio-system/istio-citadel-64f666748f-hn5lx to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   Pulling    8m (x4 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     8m (x4 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     8m (x4 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   BackOff    8m (x6 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     4m (x20 over 9m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
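
The events pin down the root cause: there is no manifest for tag 283d57410a29fa84b1a7971211380e42c65b8daa in gcr.io/istio-testing, so the pull can never succeed no matter how often the kubelet retries. A hedged way to confirm whether the tag exists (requires read access to the registry):

# Check the registry for the tag referenced in the events above.
gcloud container images list-tags gcr.io/istio-testing/citadel \
  --filter='tags:283d57410a29fa84b1a7971211380e42c65b8daa'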

citadel

Name:               istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:05:15 +0000
Labels:             app=security
                    chart=security
                    controller-uid=5ce75332-8f87-11e9-a033-42010a960fc1
                    heritage=Tiller
                    job-name=istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42
                    release=istio
Annotations:        <none>
Status:             Pending
IP:                 10.0.3.4
Controlled By:      Job/istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42
Containers:
  kubectl:
    Container ID:  
    Image:         gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/bash
      -c
      kubectl get secret --all-namespaces | grep "istio.io/key-and-cert" |  while read -r entry; do
  ns=$(echo $entry | awk '{print $1}');
  name=$(echo $entry | awk '{print $2}');
  kubectl delete secret $name -n $ns;
done

    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from istio-cleanup-secrets-service-account-token-ksbkj (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  istio-cleanup-secrets-service-account-token-ksbkj:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-cleanup-secrets-service-account-token-ksbkj
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                From                                                          Message
  ----     ------     ----               ----                                                          -------
  Normal   Scheduled  10m                default-scheduler                                             Successfully assigned istio-system/istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Normal   Pulling    8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Normal   BackOff    8m (x7 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     4m (x20 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
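
The cleanup job's command, shown in the describe output above, deletes every secret of type istio.io/key-and-cert across all namespaces. A commented restatement of that embedded script, same logic but standalone:

#!/bin/bash
# List secrets in every namespace and keep only Citadel-issued key/cert secrets.
kubectl get secret --all-namespaces | grep "istio.io/key-and-cert" | while read -r entry; do
  ns=$(echo $entry | awk '{print $1}')     # namespace column
  name=$(echo $entry | awk '{print $2}')   # secret name column
  kubectl delete secret $name -n $ns       # delete each matching secret
done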

kubectl

Name:               istio-egressgateway-75fc469ddf-qzz76
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:05:21 +0000
Labels:             app=istio-egressgateway
                    chart=gateways
                    heritage=Tiller
                    istio=egressgateway
                    pod-template-hash=75fc469ddf
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 10.0.3.7
Controlled By:      ReplicaSet/istio-egressgateway-75fc469ddf
Init Containers:
  enable-core-dump:
    Container ID:  
    Image:         gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
    Args:
      -c
      sysctl -w kernel.core_pattern=/var/lib/istio/core.proxy && ulimit -c unlimited
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from istio-egressgateway-service-account-token-tk97f (ro)
Containers:
  istio-proxy:
    Container ID:  
    Image:         gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Ports:         80/TCP, 443/TCP, 15443/TCP, 15090/TCP
    Host Ports:    0/TCP, 0/TCP, 0/TCP, 0/TCP
    Args:
      proxy
      router
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --log_output_level=default:info
      --drainDuration
      2s
      --parentShutdownDuration
      3s
      --connectTimeout
      1s
      --serviceCluster
      istio-egressgateway
      --zipkinAddress
      zipkin:9411
      --proxyAdminPort
      15000
      --statusPort
      15020
      --controlPlaneAuthPolicy
      NONE
      --discoveryAddress
      istio-pilot:15010
    State:          Waiting
      Reason:       PodInitializing
    Ready:          False
    Restart Count:  0
    Limits:
      cpu:     100m
      memory:  128Mi
    Requests:
      cpu:      10m
      memory:   40Mi
    Readiness:  http-get http://:15020/healthz/ready delay=1s timeout=1s period=2s #success=1 #failure=30
    Environment:
      NODE_NAME:                     (v1:spec.nodeName)
      POD_NAME:                     istio-egressgateway-75fc469ddf-qzz76 (v1:metadata.name)
      POD_NAMESPACE:                istio-system (v1:metadata.namespace)
      INSTANCE_IP:                   (v1:status.podIP)
      HOST_IP:                       (v1:status.hostIP)
      ISTIO_META_POD_NAME:          istio-egressgateway-75fc469ddf-qzz76 (v1:metadata.name)
      ISTIO_META_CONFIG_NAMESPACE:  istio-system (v1:metadata.namespace)
      ISTIO_META_ROUTER_MODE:       sni-dnat
    Mounts:
      /etc/certs from istio-certs (ro)
      /etc/istio/egressgateway-ca-certs from egressgateway-ca-certs (ro)
      /etc/istio/egressgateway-certs from egressgateway-certs (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-egressgateway-service-account-token-tk97f (ro)
Conditions:
  Type              Status
  Initialized       False 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  istio-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio.istio-egressgateway-service-account
    Optional:    true
  egressgateway-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-egressgateway-certs
    Optional:    true
  egressgateway-ca-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-egressgateway-ca-certs
    Optional:    true
  istio-egressgateway-service-account-token-tk97f:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-egressgateway-service-account-token-tk97f
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age                From                                                          Message
  ----     ------          ----               ----                                                          -------
  Normal   Scheduled       10m                default-scheduler                                             Successfully assigned istio-system/istio-egressgateway-75fc469ddf-qzz76 to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Normal   SandboxChanged  10m                kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          9m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          9m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Warning  Failed          8m (x7 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
  Normal   Pulling         8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff         5m (x22 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"
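
The gateway pods fail one step earlier than Citadel: the proxy_init init container cannot be pulled, so istio-proxy never leaves PodInitializing. A quick way to list every image the namespace references and spot the bad tag in one pass (a sketch; init container images can be listed the same way via .spec.initContainers):

# Print pod name and container images, one pod per line.
kubectl -n istio-system get pods \
  -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[*].image}{"\n"}{end}'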

istio-proxy

Name:               istio-galley-6978b79858-np854
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:05:21 +0000
Labels:             app=galley
                    chart=galley
                    heritage=Tiller
                    istio=galley
                    pod-template-hash=6978b79858
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 
Controlled By:      ReplicaSet/istio-galley-6978b79858
Containers:
  galley:
    Container ID:  
    Image:         gcr.io/istio-testing/galley:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Ports:         443/TCP, 15014/TCP, 9901/TCP
    Host Ports:    0/TCP, 0/TCP, 0/TCP
    Command:
      /usr/local/bin/galley
      server
      --meshConfigFile=/etc/mesh-config/mesh
      --livenessProbeInterval=1s
      --livenessProbePath=/healthliveness
      --readinessProbePath=/healthready
      --readinessProbeInterval=1s
      --deployment-namespace=istio-system
      --insecure=true
      --validation-webhook-config-file
      /etc/config/validatingwebhookconfiguration.yaml
      --monitoringPort=15014
      --log_output_level=default:info
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Requests:
      cpu:        10m
    Liveness:     exec [/usr/local/bin/galley probe --probe-path=/healthliveness --interval=10s] delay=5s timeout=1s period=5s #success=1 #failure=3
    Readiness:    exec [/usr/local/bin/galley probe --probe-path=/healthready --interval=10s] delay=5s timeout=1s period=5s #success=1 #failure=3
    Environment:  <none>
    Mounts:
      /etc/certs from certs (ro)
      /etc/config from config (ro)
      /etc/mesh-config from mesh-config (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-galley-service-account-token-jgt4f (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio.istio-galley-service-account
    Optional:    false
  config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-galley-configuration
    Optional:  false
  mesh-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio
    Optional:  false
  istio-galley-service-account-token-jgt4f:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-galley-service-account-token-jgt4f
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason       Age                From                                                          Message
  ----     ------       ----               ----                                                          -------
  Normal   Scheduled    10m                default-scheduler                                             Successfully assigned istio-system/istio-galley-6978b79858-np854 to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Warning  FailedMount  1m (x12 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  MountVolume.SetUp failed for volume "certs" : secrets "istio.istio-galley-service-account" not found
  Warning  FailedMount  1m (x4 over 8m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Unable to mount volumes for pod "istio-galley-6978b79858-np854_istio-system(605c6eb2-8f87-11e9-a033-42010a960fc1)": timeout expired waiting for volumes to attach or mount for pod "istio-system"/"istio-galley-6978b79858-np854". list of unmounted volumes=[certs]. list of unattached volumes=[certs config mesh-config istio-galley-service-account-token-jgt4f]
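
Galley's failure is a downstream effect rather than a pull error: its certs volume comes from the istio.istio-galley-service-account secret, which Citadel would normally create once it is running, so the mount times out while Citadel sits in ImagePullBackOff. A hedged check for the missing secret:

# Expected to return NotFound while Citadel is down.
kubectl -n istio-system get secret istio.istio-galley-service-account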

galley

Name:               istio-ingressgateway-864db98588-9pcs4
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
Start Time:         Sat, 15 Jun 2019 16:05:22 +0000
Labels:             app=istio-ingressgateway
                    chart=gateways
                    heritage=Tiller
                    istio=ingressgateway
                    pod-template-hash=864db98588
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 10.0.4.5
Controlled By:      ReplicaSet/istio-ingressgateway-864db98588
Init Containers:
  enable-core-dump:
    Container ID:  
    Image:         gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
    Args:
      -c
      sysctl -w kernel.core_pattern=/var/lib/istio/core.proxy && ulimit -c unlimited
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from istio-ingressgateway-service-account-token-pnmfh (ro)
Containers:
  istio-proxy:
    Container ID:  
    Image:         gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Ports:         15020/TCP, 80/TCP, 443/TCP, 31400/TCP, 15029/TCP, 15030/TCP, 15031/TCP, 15032/TCP, 15443/TCP, 15090/TCP
    Host Ports:    0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP
    Args:
      proxy
      router
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --log_output_level=default:info
      --drainDuration
      2s
      --parentShutdownDuration
      3s
      --connectTimeout
      1s
      --serviceCluster
      istio-ingressgateway
      --zipkinAddress
      zipkin:9411
      --proxyAdminPort
      15000
      --statusPort
      15020
      --controlPlaneAuthPolicy
      NONE
      --discoveryAddress
      istio-pilot:15010
    State:          Waiting
      Reason:       PodInitializing
    Ready:          False
    Restart Count:  0
    Limits:
      cpu:     100m
      memory:  128Mi
    Requests:
      cpu:      10m
      memory:   40Mi
    Readiness:  http-get http://:15020/healthz/ready delay=1s timeout=1s period=2s #success=1 #failure=30
    Environment:
      NODE_NAME:                     (v1:spec.nodeName)
      POD_NAME:                     istio-ingressgateway-864db98588-9pcs4 (v1:metadata.name)
      POD_NAMESPACE:                istio-system (v1:metadata.namespace)
      INSTANCE_IP:                   (v1:status.podIP)
      HOST_IP:                       (v1:status.hostIP)
      ISTIO_META_POD_NAME:          istio-ingressgateway-864db98588-9pcs4 (v1:metadata.name)
      ISTIO_META_CONFIG_NAMESPACE:  istio-system (v1:metadata.namespace)
      ISTIO_META_ROUTER_MODE:       sni-dnat
    Mounts:
      /etc/certs from istio-certs (ro)
      /etc/istio/ingressgateway-ca-certs from ingressgateway-ca-certs (ro)
      /etc/istio/ingressgateway-certs from ingressgateway-certs (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-ingressgateway-service-account-token-pnmfh (ro)
Conditions:
  Type              Status
  Initialized       False 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  istio-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio.istio-ingressgateway-service-account
    Optional:    true
  ingressgateway-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-ingressgateway-certs
    Optional:    true
  ingressgateway-ca-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-ingressgateway-ca-certs
    Optional:    true
  istio-ingressgateway-service-account-token-pnmfh:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-ingressgateway-service-account-token-pnmfh
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                From                                                          Message
  ----     ------     ----               ----                                                          -------
  Normal   Scheduled  10m                default-scheduler                                             Successfully assigned istio-system/istio-ingressgateway-864db98588-9pcs4 to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   Pulling    8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Warning  Failed     8m (x6 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
  Normal   BackOff    2s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"

istio-proxy

Name:               istio-init-crd-10-l6gjh
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:04:46 +0000
Labels:             controller-uid=4bd31848-8f87-11e9-a033-42010a960fc1
                    job-name=istio-init-crd-10
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 10.0.3.3
Controlled By:      Job/istio-init-crd-10
Containers:
  istio-init-crd-10:
    Container ID:  
    Image:         gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Command:
      kubectl
      apply
      -f
      /etc/istio/crd-10/crd-10.yaml
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /etc/istio/crd-10 from crd-10 (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-init-service-account-token-hhmvp (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  crd-10:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-crd-10
    Optional:  false
  istio-init-service-account-token-hhmvp:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-init-service-account-token-hhmvp
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age                 From                                                          Message
  ----     ------          ----                ----                                                          -------
  Normal   Scheduled       10m                 default-scheduler                                             Successfully assigned istio-system/istio-init-crd-10-l6gjh to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Normal   SandboxChanged  10m                 kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          10m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          10m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Normal   BackOff         9m (x7 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   Pulling         9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          38s (x45 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff

istio-init-crd-10

Name:               istio-init-crd-11-wsqpp
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
Start Time:         Sat, 15 Jun 2019 16:04:46 +0000
Labels:             controller-uid=4bf25ff9-8f87-11e9-a033-42010a960fc1
                    job-name=istio-init-crd-11
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 10.0.4.3
Controlled By:      Job/istio-init-crd-11
Containers:
  istio-init-crd-11:
    Container ID:  
    Image:         gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Command:
      kubectl
      apply
      -f
      /etc/istio/crd-11/crd-11.yaml
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /etc/istio/crd-11 from crd-11 (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-init-service-account-token-hhmvp (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  crd-11:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-crd-11
    Optional:  false
  istio-init-service-account-token-hhmvp:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-init-service-account-token-hhmvp
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age                 From                                                          Message
  ----     ------          ----                ----                                                          -------
  Normal   Scheduled       10m                 default-scheduler                                             Successfully assigned istio-system/istio-init-crd-11-wsqpp to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   SandboxChanged  10m                 kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Pod sandbox changed, it will be killed and re-created.
  Normal   Pulling         9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Warning  Failed          5m (x22 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
  Normal   BackOff         49s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"

istio-init-crd-11

Name:               istio-init-crd-12-kbfdx
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp/10.150.0.39
Start Time:         Sat, 15 Jun 2019 16:04:47 +0000
Labels:             controller-uid=4c116ace-8f87-11e9-a033-42010a960fc1
                    job-name=istio-init-crd-12
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 10.0.2.3
Controlled By:      Job/istio-init-crd-12
Containers:
  istio-init-crd-12:
    Container ID:  
    Image:         gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Command:
      kubectl
      apply
      -f
      /etc/istio/crd-12/crd-12.yaml
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /etc/istio/crd-12 from crd-12 (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-init-service-account-token-hhmvp (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  crd-12:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-crd-12
    Optional:  false
  istio-init-service-account-token-hhmvp:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-init-service-account-token-hhmvp
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-init-crd-12-kbfdx to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp
  Normal   Pulling    9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed     9m (x6 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Normal   BackOff    50s (x42 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"

istio-init-crd-12

Name:               istio-pilot-696b75d9f8-cz6lj
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp/10.150.0.39
Start Time:         Sat, 15 Jun 2019 16:05:23 +0000
Labels:             app=pilot
                    chart=pilot
                    heritage=Tiller
                    istio=pilot
                    pod-template-hash=696b75d9f8
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Running
IP:                 10.0.2.6
Controlled By:      ReplicaSet/istio-pilot-696b75d9f8
Containers:
  discovery:
    Container ID:  docker://bf153d0cf7a356156db1c9d091e1063775893204d6136427ff51b8f834d9b254
    Image:         gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      docker-pullable://gcr.io/istio-testing/pilot@sha256:ae303591c1863b9d6221c58bae4c4e04439349c688b7031aca4d89fdcc5cd05e
    Ports:         8080/TCP, 15010/TCP
    Host Ports:    0/TCP, 0/TCP
    Args:
      discovery
      --monitoringAddr=:15014
      --log_output_level=default:info
      --domain
      cluster.local
      --secureGrpcAddr
      
      --keepaliveMaxServerConnectionAge
      30m
    State:          Running
      Started:      Sat, 15 Jun 2019 16:08:38 +0000
    Ready:          False
    Restart Count:  0
    Requests:
      cpu:      500m
      memory:   2Gi
    Readiness:  http-get http://:8080/ready delay=5s timeout=5s period=30s #success=1 #failure=3
    Environment:
      POD_NAME:                             istio-pilot-696b75d9f8-cz6lj (v1:metadata.name)
      POD_NAMESPACE:                        istio-system (v1:metadata.namespace)
      GODEBUG:                              gctrace=1
      PILOT_PUSH_THROTTLE:                  100
      PILOT_TRACE_SAMPLING:                 1
      PILOT_DISABLE_XDS_MARSHALING_TO_ANY:  1
    Mounts:
      /etc/certs from istio-certs (ro)
      /etc/istio/config from config-volume (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-pilot-service-account-token-w6p96 (ro)
  istio-proxy:
    Container ID:  docker://f1f1393d11eb394c0722629ba28b3b1546ad63ad64891004ebe605ff8b9f1f3e
    Image:         gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      docker-pullable://gcr.io/istio-testing/proxyv2@sha256:690c9ec4efde9be9cbd5fe56498c6f3ba1d61a2432eeae70cf3438c7f0e00949
    Ports:         15003/TCP, 15005/TCP, 15007/TCP, 15011/TCP
    Host Ports:    0/TCP, 0/TCP, 0/TCP, 0/TCP
    Args:
      proxy
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --serviceCluster
      istio-pilot
      --templateFile
      /etc/istio/proxy/envoy_pilot.yaml.tmpl
      --controlPlaneAuthPolicy
      NONE
    State:          Running
      Started:      Sat, 15 Jun 2019 16:14:53 +0000
    Last State:     Terminated
      Reason:       Error
      Exit Code:    255
      Started:      Sat, 15 Jun 2019 16:11:27 +0000
      Finished:     Sat, 15 Jun 2019 16:14:52 +0000
    Ready:          True
    Restart Count:  1
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:     10m
      memory:  40Mi
    Environment:
      POD_NAME:       istio-pilot-696b75d9f8-cz6lj (v1:metadata.name)
      POD_NAMESPACE:  istio-system (v1:metadata.namespace)
      INSTANCE_IP:     (v1:status.podIP)
    Mounts:
      /etc/certs from istio-certs (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-pilot-service-account-token-w6p96 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio
    Optional:  false
  istio-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio.istio-pilot-service-account
    Optional:    true
  istio-pilot-service-account-token-w6p96:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-pilot-service-account-token-w6p96
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age                From                                                          Message
  ----     ------          ----               ----                                                          -------
  Normal   Scheduled       10m                default-scheduler                                             Successfully assigned istio-system/istio-pilot-696b75d9f8-cz6lj to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp
  Normal   SandboxChanged  10m                kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Pod sandbox changed, it will be killed and re-created.
  Normal   BackOff         10m (x3 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          10m (x3 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Normal   BackOff         10m (x3 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   Pulling         9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Normal   Pulling         9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          5m (x22 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Warning  Unhealthy       10s (x14 over 6m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Readiness probe failed: Get http://10.0.2.6:8080/ready: net/http: request canceled (Client.Timeout exceeded while awaiting headers)

discovery istio-proxy
Name:               istio-policy-6bb8d44f78-bwswb
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:05:22 +0000
Labels:             app=policy
                    chart=mixer
                    heritage=Tiller
                    istio=mixer
                    istio-mixer-type=policy
                    pod-template-hash=6bb8d44f78
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 10.0.3.6
Controlled By:      ReplicaSet/istio-policy-6bb8d44f78
Containers:
  mixer:
    Container ID:  
    Image:         gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Ports:         15014/TCP, 42422/TCP
    Host Ports:    0/TCP, 0/TCP
    Args:
      --monitoringPort=15014
      --address
      unix:///sock/mixer.socket
      --log_output_level=default:info
      --configStoreURL=mcp://istio-galley.istio-system.svc:9901
      --configDefaultNamespace=istio-system
      --useAdapterCRDs=false
      --useTemplateCRDs=false
      --trace_zipkin_url=http://zipkin.istio-system:9411/api/v1/spans
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Limits:
      cpu:     100m
      memory:  100Mi
    Requests:
      cpu:     10m
      memory:  100Mi
    Liveness:  http-get http://:15014/version delay=5s timeout=1s period=5s #success=1 #failure=3
    Environment:
      GODEBUG:     gctrace=1
      GOMAXPROCS:  6
    Mounts:
      /etc/certs from istio-certs (ro)
      /sock from uds-socket (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-mixer-service-account-token-rgk56 (ro)
  istio-proxy:
    Container ID:  docker://48f467878ba8f727b3d45cf6894d362d4de5b8f6457aea5af57044d9e9096005
    Image:         gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      docker-pullable://gcr.io/istio-testing/proxyv2@sha256:690c9ec4efde9be9cbd5fe56498c6f3ba1d61a2432eeae70cf3438c7f0e00949
    Ports:         9091/TCP, 15004/TCP, 15090/TCP
    Host Ports:    0/TCP, 0/TCP, 0/TCP
    Args:
      proxy
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --serviceCluster
      istio-policy
      --templateFile
      /etc/istio/proxy/envoy_policy.yaml.tmpl
      --controlPlaneAuthPolicy
      NONE
    State:          Running
      Started:      Sat, 15 Jun 2019 16:11:14 +0000
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:     10m
      memory:  40Mi
    Environment:
      POD_NAME:       istio-policy-6bb8d44f78-bwswb (v1:metadata.name)
      POD_NAMESPACE:  istio-system (v1:metadata.namespace)
      INSTANCE_IP:     (v1:status.podIP)
    Mounts:
      /etc/certs from istio-certs (ro)
      /sock from uds-socket (rw)
      /var/run/secrets/istio.io/policy/adapter from policy-adapter-secret (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-mixer-service-account-token-rgk56 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  istio-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio.istio-mixer-service-account
    Optional:    true
  uds-socket:
    Type:    EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:  
  policy-adapter-secret:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  policy-adapter-secret
    Optional:    true
  istio-mixer-service-account-token-rgk56:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-mixer-service-account-token-rgk56
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-policy-6bb8d44f78-bwswb to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Normal   BackOff    9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff    5m (x21 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     14s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff

mixer istio-proxy

Name:               istio-policy-6bb8d44f78-tbwhc
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp/10.150.0.39
Start Time:         Sat, 15 Jun 2019 16:05:22 +0000
Labels:             app=policy
                    chart=mixer
                    heritage=Tiller
                    istio=mixer
                    istio-mixer-type=policy
                    pod-template-hash=6bb8d44f78
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 10.0.2.4
Controlled By:      ReplicaSet/istio-policy-6bb8d44f78
Containers:
  mixer:
    Container ID:  
    Image:         gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Ports:         15014/TCP, 42422/TCP
    Host Ports:    0/TCP, 0/TCP
    Args:
      --monitoringPort=15014
      --address
      unix:///sock/mixer.socket
      --log_output_level=default:info
      --configStoreURL=mcp://istio-galley.istio-system.svc:9901
      --configDefaultNamespace=istio-system
      --useAdapterCRDs=false
      --useTemplateCRDs=false
      --trace_zipkin_url=http://zipkin.istio-system:9411/api/v1/spans
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Limits:
      cpu:     100m
      memory:  100Mi
    Requests:
      cpu:     10m
      memory:  100Mi
    Liveness:  http-get http://:15014/version delay=5s timeout=1s period=5s #success=1 #failure=3
    Environment:
      GODEBUG:     gctrace=1
      GOMAXPROCS:  6
    Mounts:
      /etc/certs from istio-certs (ro)
      /sock from uds-socket (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-mixer-service-account-token-rgk56 (ro)
  istio-proxy:
    Container ID:  docker://a30084dcd2f9a0152d60316d8ab0305a33f1ca5d2bdec1464edbd6cac568f25f
    Image:         gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      docker-pullable://gcr.io/istio-testing/proxyv2@sha256:690c9ec4efde9be9cbd5fe56498c6f3ba1d61a2432eeae70cf3438c7f0e00949
    Ports:         9091/TCP, 15004/TCP, 15090/TCP
    Host Ports:    0/TCP, 0/TCP, 0/TCP
    Args:
      proxy
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --serviceCluster
      istio-policy
      --templateFile
      /etc/istio/proxy/envoy_policy.yaml.tmpl
      --controlPlaneAuthPolicy
      NONE
    State:          Running
      Started:      Sat, 15 Jun 2019 16:11:15 +0000
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:     10m
      memory:  40Mi
    Environment:
      POD_NAME:       istio-policy-6bb8d44f78-tbwhc (v1:metadata.name)
      POD_NAMESPACE:  istio-system (v1:metadata.namespace)
      INSTANCE_IP:     (v1:status.podIP)
    Mounts:
      /etc/certs from istio-certs (ro)
      /sock from uds-socket (rw)
      /var/run/secrets/istio.io/policy/adapter from policy-adapter-secret (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-mixer-service-account-token-rgk56 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  istio-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio.istio-mixer-service-account
    Optional:    true
  uds-socket:
    Type:    EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:  
  policy-adapter-secret:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  policy-adapter-secret
    Optional:    true
  istio-mixer-service-account-token-rgk56:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-mixer-service-account-token-rgk56
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-policy-6bb8d44f78-tbwhc to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Normal   BackOff    9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff    5m (x21 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     11s (x44 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff

mixer istio-proxy

Name:               istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
Start Time:         Sat, 15 Jun 2019 16:05:16 +0000
Labels:             app=security
                    chart=security
                    controller-uid=5d4f2bca-8f87-11e9-a033-42010a960fc1
                    heritage=Tiller
                    job-name=istio-security-post-install-283d57410a29fa84b1a7971211380e42
                    release=istio
Annotations:        <none>
Status:             Pending
IP:                 10.0.4.4
Controlled By:      Job/istio-security-post-install-283d57410a29fa84b1a7971211380e42
Containers:
  kubectl:
    Container ID:  
    Image:         gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/bash
      /tmp/security/run.sh
      /tmp/security/custom-resources.yaml
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /tmp/security from tmp-configmap-security (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-security-post-install-account-token-fn6xw (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  tmp-configmap-security:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-security-custom-resources
    Optional:  false
  istio-security-post-install-account-token-fn6xw:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-security-post-install-account-token-fn6xw
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   Pulling    9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   BackOff    8m (x7 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     23s (x42 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff

kubectl

Name:               istio-sidecar-injector-7896cddb4f-lzwkv
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855/10.150.0.57
Start Time:         Sat, 15 Jun 2019 16:05:23 +0000
Labels:             app=sidecarInjectorWebhook
                    chart=sidecarInjectorWebhook
                    heritage=Tiller
                    istio=sidecar-injector
                    pod-template-hash=7896cddb4f
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 
Controlled By:      ReplicaSet/istio-sidecar-injector-7896cddb4f
Containers:
  sidecar-injector-webhook:
    Container ID:  
    Image:         gcr.io/istio-testing/sidecar_injector:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Args:
      --caCertFile=/etc/istio/certs/root-cert.pem
      --tlsCertFile=/etc/istio/certs/cert-chain.pem
      --tlsKeyFile=/etc/istio/certs/key.pem
      --injectConfig=/etc/istio/inject/config
      --meshConfig=/etc/istio/config/mesh
      --healthCheckInterval=2s
      --healthCheckFile=/health
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Requests:
      cpu:        10m
    Liveness:     exec [/usr/local/bin/sidecar-injector probe --probe-path=/health --interval=4s] delay=4s timeout=1s period=4s #success=1 #failure=3
    Readiness:    exec [/usr/local/bin/sidecar-injector probe --probe-path=/health --interval=4s] delay=4s timeout=1s period=4s #success=1 #failure=3
    Environment:  <none>
    Mounts:
      /etc/istio/certs from certs (ro)
      /etc/istio/config from config-volume (ro)
      /etc/istio/inject from inject-config (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-sidecar-injector-service-account-token-fjwdg (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio
    Optional:  false
  certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio.istio-sidecar-injector-service-account
    Optional:    false
  inject-config:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      istio-sidecar-injector
    Optional:  false
  istio-sidecar-injector-service-account-token-fjwdg:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-sidecar-injector-service-account-token-fjwdg
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason       Age                 From                                                          Message
  ----     ------       ----                ----                                                          -------
  Normal   Scheduled    10m                 default-scheduler                                             Successfully assigned istio-system/istio-sidecar-injector-7896cddb4f-lzwkv to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855
  Warning  FailedMount  1m (x4 over 8m)     kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855  Unable to mount volumes for pod "istio-sidecar-injector-7896cddb4f-lzwkv_istio-system(61f3fbbb-8f87-11e9-a033-42010a960fc1)": timeout expired waiting for volumes to attach or mount for pod "istio-system"/"istio-sidecar-injector-7896cddb4f-lzwkv". list of unmounted volumes=[certs]. list of unattached volumes=[config-volume certs inject-config istio-sidecar-injector-service-account-token-fjwdg]
  Warning  FailedMount  10s (x13 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855  MountVolume.SetUp failed for volume "certs" : secrets "istio.istio-sidecar-injector-service-account" not found

sidecar-injector-webhook
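
The sidecar injector fails differently: its certs volume mounts the secret istio.istio-sidecar-injector-service-account, which Citadel mints per service account, and since the citadel image for this revision is also missing from the registry that secret is never created, leaving the pod stuck in ContainerCreating with FailedMount. A quick check (sketch; secret name and label selector taken from the dump above):

  # The webhook cannot start until Citadel has created its certificate secret.
  kubectl -n istio-system get secret istio.istio-sidecar-injector-service-account
  kubectl -n istio-system get pods -l istio=citadel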

Name:               istio-telemetry-65874b4d75-p29cv
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
Start Time:         Sat, 15 Jun 2019 16:05:22 +0000
Labels:             app=telemetry
                    chart=mixer
                    heritage=Tiller
                    istio=mixer
                    istio-mixer-type=telemetry
                    pod-template-hash=65874b4d75
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 10.0.4.6
Controlled By:      ReplicaSet/istio-telemetry-65874b4d75
Containers:
  mixer:
    Container ID:  
    Image:         gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      
    Ports:         15014/TCP, 42422/TCP
    Host Ports:    0/TCP, 0/TCP
    Args:
      --monitoringPort=15014
      --address
      unix:///sock/mixer.socket
      --log_output_level=default:info
      --configStoreURL=mcp://istio-galley.istio-system.svc:9901
      --configDefaultNamespace=istio-system
      --useAdapterCRDs=false
      --trace_zipkin_url=http://zipkin.istio-system:9411/api/v1/spans
      --averageLatencyThreshold
      100ms
      --loadsheddingMode
      disabled
    State:          Waiting
      Reason:       ImagePullBackOff
    Ready:          False
    Restart Count:  0
    Limits:
      cpu:     100m
      memory:  100Mi
    Requests:
      cpu:     50m
      memory:  100Mi
    Liveness:  http-get http://:15014/version delay=5s timeout=1s period=5s #success=1 #failure=3
    Environment:
      GODEBUG:     gctrace=1
      GOMAXPROCS:  6
    Mounts:
      /etc/certs from istio-certs (ro)
      /sock from uds-socket (rw)
      /var/run/secrets/istio.io/telemetry/adapter from telemetry-adapter-secret (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-mixer-service-account-token-rgk56 (ro)
  istio-proxy:
    Container ID:  docker://4135231dd035c34293290dd4413eb4430015139e18b4a46c8e7dff690209b16f
    Image:         gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      docker-pullable://gcr.io/istio-testing/proxyv2@sha256:690c9ec4efde9be9cbd5fe56498c6f3ba1d61a2432eeae70cf3438c7f0e00949
    Ports:         9091/TCP, 15004/TCP, 15090/TCP
    Host Ports:    0/TCP, 0/TCP, 0/TCP
    Args:
      proxy
      --domain
      $(POD_NAMESPACE).svc.cluster.local
      --serviceCluster
      istio-telemetry
      --templateFile
      /etc/istio/proxy/envoy_telemetry.yaml.tmpl
      --controlPlaneAuthPolicy
      NONE
    State:          Running
      Started:      Sat, 15 Jun 2019 16:11:23 +0000
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  1Gi
    Requests:
      cpu:     10m
      memory:  40Mi
    Environment:
      POD_NAME:       istio-telemetry-65874b4d75-p29cv (v1:metadata.name)
      POD_NAMESPACE:  istio-system (v1:metadata.namespace)
      INSTANCE_IP:     (v1:status.podIP)
    Mounts:
      /etc/certs from istio-certs (ro)
      /sock from uds-socket (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-mixer-service-account-token-rgk56 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  istio-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio.istio-mixer-service-account
    Optional:    true
  uds-socket:
    Type:    EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:  
  telemetry-adapter-secret:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  telemetry-adapter-secret
    Optional:    true
  istio-mixer-service-account-token-rgk56:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-mixer-service-account-token-rgk56
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-telemetry-65874b4d75-p29cv to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   BackOff    10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff    5m (x21 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     20s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff

mixer istio-proxy

Name:               prometheus-5b48f5d49-qvflw
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:05:23 +0000
Labels:             app=prometheus
                    chart=prometheus
                    heritage=Tiller
                    pod-template-hash=5b48f5d49
                    release=istio
Annotations:        sidecar.istio.io/inject=false
Status:             Pending
IP:                 
Controlled By:      ReplicaSet/prometheus-5b48f5d49
Containers:
  prometheus:
    Container ID:  
    Image:         docker.io/prom/prometheus:v2.8.0
    Image ID:      
    Port:          9090/TCP
    Host Port:     0/TCP
    Args:
      --storage.tsdb.retention=6h
      --config.file=/etc/prometheus/prometheus.yml
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Requests:
      cpu:        10m
    Liveness:     http-get http://:9090/-/healthy delay=0s timeout=1s period=10s #success=1 #failure=3
    Readiness:    http-get http://:9090/-/ready delay=0s timeout=1s period=10s #success=1 #failure=3
    Environment:  <none>
    Mounts:
      /etc/istio-certs from istio-certs (rw)
      /etc/prometheus from config-volume (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from prometheus-token-2dsjk (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      prometheus
    Optional:  false
  istio-certs:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio.default
    Optional:    false
  prometheus-token-2dsjk:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  prometheus-token-2dsjk
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason       Age                 From                                                          Message
  ----     ------       ----                ----                                                          -------
  Normal   Scheduled    10m                 default-scheduler                                             Successfully assigned istio-system/prometheus-5b48f5d49-qvflw to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Warning  FailedMount  1m (x4 over 8m)     kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Unable to mount volumes for pod "prometheus-5b48f5d49-qvflw_istio-system(6197df6e-8f87-11e9-a033-42010a960fc1)": timeout expired waiting for volumes to attach or mount for pod "istio-system"/"prometheus-5b48f5d49-qvflw". list of unmounted volumes=[istio-certs]. list of unattached volumes=[config-volume istio-certs prometheus-token-2dsjk]
  Warning  FailedMount  12s (x13 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  MountVolume.SetUp failed for volume "istio-certs" : secrets "istio.default" not found

prometheus
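
Prometheus is blocked on the same class of failure: its istio-certs volume expects the secret istio.default (the certificate Citadel issues for the default service account), and with Citadel unable to start no istio.io/key-and-cert secrets are being minted. A hedged way to see which of those secrets exist at all (field-selector support for secret type assumed):

  # Secrets of this type are created by Citadel, one per service account.
  kubectl get secret --all-namespaces --field-selector type=istio.io/key-and-cert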




No resources found.
NAME                                                           READY     STATUS      RESTARTS   AGE
istio-citadel-5479f6bd74-smnkz                                 1/1       Running     0          6m
istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-r7jx5   0/1       Completed   0          6m
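
The listing above and the dumps below appear to come from the second (remote) cluster of the multicluster setup (release=istio-remote, on a different node pool), where only Citadel and the cleanup-secrets job are installed. Both pods eventually obtained their images after several ErrImagePull retries, as their State blocks (Started 16:14:59 and 16:15:01) show. To replay a pod's pull history directly (sketch; the pod name is taken from the listing above):

  # Events are retained per involved object for about an hour by default.
  kubectl -n istio-system get events \
    --field-selector involvedObject.name=istio-citadel-5479f6bd74-smnkz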
Name:               istio-citadel-5479f6bd74-smnkz
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7/10.150.15.236
Start Time:         Sat, 15 Jun 2019 16:09:12 +0000
Labels:             app=security
                    chart=security
                    heritage=Tiller
                    istio=citadel
                    pod-template-hash=5479f6bd74
                    release=istio-remote
Annotations:        sidecar.istio.io/inject=false
Status:             Running
IP:                 10.44.3.3
Controlled By:      ReplicaSet/istio-citadel-5479f6bd74
Containers:
  citadel:
    Container ID:  docker://2f2e0e8683490d2915a845198cd1da02d9fd0a0f1c4160013cbc2a621354b26b
    Image:         gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      docker-pullable://gcr.io/istio-testing/citadel@sha256:e43f9dbbeb15ebb4a9615ebebc3f11e5df7accf3eecd31f9a0f5aa2594b5c254
    Port:          <none>
    Host Port:     <none>
    Args:
      --append-dns-names=true
      --grpc-port=8060
      --citadel-storage-namespace=istio-system
      --custom-dns-names=istio-pilot-service-account.istio-system:istio-pilot.istio-system
      --monitoring-port=15014
      --self-signed-ca=false
      --signing-cert=/etc/cacerts/ca-cert.pem
      --signing-key=/etc/cacerts/ca-key.pem
      --root-cert=/etc/cacerts/root-cert.pem
      --cert-chain=/etc/cacerts/cert-chain.pem
    State:          Running
      Started:      Sat, 15 Jun 2019 16:14:59 +0000
    Ready:          True
    Restart Count:  0
    Requests:
      cpu:        10m
    Liveness:     http-get http://:15014/version delay=5s timeout=1s period=5s #success=1 #failure=3
    Environment:  <none>
    Mounts:
      /etc/cacerts from cacerts (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from istio-citadel-service-account-token-dmtg8 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  cacerts:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  cacerts
    Optional:    true
  istio-citadel-service-account-token-dmtg8:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-citadel-service-account-token-dmtg8
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age               From                                                          Message
  ----     ------          ----              ----                                                          -------
  Normal   Scheduled       6m                default-scheduler                                             Successfully assigned istio-system/istio-citadel-5479f6bd74-smnkz to gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7
  Normal   SandboxChanged  6m                kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Failed to pull image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Error: ErrImagePull
  Warning  Failed          5m (x7 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Error: ImagePullBackOff
  Normal   Pulling         5m (x4 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff         1m (x23 over 6m)  kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Back-off pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"

citadel
Name:               istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-r7jx5
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n/10.150.15.225
Start Time:         Sat, 15 Jun 2019 16:09:11 +0000
Labels:             app=security
                    chart=security
                    controller-uid=e99420c1-8f87-11e9-b626-42010a9600f9
                    heritage=Tiller
                    job-name=istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42
                    release=istio-remote
Annotations:        <none>
Status:             Succeeded
IP:                 10.44.1.3
Controlled By:      Job/istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42
Containers:
  kubectl:
    Container ID:  docker://983b81ce91454835cfb57d95a5a8ec10cdb49bcabddf8241f4b7858c3e5ac260
    Image:         gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa
    Image ID:      docker-pullable://gcr.io/istio-testing/kubectl@sha256:699d46a7d82a3ba4c6d25ff250fd0ad0649fa814f1e99af95a0115968e163ec5
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/bash
      -c
      kubectl get secret --all-namespaces | grep "istio.io/key-and-cert" |  while read -r entry; do
  ns=$(echo $entry | awk '{print $1}');
  name=$(echo $entry | awk '{print $2}');
  kubectl delete secret $name -n $ns;
done

    State:          Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Sat, 15 Jun 2019 16:15:01 +0000
      Finished:     Sat, 15 Jun 2019 16:15:01 +0000
    Ready:          False
    Restart Count:  0
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from istio-cleanup-secrets-service-account-token-29dcf (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  istio-cleanup-secrets-service-account-token-29dcf:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  istio-cleanup-secrets-service-account-token-29dcf
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age               From                                                          Message
  ----     ------          ----              ----                                                          -------
  Normal   Scheduled       6m                default-scheduler                                             Successfully assigned istio-system/istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-r7jx5 to gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n
  Normal   SandboxChanged  6m                kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Error: ErrImagePull
  Warning  Failed          5m (x7 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Error: ImagePullBackOff
  Normal   Pulling         5m (x4 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff         1m (x23 over 6m)  kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
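The istio-cleanup-secrets job described above wraps a small shell loop that deletes every secret of type istio.io/key-and-cert. A standalone sketch of the same cleanup, with the variables quoted, assuming kubectl is pointed at the affected cluster:

  kubectl get secret --all-namespaces | grep "istio.io/key-and-cert" | while read -r entry; do
    ns=$(echo "$entry" | awk '{print $1}')      # namespace column
    name=$(echo "$entry" | awk '{print $2}')    # secret name column
    kubectl delete secret "$name" -n "$ns"
  done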

kubectl



				from junit.xml

Find istio-galley-6978b79858-np854_istio-system(605c6eb2-8f87-11e9-a033-42010a960fc1) mentions in log files | View test history on testgrid


Error lines from build-log.txt

... skipping 722 lines ...
2019-06-15T16:09:24.387679Z	info	Running command kubectl -n istio-system rollout status deployment.extensions/istio-pilot --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:09:24.387751Z	info	Running command kubectl -n istio-system rollout status deployment.extensions/istio-sidecar-injector --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:09:24.387960Z	info	Running command kubectl -n istio-system rollout status deployment.extensions/istio-telemetry --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:09:24.388008Z	info	Running command kubectl -n istio-system rollout status deployment.extensions/istio-egressgateway --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:09:24.388054Z	info	Running command kubectl -n istio-system rollout status deployment.extensions/istio-citadel --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:09:24.387979Z	info	Running command kubectl -n istio-system rollout status deployment.extensions/istio-galley --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:22.190419Z	info	Command error: exit status 1
2019-06-15T16:15:22.190740Z	info	Deployment rollout ends after [5m57.803271657s] with err [deployment.extensions/istio-egressgateway in namespace istio-system failed]
2019-06-15T16:15:22.190855Z	error	Failed to deploy Istio.
2019-06-15T16:15:22.190878Z	error	Failed to complete Init. Error deployment.extensions/istio-egressgateway in namespace istio-system failed
2019-06-15T16:15:22.190894Z	info	Saving logs
2019-06-15T16:15:22.190915Z	info	Creating status file
2019-06-15T16:15:22.191257Z	info	Created Status file /logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/pilot_test.json
2019-06-15T16:15:22.191299Z	info	Running command kubectl get ingress --all-namespaces --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:22.195061Z	info	Command error: exit status 1
2019-06-15T16:15:22.206195Z	info	Command error: exit status 1
2019-06-15T16:15:22.538294Z	info	Command output: 
No resources found.
2019-06-15T16:15:22.538377Z	info	Running command kubectl get pods -n istio-system --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:23.136159Z	info	Command error: exit status 1
2019-06-15T16:15:23.137507Z	info	Command error: exit status 1
2019-06-15T16:15:23.139624Z	info	Command error: exit status 1
2019-06-15T16:15:23.318220Z	info	Command output: 
NAME                                                              READY     STATUS                  RESTARTS   AGE
istio-citadel-64f666748f-hn5lx                                    0/1       ImagePullBackOff        0          10m
istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx      0/1       ImagePullBackOff        0          10m
istio-egressgateway-75fc469ddf-qzz76                              0/1       Init:ImagePullBackOff   0          10m
istio-galley-6978b79858-np854                                     0/1       ContainerCreating       0          10m
... skipping 77 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age               From                                                          Message
  ----     ------     ----              ----                                                          -------
  Normal   Scheduled  10m               default-scheduler                                             Successfully assigned istio-system/istio-citadel-64f666748f-hn5lx to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   Pulling    8m (x4 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     8m (x4 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     8m (x4 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   BackOff    8m (x6 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     4m (x20 over 9m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
2019-06-15T16:15:23.846974Z	info	Name:               istio-citadel-64f666748f-hn5lx
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
Start Time:         Sat, 15 Jun 2019 16:05:23 +0000
... skipping 57 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age               From                                                          Message
  ----     ------     ----              ----                                                          -------
  Normal   Scheduled  10m               default-scheduler                                             Successfully assigned istio-system/istio-citadel-64f666748f-hn5lx to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   Pulling    8m (x4 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     8m (x4 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     8m (x4 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   BackOff    8m (x6 over 9m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     4m (x20 over 9m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff

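The harness uses a jsonpath query below to list container names before fetching per-container logs. The same query can be extended to also print each container's waiting reason, which surfaces the pull failure without scrolling through the full describe output; a sketch, assuming kubectl access to the cluster under test:

  kubectl get pods -n istio-system istio-citadel-64f666748f-hn5lx \
    -o jsonpath='{range .status.containerStatuses[*]}{.name}{": "}{.state.waiting.reason}{"\n"}{end}'
  # Expected output while the pull is failing:
  #   citadel: ImagePullBackOff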
2019-06-15T16:15:23.847008Z	info	Running command kubectl get pods -n istio-system istio-citadel-64f666748f-hn5lx -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:24.170224Z	info	Command error: exit status 1
2019-06-15T16:15:24.170906Z	info	Command error: exit status 1
2019-06-15T16:15:24.175762Z	info	Command error: exit status 1
2019-06-15T16:15:24.202855Z	info	Command output: 
citadel
2019-06-15T16:15:24.203018Z	info	Running command kubectl logs istio-citadel-64f666748f-hn5lx -n istio-system -c citadel --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:24.658545Z	info	Command error: exit status 1
2019-06-15T16:15:24.658647Z	warn	Error getting logs for pod istio-system/istio-citadel-64f666748f-hn5lx container citadel: command failed: "Error from server (BadRequest): container \"citadel\" in pod \"istio-citadel-64f666748f-hn5lx\" is waiting to start: trying and failing to pull image\n" exit status 1

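kubectl logs returns BadRequest above because the citadel container never started, so there is nothing to stream. When a container is stuck before start, the pod's events are usually the more useful signal; a sketch that pulls just those, sorted by time, assuming kubectl access:

  kubectl get events -n istio-system \
    --field-selector involvedObject.name=istio-citadel-64f666748f-hn5lx \
    --sort-by=.lastTimestamp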
2019-06-15T16:15:24.658685Z	info	Running command kubectl logs istio-citadel-64f666748f-hn5lx -n istio-system -c citadel -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:25.093602Z	info	No previous log for istio-citadel-64f666748f-hn5lx
2019-06-15T16:15:25.093705Z	info	Fetching logs on istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx
2019-06-15T16:15:25.093737Z	info	Running command kubectl -n istio-system describe pod istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:25.578521Z	info	Command output: 
... skipping 53 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                From                                                          Message
  ----     ------     ----               ----                                                          -------
  Normal   Scheduled  10m                default-scheduler                                             Successfully assigned istio-system/istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Normal   Pulling    8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Normal   BackOff    8m (x7 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     4m (x20 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
2019-06-15T16:15:25.578624Z	info	Name:               istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:05:15 +0000
... skipping 47 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                From                                                          Message
  ----     ------     ----               ----                                                          -------
  Normal   Scheduled  10m                default-scheduler                                             Successfully assigned istio-system/istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Normal   Pulling    8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Normal   BackOff    8m (x7 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     4m (x20 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff

2019-06-15T16:15:25.578667Z	info	Running command kubectl get pods -n istio-system istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:25.913346Z	info	Command output: 
kubectl
2019-06-15T16:15:25.913582Z	info	Running command kubectl logs istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx -n istio-system -c kubectl --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:26.344732Z	info	Command error: exit status 1
2019-06-15T16:15:26.344881Z	warn	Error getting logs for pod istio-system/istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx container kubectl: command failed: "Error from server (BadRequest): container \"kubectl\" in pod \"istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx\" is waiting to start: trying and failing to pull image\n" exit status 1

2019-06-15T16:15:26.344931Z	info	Running command kubectl logs istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx -n istio-system -c kubectl -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:26.768249Z	info	No previous log for istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-hqtkx
2019-06-15T16:15:26.768360Z	info	Fetching logs on istio-egressgateway-75fc469ddf-qzz76
2019-06-15T16:15:26.768373Z	info	Running command kubectl -n istio-system describe pod istio-egressgateway-75fc469ddf-qzz76 --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:29.765749Z	info	Command output: 
... skipping 117 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age                From                                                          Message
  ----     ------          ----               ----                                                          -------
  Normal   Scheduled       10m                default-scheduler                                             Successfully assigned istio-system/istio-egressgateway-75fc469ddf-qzz76 to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Normal   SandboxChanged  10m                kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          9m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          9m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Warning  Failed          8m (x7 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
  Normal   Pulling         8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff         5m (x22 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"
2019-06-15T16:15:29.765884Z	info	Name:               istio-egressgateway-75fc469ddf-qzz76
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
... skipping 113 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age                From                                                          Message
  ----     ------          ----               ----                                                          -------
  Normal   Scheduled       10m                default-scheduler                                             Successfully assigned istio-system/istio-egressgateway-75fc469ddf-qzz76 to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Normal   SandboxChanged  10m                kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          9m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          9m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Warning  Failed          8m (x7 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
  Normal   Pulling         8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff         5m (x22 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"

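istio-egressgateway reports Init:ImagePullBackOff: its proxy_init init container image cannot be pulled, so the main istio-proxy container stays in PodInitializing (as the log fetch below shows). Init containers have their own status list; a sketch to inspect it directly, assuming kubectl access:

  kubectl get pod istio-egressgateway-75fc469ddf-qzz76 -n istio-system \
    -o jsonpath='{range .status.initContainerStatuses[*]}{.name}{": "}{.state.waiting.reason}{"\n"}{end}'
  # Expected output while the init image is missing:
  #   istio-init: ImagePullBackOff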
2019-06-15T16:15:29.765924Z	info	Running command kubectl get pods -n istio-system istio-egressgateway-75fc469ddf-qzz76 -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:30.100761Z	info	Command output: 
istio-proxy
2019-06-15T16:15:30.100905Z	info	Running command kubectl logs istio-egressgateway-75fc469ddf-qzz76 -n istio-system -c istio-proxy --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:30.534024Z	info	Command error: exit status 1
2019-06-15T16:15:30.534110Z	warn	Error getting logs for pod istio-system/istio-egressgateway-75fc469ddf-qzz76 container istio-proxy: command failed: "Error from server (BadRequest): container \"istio-proxy\" in pod \"istio-egressgateway-75fc469ddf-qzz76\" is waiting to start: PodInitializing\n" exit status 1

2019-06-15T16:15:30.534147Z	info	Running command kubectl logs istio-egressgateway-75fc469ddf-qzz76 -n istio-system -c istio-proxy -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:30.938514Z	info	No previous log for istio-egressgateway-75fc469ddf-qzz76
2019-06-15T16:15:30.938597Z	info	Fetching logs on istio-galley-6978b79858-np854
2019-06-15T16:15:30.938625Z	info	Running command kubectl -n istio-system describe pod istio-galley-6978b79858-np854 --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:31.428649Z	info	Command output: 
... skipping 76 lines ...
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason       Age                From                                                          Message
  ----     ------       ----               ----                                                          -------
  Normal   Scheduled    10m                default-scheduler                                             Successfully assigned istio-system/istio-galley-6978b79858-np854 to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Warning  FailedMount  1m (x12 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  MountVolume.SetUp failed for volume "certs" : secrets "istio.istio-galley-service-account" not found
  Warning  FailedMount  1m (x4 over 8m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Unable to mount volumes for pod "istio-galley-6978b79858-np854_istio-system(605c6eb2-8f87-11e9-a033-42010a960fc1)": timeout expired waiting for volumes to attach or mount for pod "istio-system"/"istio-galley-6978b79858-np854". list of unmounted volumes=[certs]. list of unattached volumes=[certs config mesh-config istio-galley-service-account-token-jgt4f]
2019-06-15T16:15:31.428777Z	info	Name:               istio-galley-6978b79858-np854
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
... skipping 71 lines ...
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason       Age                From                                                          Message
  ----     ------       ----               ----                                                          -------
  Normal   Scheduled    10m                default-scheduler                                             Successfully assigned istio-system/istio-galley-6978b79858-np854 to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Warning  FailedMount  1m (x12 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  MountVolume.SetUp failed for volume "certs" : secrets "istio.istio-galley-service-account" not found
  Warning  FailedMount  1m (x4 over 8m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Unable to mount volumes for pod "istio-galley-6978b79858-np854_istio-system(605c6eb2-8f87-11e9-a033-42010a960fc1)": timeout expired waiting for volumes to attach or mount for pod "istio-system"/"istio-galley-6978b79858-np854". list of unmounted volumes=[certs]. list of unattached volumes=[certs config mesh-config istio-galley-service-account-token-jgt4f]

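istio-galley is not failing an image pull; its FailedMount events say the certs volume cannot be mounted because the secret istio.istio-galley-service-account does not exist. That secret is issued by Citadel, which itself never started, so this is a downstream symptom of the same missing images. A quick check, assuming kubectl access:

  # NotFound here is expected for as long as istio-citadel is stuck in ImagePullBackOff.
  kubectl -n istio-system get secret istio.istio-galley-service-account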
2019-06-15T16:15:31.428803Z	info	Running command kubectl get pods -n istio-system istio-galley-6978b79858-np854 -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:31.764442Z	info	Command output: 
galley
2019-06-15T16:15:31.764579Z	info	Running command kubectl logs istio-galley-6978b79858-np854 -n istio-system -c galley --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:32.188634Z	info	Command error: exit status 1
2019-06-15T16:15:32.188706Z	warn	Error getting logs for pod istio-system/istio-galley-6978b79858-np854 container galley: command failed: "Error from server (BadRequest): container \"galley\" in pod \"istio-galley-6978b79858-np854\" is waiting to start: ContainerCreating\n" exit status 1

2019-06-15T16:15:32.188752Z	info	Running command kubectl logs istio-galley-6978b79858-np854 -n istio-system -c galley -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:32.603759Z	info	No previous log for istio-galley-6978b79858-np854
2019-06-15T16:15:32.603833Z	info	Fetching logs on istio-ingressgateway-864db98588-9pcs4
2019-06-15T16:15:32.603846Z	info	Running command kubectl -n istio-system describe pod istio-ingressgateway-864db98588-9pcs4 --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:33.102855Z	info	Command output: 
... skipping 117 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                From                                                          Message
  ----     ------     ----               ----                                                          -------
  Normal   Scheduled  10m                default-scheduler                                             Successfully assigned istio-system/istio-ingressgateway-864db98588-9pcs4 to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   Pulling    8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Warning  Failed     8m (x6 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
  Normal   BackOff    2s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"
2019-06-15T16:15:33.102946Z	info	Name:               istio-ingressgateway-864db98588-9pcs4
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
... skipping 112 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                From                                                          Message
  ----     ------     ----               ----                                                          -------
  Normal   Scheduled  10m                default-scheduler                                             Successfully assigned istio-system/istio-ingressgateway-864db98588-9pcs4 to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   Pulling    8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     8m (x4 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Warning  Failed     8m (x6 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
  Normal   BackOff    2s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/proxy_init:283d57410a29fa84b1a7971211380e42c65b8daa"

2019-06-15T16:15:33.102966Z	info	Running command kubectl get pods -n istio-system istio-ingressgateway-864db98588-9pcs4 -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:33.453824Z	info	Command output: 
istio-proxy
2019-06-15T16:15:33.453959Z	info	Running command kubectl logs istio-ingressgateway-864db98588-9pcs4 -n istio-system -c istio-proxy --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:33.892541Z	info	Command error: exit status 1
2019-06-15T16:15:33.892649Z	warn	Error getting logs for pod istio-system/istio-ingressgateway-864db98588-9pcs4 container istio-proxy: command failed: "Error from server (BadRequest): container \"istio-proxy\" in pod \"istio-ingressgateway-864db98588-9pcs4\" is waiting to start: PodInitializing\n" exit status 1

2019-06-15T16:15:33.892682Z	info	Running command kubectl logs istio-ingressgateway-864db98588-9pcs4 -n istio-system -c istio-proxy -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:34.299659Z	info	No previous log for istio-ingressgateway-864db98588-9pcs4
2019-06-15T16:15:34.299749Z	info	Fetching logs on istio-init-crd-10-l6gjh
2019-06-15T16:15:34.299761Z	info	Running command kubectl -n istio-system describe pod istio-init-crd-10-l6gjh --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:34.815263Z	info	Command output: 
... skipping 50 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age                 From                                                          Message
  ----     ------          ----                ----                                                          -------
  Normal   Scheduled       10m                 default-scheduler                                             Successfully assigned istio-system/istio-init-crd-10-l6gjh to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Normal   SandboxChanged  10m                 kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          10m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          10m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Normal   BackOff         9m (x7 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   Pulling         9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          38s (x45 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
2019-06-15T16:15:34.815380Z	info	Name:               istio-init-crd-10-l6gjh
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:04:46 +0000
... skipping 44 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age                 From                                                          Message
  ----     ------          ----                ----                                                          -------
  Normal   Scheduled       10m                 default-scheduler                                             Successfully assigned istio-system/istio-init-crd-10-l6gjh to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Normal   SandboxChanged  10m                 kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          10m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          10m (x3 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Normal   BackOff         9m (x7 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   Pulling         9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          38s (x45 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff

2019-06-15T16:15:34.815658Z	info	Running command kubectl get pods -n istio-system istio-init-crd-10-l6gjh -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:35.180830Z	info	Command output: 
istio-init-crd-10
2019-06-15T16:15:35.181004Z	info	Running command kubectl logs istio-init-crd-10-l6gjh -n istio-system -c istio-init-crd-10 --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:35.584909Z	info	Command error: exit status 1
2019-06-15T16:15:35.584986Z	warn	Error getting logs for pod istio-system/istio-init-crd-10-l6gjh container istio-init-crd-10: command failed: "Error from server (BadRequest): container \"istio-init-crd-10\" in pod \"istio-init-crd-10-l6gjh\" is waiting to start: trying and failing to pull image\n" exit status 1

2019-06-15T16:15:35.585019Z	info	Running command kubectl logs istio-init-crd-10-l6gjh -n istio-system -c istio-init-crd-10 -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:35.991997Z	info	No previous log for istio-init-crd-10-l6gjh
2019-06-15T16:15:35.992071Z	info	Fetching logs on istio-init-crd-11-wsqpp
2019-06-15T16:15:35.992083Z	info	Running command kubectl -n istio-system describe pod istio-init-crd-11-wsqpp --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:36.476218Z	info	Command output: 
... skipping 51 lines ...
Events:
  Type     Reason          Age                 From                                                          Message
  ----     ------          ----                ----                                                          -------
  Normal   Scheduled       10m                 default-scheduler                                             Successfully assigned istio-system/istio-init-crd-11-wsqpp to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   SandboxChanged  10m                 kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Pod sandbox changed, it will be killed and re-created.
  Normal   Pulling         9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Warning  Failed          5m (x22 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
  Normal   BackOff         49s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
2019-06-15T16:15:36.476302Z	info	Name:               istio-init-crd-11-wsqpp
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
... skipping 46 lines ...
Events:
  Type     Reason          Age                 From                                                          Message
  ----     ------          ----                ----                                                          -------
  Normal   Scheduled       10m                 default-scheduler                                             Successfully assigned istio-system/istio-init-crd-11-wsqpp to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   SandboxChanged  10m                 kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Pod sandbox changed, it will be killed and re-created.
  Normal   Pulling         9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Warning  Failed          5m (x22 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
  Normal   BackOff         49s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"

2019-06-15T16:15:36.476333Z	info	Running command kubectl get pods -n istio-system istio-init-crd-11-wsqpp -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:36.827760Z	info	Command output: 
istio-init-crd-11
2019-06-15T16:15:36.827907Z	info	Running command kubectl logs istio-init-crd-11-wsqpp -n istio-system -c istio-init-crd-11 --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:37.234735Z	info	Command error: exit status 1
2019-06-15T16:15:37.234822Z	warn	Error getting logs for pod istio-system/istio-init-crd-11-wsqpp container istio-init-crd-11: command failed: "Error from server (BadRequest): container \"istio-init-crd-11\" in pod \"istio-init-crd-11-wsqpp\" is waiting to start: trying and failing to pull image\n" exit status 1

2019-06-15T16:15:37.234861Z	info	Running command kubectl logs istio-init-crd-11-wsqpp -n istio-system -c istio-init-crd-11 -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:37.664574Z	info	No previous log for istio-init-crd-11-wsqpp
2019-06-15T16:15:37.664657Z	info	Fetching logs on istio-init-crd-12-kbfdx
2019-06-15T16:15:37.664682Z	info	Running command kubectl -n istio-system describe pod istio-init-crd-12-kbfdx --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:38.273661Z	info	Command output: 
... skipping 50 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-init-crd-12-kbfdx to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp
  Normal   Pulling    9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed     9m (x6 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Normal   BackOff    50s (x42 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
2019-06-15T16:15:38.273771Z	info	Name:               istio-init-crd-12-kbfdx
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp/10.150.0.39
... skipping 45 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-init-crd-12-kbfdx to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp
  Normal   Pulling    9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed     9m (x6 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Normal   BackOff    50s (x42 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"

2019-06-15T16:15:38.273801Z	info	Running command kubectl get pods -n istio-system istio-init-crd-12-kbfdx -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:38.643228Z	info	Command output: 
istio-init-crd-12
2019-06-15T16:15:38.643452Z	info	Running command kubectl logs istio-init-crd-12-kbfdx -n istio-system -c istio-init-crd-12 --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:39.080208Z	info	Command error: exit status 1
2019-06-15T16:15:39.080320Z	warn	Error getting logs for pod istio-system/istio-init-crd-12-kbfdx container istio-init-crd-12: command failed: "Error from server (BadRequest): container \"istio-init-crd-12\" in pod \"istio-init-crd-12-kbfdx\" is waiting to start: trying and failing to pull image\n" exit status 1

2019-06-15T16:15:39.080355Z	info	Running command kubectl logs istio-init-crd-12-kbfdx -n istio-system -c istio-init-crd-12 -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:39.505915Z	info	No previous log for istio-init-crd-12-kbfdx
2019-06-15T16:15:39.506003Z	info	Fetching logs on istio-pilot-696b75d9f8-cz6lj
2019-06-15T16:15:39.506019Z	info	Running command kubectl -n istio-system describe pod istio-pilot-696b75d9f8-cz6lj --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:39.998201Z	info	Command output: 
... skipping 65 lines ...
      /etc/istio/proxy/envoy_pilot.yaml.tmpl
      --controlPlaneAuthPolicy
      NONE
    State:          Running
      Started:      Sat, 15 Jun 2019 16:14:53 +0000
    Last State:     Terminated
      Reason:       Error
      Exit Code:    255
      Started:      Sat, 15 Jun 2019 16:11:27 +0000
      Finished:     Sat, 15 Jun 2019 16:14:52 +0000
    Ready:          True
    Restart Count:  1
    Limits:
... skipping 35 lines ...
Events:
  Type     Reason          Age                From                                                          Message
  ----     ------          ----               ----                                                          -------
  Normal   Scheduled       10m                default-scheduler                                             Successfully assigned istio-system/istio-pilot-696b75d9f8-cz6lj to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp
  Normal   SandboxChanged  10m                kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Pod sandbox changed, it will be killed and re-created.
  Normal   BackOff         10m (x3 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          10m (x3 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Normal   BackOff         10m (x3 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   Pulling         9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Normal   Pulling         9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          5m (x22 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Warning  Unhealthy       10s (x14 over 6m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Readiness probe failed: Get http://10.0.2.6:8080/ready: net/http: request canceled (Client.Timeout exceeded while awaiting headers)
2019-06-15T16:15:39.998341Z	info	Name:               istio-pilot-696b75d9f8-cz6lj
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp/10.150.0.39
Start Time:         Sat, 15 Jun 2019 16:05:23 +0000
... skipping 59 lines ...
      /etc/istio/proxy/envoy_pilot.yaml.tmpl
      --controlPlaneAuthPolicy
      NONE
    State:          Running
      Started:      Sat, 15 Jun 2019 16:14:53 +0000
    Last State:     Terminated
      Reason:       Error
      Exit Code:    255
      Started:      Sat, 15 Jun 2019 16:11:27 +0000
      Finished:     Sat, 15 Jun 2019 16:14:52 +0000
    Ready:          True
    Restart Count:  1
    Limits:
... skipping 35 lines ...
Events:
  Type     Reason          Age                From                                                          Message
  ----     ------          ----               ----                                                          -------
  Normal   Scheduled       10m                default-scheduler                                             Successfully assigned istio-system/istio-pilot-696b75d9f8-cz6lj to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp
  Normal   SandboxChanged  10m                kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Pod sandbox changed, it will be killed and re-created.
  Normal   BackOff         10m (x3 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          10m (x3 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Normal   BackOff         10m (x3 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   Pulling         9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Normal   Pulling         9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed          9m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/pilot:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          5m (x22 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Warning  Unhealthy       10s (x14 over 6m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Readiness probe failed: Get http://10.0.2.6:8080/ready: net/http: request canceled (Client.Timeout exceeded while awaiting headers)

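The discovery container of istio-pilot is Running but failing its readiness probe on http://<pod-ip>:8080/ready, per the Unhealthy events above. That endpoint can be probed by hand through a port-forward; a sketch, assuming kubectl access and a free local port 8080:

  kubectl -n istio-system port-forward istio-pilot-696b75d9f8-cz6lj 8080:8080 &
  PF_PID=$!
  sleep 2
  # 200 means ready; a timeout or non-200 status matches the readiness failures above.
  curl -sS -o /dev/null -w '%{http_code}\n' --max-time 5 http://localhost:8080/ready
  kill "$PF_PID"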
2019-06-15T16:15:39.998379Z	info	Running command kubectl get pods -n istio-system istio-pilot-696b75d9f8-cz6lj -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:40.345672Z	info	Command output: 
discovery istio-proxy
2019-06-15T16:15:40.345858Z	info	Running command kubectl logs istio-pilot-696b75d9f8-cz6lj -n istio-system -c discovery --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:41.226890Z	info	Running command kubectl logs istio-pilot-696b75d9f8-cz6lj -n istio-system -c discovery -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
... skipping 117 lines ...
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-policy-6bb8d44f78-bwswb to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Normal   BackOff    9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff    5m (x21 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     14s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
2019-06-15T16:15:43.059249Z	info	Name:               istio-policy-6bb8d44f78-bwswb
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:05:22 +0000
... skipping 105 lines ...
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-policy-6bb8d44f78-bwswb to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Normal   BackOff    9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Failed to pull image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ErrImagePull
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff    5m (x21 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Back-off pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     14s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Error: ImagePullBackOff

2019-06-15T16:15:43.059304Z	info	Running command kubectl get pods -n istio-system istio-policy-6bb8d44f78-bwswb -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:43.403958Z	info	Command output: 
mixer istio-proxy
2019-06-15T16:15:43.404117Z	info	Running command kubectl logs istio-policy-6bb8d44f78-bwswb -n istio-system -c mixer --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:43.825045Z	info	Command error: exit status 1
2019-06-15T16:15:43.825149Z	warn	Error getting logs for pod istio-system/istio-policy-6bb8d44f78-bwswb container mixer: command failed: "Error from server (BadRequest): container \"mixer\" in pod \"istio-policy-6bb8d44f78-bwswb\" is waiting to start: trying and failing to pull image\n" exit status 1
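
kubectl returns this BadRequest whenever the requested container has never started, so log collection is expected to fail for every container still stuck in ImagePullBackOff (the same message recurs for the policy, telemetry, and post-install pods below). An illustrative way to read the waiting reason straight from the pod status instead, not part of the harness:

  kubectl -n istio-system get pod istio-policy-6bb8d44f78-bwswb \
      -o jsonpath='{range .status.containerStatuses[*]}{.name}{"\t"}{.state.waiting.reason}{"\n"}{end}'
  # expected here: mixer and istio-proxy both reporting ImagePullBackOff or ErrImagePull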

2019-06-15T16:15:43.825191Z	info	Running command kubectl logs istio-policy-6bb8d44f78-bwswb -n istio-system -c mixer -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:44.251700Z	info	No previous log for istio-policy-6bb8d44f78-bwswb
2019-06-15T16:15:44.251868Z	info	Running command kubectl logs istio-policy-6bb8d44f78-bwswb -n istio-system -c istio-proxy --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:44.749667Z	info	Running command kubectl logs istio-policy-6bb8d44f78-bwswb -n istio-system -c istio-proxy -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:45.183013Z	info	No previous log for istio-policy-6bb8d44f78-bwswb
... skipping 114 lines ...
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-policy-6bb8d44f78-tbwhc to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Normal   BackOff    9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff    5m (x21 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     11s (x44 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
2019-06-15T16:15:45.692582Z	info	Name:               istio-policy-6bb8d44f78-tbwhc
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp/10.150.0.39
Start Time:         Sat, 15 Jun 2019 16:05:22 +0000
... skipping 105 lines ...
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-policy-6bb8d44f78-tbwhc to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Normal   BackOff    9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x2 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Failed to pull image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ErrImagePull
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff    5m (x21 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Back-off pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     11s (x44 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-dxmp  Error: ImagePullBackOff

2019-06-15T16:15:45.692607Z	info	Running command kubectl get pods -n istio-system istio-policy-6bb8d44f78-tbwhc -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:46.062436Z	info	Command output: 
mixer istio-proxy
2019-06-15T16:15:46.062579Z	info	Running command kubectl logs istio-policy-6bb8d44f78-tbwhc -n istio-system -c mixer --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:46.505911Z	info	Command error: exit status 1
2019-06-15T16:15:46.506016Z	warn	Error getting logs for pod istio-system/istio-policy-6bb8d44f78-tbwhc container mixer: command failed: "Error from server (BadRequest): container \"mixer\" in pod \"istio-policy-6bb8d44f78-tbwhc\" is waiting to start: trying and failing to pull image\n" exit status 1

2019-06-15T16:15:46.506062Z	info	Running command kubectl logs istio-policy-6bb8d44f78-tbwhc -n istio-system -c mixer -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:47.024260Z	info	No previous log for istio-policy-6bb8d44f78-tbwhc
2019-06-15T16:15:47.024416Z	info	Running command kubectl logs istio-policy-6bb8d44f78-tbwhc -n istio-system -c istio-proxy --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:47.554358Z	info	Running command kubectl logs istio-policy-6bb8d44f78-tbwhc -n istio-system -c istio-proxy -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:47.985641Z	info	No previous log for istio-policy-6bb8d44f78-tbwhc
... skipping 56 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   Pulling    9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   BackOff    8m (x7 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     23s (x42 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
2019-06-15T16:15:48.510703Z	info	Name:               istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
Start Time:         Sat, 15 Jun 2019 16:05:16 +0000
... skipping 47 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Normal   Pulling    9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x4 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   BackOff    8m (x7 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     23s (x42 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff

2019-06-15T16:15:48.510726Z	info	Running command kubectl get pods -n istio-system istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:48.887682Z	info	Command output: 
kubectl
2019-06-15T16:15:48.887840Z	info	Running command kubectl logs istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v -n istio-system -c kubectl --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:49.330370Z	info	Command error: exit status 1
2019-06-15T16:15:49.330462Z	warn	Error getting logs for pod istio-system/istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v container kubectl: command failed: "Error from server (BadRequest): container \"kubectl\" in pod \"istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v\" is waiting to start: trying and failing to pull image\n" exit status 1

2019-06-15T16:15:49.330501Z	info	Running command kubectl logs istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v -n istio-system -c kubectl -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:49.775974Z	info	No previous log for istio-security-post-install-283d57410a29fa84b1a7971211380e2xh4v
2019-06-15T16:15:49.776047Z	info	Fetching logs on istio-sidecar-injector-7896cddb4f-lzwkv
2019-06-15T16:15:49.776058Z	info	Running command kubectl -n istio-system describe pod istio-sidecar-injector-7896cddb4f-lzwkv --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:50.278688Z	info	Command output: 
... skipping 71 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason       Age                 From                                                          Message
  ----     ------       ----                ----                                                          -------
  Normal   Scheduled    10m                 default-scheduler                                             Successfully assigned istio-system/istio-sidecar-injector-7896cddb4f-lzwkv to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855
  Warning  FailedMount  1m (x4 over 8m)     kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855  Unable to mount volumes for pod "istio-sidecar-injector-7896cddb4f-lzwkv_istio-system(61f3fbbb-8f87-11e9-a033-42010a960fc1)": timeout expired waiting for volumes to attach or mount for pod "istio-system"/"istio-sidecar-injector-7896cddb4f-lzwkv". list of unmounted volumes=[certs]. list of unattached volumes=[config-volume certs inject-config istio-sidecar-injector-service-account-token-fjwdg]
  Warning  FailedMount  10s (x13 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855  MountVolume.SetUp failed for volume "certs" : secrets "istio.istio-sidecar-injector-service-account" not found
2019-06-15T16:15:50.278772Z	info	Name:               istio-sidecar-injector-7896cddb4f-lzwkv
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855/10.150.0.57
Start Time:         Sat, 15 Jun 2019 16:05:23 +0000
... skipping 65 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason       Age                 From                                                          Message
  ----     ------       ----                ----                                                          -------
  Normal   Scheduled    10m                 default-scheduler                                             Successfully assigned istio-system/istio-sidecar-injector-7896cddb4f-lzwkv to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855
  Warning  FailedMount  1m (x4 over 8m)     kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855  Unable to mount volumes for pod "istio-sidecar-injector-7896cddb4f-lzwkv_istio-system(61f3fbbb-8f87-11e9-a033-42010a960fc1)": timeout expired waiting for volumes to attach or mount for pod "istio-system"/"istio-sidecar-injector-7896cddb4f-lzwkv". list of unmounted volumes=[certs]. list of unattached volumes=[config-volume certs inject-config istio-sidecar-injector-service-account-token-fjwdg]
  Warning  FailedMount  10s (x13 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-f855  MountVolume.SetUp failed for volume "certs" : secrets "istio.istio-sidecar-injector-service-account" not found
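
Unlike the pull failures above, this pod is blocked on a missing secret: in this Istio release Citadel creates a secret named istio.<service-account> for each service account it manages, and with Citadel itself stuck in ImagePullBackOff the istio.istio-sidecar-injector-service-account secret never appears, so the certs volume cannot mount. An illustrative confirmation (not part of the harness):

  kubectl -n istio-system get secret istio.istio-sidecar-injector-service-account
  # "Error from server (NotFound)" is expected until citadel starts and issues the cert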

2019-06-15T16:15:50.278800Z	info	Running command kubectl get pods -n istio-system istio-sidecar-injector-7896cddb4f-lzwkv -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:50.649026Z	info	Command output: 
sidecar-injector-webhook
2019-06-15T16:15:50.649159Z	info	Running command kubectl logs istio-sidecar-injector-7896cddb4f-lzwkv -n istio-system -c sidecar-injector-webhook --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:51.098142Z	info	Command error: exit status 1
2019-06-15T16:15:51.098227Z	warn	Error getting logs for pod istio-system/istio-sidecar-injector-7896cddb4f-lzwkv container sidecar-injector-webhook: command failed: "Error from server (BadRequest): container \"sidecar-injector-webhook\" in pod \"istio-sidecar-injector-7896cddb4f-lzwkv\" is waiting to start: ContainerCreating\n" exit status 1

2019-06-15T16:15:51.098283Z	info	Running command kubectl logs istio-sidecar-injector-7896cddb4f-lzwkv -n istio-system -c sidecar-injector-webhook -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:51.570621Z	info	No previous log for istio-sidecar-injector-7896cddb4f-lzwkv
2019-06-15T16:15:51.570687Z	info	Fetching logs on istio-telemetry-65874b4d75-p29cv
2019-06-15T16:15:51.570698Z	info	Running command kubectl -n istio-system describe pod istio-telemetry-65874b4d75-p29cv --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:52.086838Z	info	Command output: 
... skipping 114 lines ...
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-telemetry-65874b4d75-p29cv to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   BackOff    10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff    5m (x21 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     20s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
2019-06-15T16:15:52.086921Z	info	Name:               istio-telemetry-65874b4d75-p29cv
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f/10.150.0.47
Start Time:         Sat, 15 Jun 2019 16:05:22 +0000
... skipping 108 lines ...
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                 From                                                          Message
  ----     ------     ----                ----                                                          -------
  Normal   Scheduled  10m                 default-scheduler                                             Successfully assigned istio-system/istio-telemetry-65874b4d75-p29cv to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   BackOff    10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     10m (x2 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Failed to pull image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed     9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ErrImagePull
  Normal   Pulling    9m (x3 over 10m)    kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  pulling image "gcr.io/istio-testing/proxyv2:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff    5m (x21 over 10m)   kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Back-off pulling image "gcr.io/istio-testing/mixer:283d57410a29fa84b1a7971211380e42c65b8daa"
  Warning  Failed     20s (x43 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-s89f  Error: ImagePullBackOff

2019-06-15T16:15:52.086944Z	info	Running command kubectl get pods -n istio-system istio-telemetry-65874b4d75-p29cv -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:52.452777Z	info	Command output: 
mixer istio-proxy
2019-06-15T16:15:52.452912Z	info	Running command kubectl logs istio-telemetry-65874b4d75-p29cv -n istio-system -c mixer --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:52.899627Z	info	Command error: exit status 1
2019-06-15T16:15:52.899726Z	warn	Error getting logs for pod istio-system/istio-telemetry-65874b4d75-p29cv container mixer: command failed: "Error from server (BadRequest): container \"mixer\" in pod \"istio-telemetry-65874b4d75-p29cv\" is waiting to start: trying and failing to pull image\n" exit status 1

2019-06-15T16:15:52.899762Z	info	Running command kubectl logs istio-telemetry-65874b4d75-p29cv -n istio-system -c mixer -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:53.341661Z	info	No previous log for istio-telemetry-65874b4d75-p29cv
2019-06-15T16:15:53.341795Z	info	Running command kubectl logs istio-telemetry-65874b4d75-p29cv -n istio-system -c istio-proxy --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:53.826991Z	info	Running command kubectl logs istio-telemetry-65874b4d75-p29cv -n istio-system -c istio-proxy -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:54.283032Z	info	No previous log for istio-telemetry-65874b4d75-p29cv
... skipping 63 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason       Age                 From                                                          Message
  ----     ------       ----                ----                                                          -------
  Normal   Scheduled    10m                 default-scheduler                                             Successfully assigned istio-system/prometheus-5b48f5d49-qvflw to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Warning  FailedMount  1m (x4 over 8m)     kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Unable to mount volumes for pod "prometheus-5b48f5d49-qvflw_istio-system(6197df6e-8f87-11e9-a033-42010a960fc1)": timeout expired waiting for volumes to attach or mount for pod "istio-system"/"prometheus-5b48f5d49-qvflw". list of unmounted volumes=[istio-certs]. list of unattached volumes=[config-volume istio-certs prometheus-token-2dsjk]
  Warning  FailedMount  12s (x13 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  MountVolume.SetUp failed for volume "istio-certs" : secrets "istio.default" not found
2019-06-15T16:15:54.834251Z	info	Name:               prometheus-5b48f5d49-qvflw
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
Node:               gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6/10.150.0.41
Start Time:         Sat, 15 Jun 2019 16:05:23 +0000
... skipping 54 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason       Age                 From                                                          Message
  ----     ------       ----                ----                                                          -------
  Normal   Scheduled    10m                 default-scheduler                                             Successfully assigned istio-system/prometheus-5b48f5d49-qvflw to gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6
  Warning  FailedMount  1m (x4 over 8m)     kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  Unable to mount volumes for pod "prometheus-5b48f5d49-qvflw_istio-system(6197df6e-8f87-11e9-a033-42010a960fc1)": timeout expired waiting for volumes to attach or mount for pod "istio-system"/"prometheus-5b48f5d49-qvflw". list of unmounted volumes=[istio-certs]. list of unattached volumes=[config-volume istio-certs prometheus-token-2dsjk]
  Warning  FailedMount  12s (x13 over 10m)  kubelet, gke-gke-061519-qhscwqhsc-default-pool-1bf6f707-15t6  MountVolume.SetUp failed for volume "istio-certs" : secrets "istio.default" not found
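
Same root cause as the sidecar injector: the istio-certs volume expects the Citadel-issued istio.default secret, which was never created because Citadel never started. A hedged way to list whatever certificate secrets Citadel has issued so far, assuming it marks them with its usual istio.io/key-and-cert secret type in this release:

  kubectl -n istio-system get secrets --field-selector type=istio.io/key-and-cert
  # an empty list is consistent with citadel never having run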

2019-06-15T16:15:54.834285Z	info	Running command kubectl get pods -n istio-system prometheus-5b48f5d49-qvflw -o jsonpath={.spec.containers[*].name} --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:55.207959Z	info	Command output: 
prometheus
2019-06-15T16:15:55.208108Z	info	Running command kubectl logs prometheus-5b48f5d49-qvflw -n istio-system -c prometheus --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:55.714119Z	info	Command error: exit status 1
2019-06-15T16:15:55.714203Z	warn	Error getting logs for pod istio-system/prometheus-5b48f5d49-qvflw container prometheus: command failed: "Error from server (BadRequest): container \"prometheus\" in pod \"prometheus-5b48f5d49-qvflw\" is waiting to start: ContainerCreating\n" exit status 1

2019-06-15T16:15:55.714240Z	info	Running command kubectl logs prometheus-5b48f5d49-qvflw -n istio-system -c prometheus -p --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
2019-06-15T16:15:56.183361Z	info	No previous log for prometheus-5b48f5d49-qvflw
2019-06-15T16:15:56.183431Z	info	Fetching deployment info on pod

2019-06-15T16:15:56.183445Z	info	Running command kubectl get pod -n istio-system -o yaml --kubeconfig=/logs/artifacts/pilot-test-4bc23b6b646a42a7bf6c8ce1e6/istio-system_kubeconfig
... skipping 80 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age               From                                                          Message
  ----     ------          ----              ----                                                          -------
  Normal   Scheduled       6m                default-scheduler                                             Successfully assigned istio-system/istio-citadel-5479f6bd74-smnkz to gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7
  Normal   SandboxChanged  6m                kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Failed to pull image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Error: ErrImagePull
  Warning  Failed          5m (x7 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Error: ImagePullBackOff
  Normal   Pulling         5m (x4 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff         1m (x23 over 6m)  kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Back-off pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
2019-06-15T16:16:01.777358Z	info	Name:               istio-citadel-5479f6bd74-smnkz
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
... skipping 59 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age               From                                                          Message
  ----     ------          ----              ----                                                          -------
  Normal   Scheduled       6m                default-scheduler                                             Successfully assigned istio-system/istio-citadel-5479f6bd74-smnkz to gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7
  Normal   SandboxChanged  6m                kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Failed to pull image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Error: ErrImagePull
  Warning  Failed          5m (x7 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Error: ImagePullBackOff
  Normal   Pulling         5m (x4 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff         1m (x23 over 6m)  kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-1zp7  Back-off pulling image "gcr.io/istio-testing/citadel:283d57410a29fa84b1a7971211380e42c65b8daa"
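
The second cluster (kubeconfig under /tmp/clusterregS3S) reports the same missing tag for the citadel and kubectl images, so the problem is global to this build rather than specific to one cluster or node pool. A sketch that checks every image named in this log against the registry, assuming gcloud access (image names are taken from the log; the loop is illustrative only):

  for img in pilot proxyv2 mixer citadel kubectl; do
      gcloud container images describe \
          "gcr.io/istio-testing/${img}:283d57410a29fa84b1a7971211380e42c65b8daa" \
          >/dev/null 2>&1 || echo "missing manifest: ${img}"
  done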

2019-06-15T16:16:01.777378Z	info	Running command kubectl get pods -n istio-system istio-citadel-5479f6bd74-smnkz -o jsonpath={.spec.containers[*].name} --kubeconfig=/tmp/clusterregS3S/gke-061519-zwzkfmy1g7
2019-06-15T16:16:02.150842Z	info	Command output: 
citadel
... skipping 62 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age               From                                                          Message
  ----     ------          ----              ----                                                          -------
  Normal   Scheduled       6m                default-scheduler                                             Successfully assigned istio-system/istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-r7jx5 to gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n
  Normal   SandboxChanged  6m                kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Error: ErrImagePull
  Warning  Failed          5m (x7 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Error: ImagePullBackOff
  Normal   Pulling         5m (x4 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff         1m (x23 over 6m)  kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
2019-06-15T16:16:03.707478Z	info	Name:               istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-r7jx5
Namespace:          istio-system
Priority:           0
PriorityClassName:  <none>
... skipping 52 lines ...
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason          Age               From                                                          Message
  ----     ------          ----              ----                                                          -------
  Normal   Scheduled       6m                default-scheduler                                             Successfully assigned istio-system/istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-r7jx5 to gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n
  Normal   SandboxChanged  6m                kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Pod sandbox changed, it will be killed and re-created.
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Failed to pull image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa": rpc error: code = Unknown desc = Error response from daemon: manifest for gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa not found
  Warning  Failed          6m (x3 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Error: ErrImagePull
  Warning  Failed          5m (x7 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Error: ImagePullBackOff
  Normal   Pulling         5m (x4 over 6m)   kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"
  Normal   BackOff         1m (x23 over 6m)  kubelet, gke-gke-061519-zwzkfmy1g-default-pool-03ea3e94-k00n  Back-off pulling image "gcr.io/istio-testing/kubectl:283d57410a29fa84b1a7971211380e42c65b8daa"

2019-06-15T16:16:03.707501Z	info	Running command kubectl get pods -n istio-system istio-cleanup-secrets-283d57410a29fa84b1a7971211380e42-r7jx5 -o jsonpath={.spec.containers[*].name} --kubeconfig=/tmp/clusterregS3S/gke-061519-zwzkfmy1g7
2019-06-15T16:16:04.083245Z	info	Command output: 
kubectl
... skipping 7 lines ...

2019-06-15T16:16:05.435038Z	info	Running command kubectl get service -n istio-system -o yaml --kubeconfig=/tmp/clusterregS3S/gke-061519-zwzkfmy1g7
2019-06-15T16:16:05.807479Z	info	Fetching deployment info on ingress

2019-06-15T16:16:05.807534Z	info	Running command kubectl get ingress -n istio-system -o yaml --kubeconfig=/tmp/clusterregS3S/gke-061519-zwzkfmy1g7
2019-06-15T16:16:06.197779Z	info	Dev mode (--skip_cleanup), skipping cleanup (removal of namespace/install)
FAIL	istio.io/istio/tests/e2e/tests/pilot	685.621s
tests/istio.mk:184: recipe for target 'test/local/noauth/e2e_pilotv2' failed
make[1]: *** [test/local/noauth/e2e_pilotv2] Error 1
make[1]: Leaving directory '/home/prow/go/src/istio.io/istio'
tests/istio.mk:127: recipe for target 'with_junit_report' failed
make: *** [with_junit_report] Error 2
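
The failure chain reads bottom-up from here: the go package istio.io/istio/tests/e2e/tests/pilot fails after ~686s, the e2e_pilotv2 recipe in tests/istio.mk propagates Error 1, and the with_junit_report wrapper turns that into the top-level Error 2 before the shell trap below runs cleanup. To rerun only the failing target against images that do exist, something like the following is the usual shape; HUB and TAG as registry/tag overrides are an assumption about the makefile, not read from this log:

  # illustrative only; substitute a tag that is actually present in the registry
  HUB=gcr.io/istio-testing TAG=<known-good-tag> \
      make test/local/noauth/e2e_pilotv2
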
+ cleanup
+ [[ True == \T\r\u\e ]]
+ unsetup_clusters
+ PILOT_CLUSTER=gke_istio-boskos-162_us-east4-a_gke-061519-qhscwqhscp
+ unset IFS
++ kubectl config get-contexts -o name
... skipping 40 lines ...